From 56c99b314f6643f3f9d41675ca50ef68c0faa2d3 Mon Sep 17 00:00:00 2001
From: Marvin Zhang
Date: Mon, 22 Jul 2019 12:51:52 +0800
Subject: [PATCH] added golang

---
 backend | 1 -
 backend/Dockerfile | 19 +
 backend/Makefile | 32 +
 backend/README.md | 3 +
 backend/conf/config.yml | 20 +
 backend/config/config.go | 55 +
 backend/constants/message.go | 7 +
 backend/constants/model.go | 6 +
 backend/constants/node.go | 6 +
 backend/constants/spider.go | 6 +
 backend/constants/system.go | 7 +
 backend/constants/task.go | 14 +
 backend/database/mongo.go | 45 +
 backend/database/pubsub.go | 72 +
 backend/database/redis.go | 119 +
 backend/dump.rdb | Bin 0 -> 155 bytes
 backend/go.mod | 14 +
 backend/go.sum | 193 +
 backend/lib/cron/.gitignore | 22 +
 backend/lib/cron/.travis.yml | 1 +
 backend/lib/cron/LICENSE | 21 +
 backend/lib/cron/README.md | 125 +
 backend/lib/cron/chain.go | 92 +
 backend/lib/cron/chain_test.go | 221 +
 backend/lib/cron/constantdelay.go | 27 +
 backend/lib/cron/constantdelay_test.go | 54 +
 backend/lib/cron/cron.go | 350 +
 backend/lib/cron/cron_test.go | 702 +
 backend/lib/cron/doc.go | 212 +
 backend/lib/cron/logger.go | 86 +
 backend/lib/cron/option.go | 45 +
 backend/lib/cron/option_test.go | 42 +
 backend/lib/cron/parser.go | 434 +
 backend/lib/cron/parser_test.go | 383 +
 backend/lib/cron/spec.go | 188 +
 backend/lib/cron/spec_test.go | 300 +
 backend/main.go | 121 +
 backend/middlewares/cors.go | 19 +
 backend/model/base.go | 12 +
 backend/model/node.go | 117 +
 backend/model/schedule.go | 131 +
 backend/model/spider.go | 167 +
 backend/model/task.go | 192 +
 backend/routes/base.go | 16 +
 backend/routes/node.go | 93 +
 backend/routes/schedule.go | 125 +
 backend/routes/spider.go | 239 +
 backend/routes/task.go | 267 +
 backend/routes/utils.go | 24 +
 backend/services/log.go | 57 +
 backend/services/node.go | 348 +
 backend/services/schedule.go | 133 +
 backend/services/spider.go | 387 +
 backend/services/task.go | 444 +
 backend/test/test.http | 49 +
 backend/utils/chan.go | 27 +
 backend/utils/file.go | 200 +
 backend/utils/model.go | 26 +
 backend/vendor/github.com/apex/log/.gitignore | 1 +
 backend/vendor/github.com/apex/log/History.md | 12 +
 backend/vendor/github.com/apex/log/LICENSE | 22 +
 backend/vendor/github.com/apex/log/Makefile | 2 +
 backend/vendor/github.com/apex/log/Readme.md | 29 +
 backend/vendor/github.com/apex/log/default.go | 45 +
 backend/vendor/github.com/apex/log/doc.go | 10 +
 backend/vendor/github.com/apex/log/entry.go | 172 +
 backend/vendor/github.com/apex/log/go.mod | 25 +
 backend/vendor/github.com/apex/log/go.sum | 90 +
 .../vendor/github.com/apex/log/interface.go | 19 +
 backend/vendor/github.com/apex/log/levels.go | 81 +
 backend/vendor/github.com/apex/log/logger.go | 149 +
 backend/vendor/github.com/apex/log/pkg.go | 100 +
 backend/vendor/github.com/apex/log/stack.go | 8 +
 .../fsnotify/fsnotify/.editorconfig | 5 +
 .../github.com/fsnotify/fsnotify/.gitignore | 6 +
 .../github.com/fsnotify/fsnotify/.travis.yml | 30 +
 .../github.com/fsnotify/fsnotify/AUTHORS | 52 +
 .../github.com/fsnotify/fsnotify/CHANGELOG.md | 317 +
 .../fsnotify/fsnotify/CONTRIBUTING.md | 77 +
 .../github.com/fsnotify/fsnotify/LICENSE | 28 +
 .../github.com/fsnotify/fsnotify/README.md | 79 +
 .../github.com/fsnotify/fsnotify/fen.go | 37 +
 .../github.com/fsnotify/fsnotify/fsnotify.go | 66 +
 .../github.com/fsnotify/fsnotify/inotify.go | 337 +
 .../fsnotify/fsnotify/inotify_poller.go | 187 +
 .../github.com/fsnotify/fsnotify/kqueue.go | 521 +
 .../fsnotify/fsnotify/open_mode_bsd.go | 11 +
 .../fsnotify/fsnotify/open_mode_darwin.go | 12 +
.../github.com/fsnotify/fsnotify/windows.go | 561 + .../github.com/gin-contrib/sse/.travis.yml | 26 + .../vendor/github.com/gin-contrib/sse/LICENSE | 21 + .../github.com/gin-contrib/sse/README.md | 58 + .../github.com/gin-contrib/sse/sse-decoder.go | 116 + .../github.com/gin-contrib/sse/sse-encoder.go | 110 + .../github.com/gin-contrib/sse/writer.go | 24 + .../github.com/gin-gonic/gin/.gitignore | 7 + .../github.com/gin-gonic/gin/.travis.yml | 44 + .../github.com/gin-gonic/gin/AUTHORS.md | 231 + .../github.com/gin-gonic/gin/BENCHMARKS.md | 604 + .../github.com/gin-gonic/gin/CHANGELOG.md | 269 + .../gin-gonic/gin/CODE_OF_CONDUCT.md | 46 + .../github.com/gin-gonic/gin/CONTRIBUTING.md | 13 + .../vendor/github.com/gin-gonic/gin/LICENSE | 21 + .../vendor/github.com/gin-gonic/gin/Makefile | 80 + .../vendor/github.com/gin-gonic/gin/README.md | 2070 + .../vendor/github.com/gin-gonic/gin/auth.go | 96 + .../gin-gonic/gin/binding/binding.go | 113 + .../gin/binding/default_validator.go | 51 + .../github.com/gin-gonic/gin/binding/form.go | 89 + .../gin-gonic/gin/binding/form_mapping.go | 330 + .../github.com/gin-gonic/gin/binding/json.go | 47 + .../gin-gonic/gin/binding/msgpack.go | 35 + .../gin-gonic/gin/binding/protobuf.go | 36 + .../github.com/gin-gonic/gin/binding/query.go | 21 + .../github.com/gin-gonic/gin/binding/uri.go | 18 + .../github.com/gin-gonic/gin/binding/xml.go | 33 + .../github.com/gin-gonic/gin/binding/yaml.go | 35 + .../github.com/gin-gonic/gin/codecov.yml | 5 + .../github.com/gin-gonic/gin/context.go | 1023 + .../gin-gonic/gin/context_appengine.go | 11 + .../vendor/github.com/gin-gonic/gin/debug.go | 103 + .../github.com/gin-gonic/gin/deprecated.go | 21 + .../vendor/github.com/gin-gonic/gin/doc.go | 6 + .../vendor/github.com/gin-gonic/gin/errors.go | 169 + backend/vendor/github.com/gin-gonic/gin/fs.go | 45 + .../vendor/github.com/gin-gonic/gin/gin.go | 477 + .../vendor/github.com/gin-gonic/gin/go.mod | 18 + .../vendor/github.com/gin-gonic/gin/go.sum | 36 + .../gin-gonic/gin/internal/json/json.go | 22 + .../gin-gonic/gin/internal/json/jsoniter.go | 23 + .../vendor/github.com/gin-gonic/gin/logger.go | 270 + .../vendor/github.com/gin-gonic/gin/mode.go | 83 + .../vendor/github.com/gin-gonic/gin/path.go | 123 + .../github.com/gin-gonic/gin/recovery.go | 151 + .../github.com/gin-gonic/gin/render/data.go | 25 + .../github.com/gin-gonic/gin/render/html.go | 92 + .../github.com/gin-gonic/gin/render/json.go | 194 + .../gin-gonic/gin/render/msgpack.go | 35 + .../gin-gonic/gin/render/protobuf.go | 36 + .../github.com/gin-gonic/gin/render/reader.go | 43 + .../gin-gonic/gin/render/redirect.go | 29 + .../github.com/gin-gonic/gin/render/render.go | 41 + .../github.com/gin-gonic/gin/render/text.go | 40 + .../github.com/gin-gonic/gin/render/xml.go | 28 + .../github.com/gin-gonic/gin/render/yaml.go | 36 + .../gin-gonic/gin/response_writer.go | 126 + .../github.com/gin-gonic/gin/routergroup.go | 228 + .../github.com/gin-gonic/gin/test_helpers.go | 16 + .../vendor/github.com/gin-gonic/gin/tree.go | 627 + .../vendor/github.com/gin-gonic/gin/utils.go | 151 + .../github.com/gin-gonic/gin/version.go | 8 + .../github.com/globalsign/mgo/.gitignore | 2 + .../github.com/globalsign/mgo/.travis.yml | 49 + .../github.com/globalsign/mgo/CONTRIBUTING.md | 14 + .../vendor/github.com/globalsign/mgo/LICENSE | 25 + .../vendor/github.com/globalsign/mgo/Makefile | 5 + .../github.com/globalsign/mgo/README.md | 105 + .../vendor/github.com/globalsign/mgo/auth.go | 467 + .../github.com/globalsign/mgo/bson/LICENSE | 25 + 
.../github.com/globalsign/mgo/bson/README.md | 12 + .../github.com/globalsign/mgo/bson/bson.go | 836 + .../bson/bson_corpus_spec_test_generator.go | 294 + .../globalsign/mgo/bson/compatibility.go | 29 + .../github.com/globalsign/mgo/bson/decimal.go | 312 + .../github.com/globalsign/mgo/bson/decode.go | 1055 + .../github.com/globalsign/mgo/bson/encode.go | 645 + .../github.com/globalsign/mgo/bson/json.go | 384 + .../github.com/globalsign/mgo/bson/stream.go | 90 + .../vendor/github.com/globalsign/mgo/bulk.go | 366 + .../globalsign/mgo/changestreams.go | 357 + .../github.com/globalsign/mgo/cluster.go | 704 + .../github.com/globalsign/mgo/coarse_time.go | 62 + .../vendor/github.com/globalsign/mgo/doc.go | 35 + .../github.com/globalsign/mgo/gridfs.go | 782 + .../globalsign/mgo/internal/json/LICENSE | 27 + .../globalsign/mgo/internal/json/decode.go | 1685 + .../globalsign/mgo/internal/json/encode.go | 1260 + .../globalsign/mgo/internal/json/extension.go | 95 + .../globalsign/mgo/internal/json/fold.go | 143 + .../globalsign/mgo/internal/json/indent.go | 141 + .../globalsign/mgo/internal/json/scanner.go | 697 + .../globalsign/mgo/internal/json/stream.go | 510 + .../globalsign/mgo/internal/json/tags.go | 44 + .../globalsign/mgo/internal/sasl/sasl.c | 77 + .../globalsign/mgo/internal/sasl/sasl.go | 142 + .../mgo/internal/sasl/sasl_windows.c | 122 + .../mgo/internal/sasl/sasl_windows.go | 142 + .../mgo/internal/sasl/sasl_windows.h | 7 + .../mgo/internal/sasl/sspi_windows.c | 96 + .../mgo/internal/sasl/sspi_windows.h | 70 + .../globalsign/mgo/internal/scram/scram.go | 266 + .../vendor/github.com/globalsign/mgo/log.go | 133 + .../vendor/github.com/globalsign/mgo/queue.go | 91 + .../github.com/globalsign/mgo/raceoff.go | 5 + .../github.com/globalsign/mgo/raceon.go | 5 + .../github.com/globalsign/mgo/saslimpl.go | 11 + .../github.com/globalsign/mgo/saslstub.go | 11 + .../github.com/globalsign/mgo/server.go | 609 + .../github.com/globalsign/mgo/session.go | 5656 +++ .../github.com/globalsign/mgo/socket.go | 756 + .../vendor/github.com/globalsign/mgo/stats.go | 184 + .../vendor/github.com/golang/protobuf/AUTHORS | 3 + .../github.com/golang/protobuf/CONTRIBUTORS | 3 + .../vendor/github.com/golang/protobuf/LICENSE | 28 + .../github.com/golang/protobuf/proto/clone.go | 253 + .../golang/protobuf/proto/decode.go | 427 + .../golang/protobuf/proto/deprecated.go | 63 + .../golang/protobuf/proto/discard.go | 350 + .../golang/protobuf/proto/encode.go | 203 + .../github.com/golang/protobuf/proto/equal.go | 301 + .../golang/protobuf/proto/extensions.go | 607 + .../github.com/golang/protobuf/proto/lib.go | 965 + .../golang/protobuf/proto/message_set.go | 181 + .../golang/protobuf/proto/pointer_reflect.go | 360 + .../golang/protobuf/proto/pointer_unsafe.go | 313 + .../golang/protobuf/proto/properties.go | 545 + .../golang/protobuf/proto/table_marshal.go | 2776 ++ .../golang/protobuf/proto/table_merge.go | 654 + .../golang/protobuf/proto/table_unmarshal.go | 2053 + .../github.com/golang/protobuf/proto/text.go | 843 + .../golang/protobuf/proto/text_parser.go | 880 + .../vendor/github.com/gomodule/redigo/LICENSE | 175 + .../gomodule/redigo/internal/commandinfo.go | 54 + .../github.com/gomodule/redigo/redis/conn.go | 673 + .../github.com/gomodule/redigo/redis/doc.go | 177 + .../github.com/gomodule/redigo/redis/go16.go | 27 + .../github.com/gomodule/redigo/redis/go17.go | 29 + .../github.com/gomodule/redigo/redis/go18.go | 9 + .../github.com/gomodule/redigo/redis/log.go | 134 + .../github.com/gomodule/redigo/redis/pool.go | 
562 + .../gomodule/redigo/redis/pool17.go | 35 + .../gomodule/redigo/redis/pubsub.go | 148 + .../github.com/gomodule/redigo/redis/redis.go | 117 + .../github.com/gomodule/redigo/redis/reply.go | 479 + .../github.com/gomodule/redigo/redis/scan.go | 585 + .../gomodule/redigo/redis/script.go | 91 + .../github.com/hashicorp/hcl/.gitignore | 9 + .../github.com/hashicorp/hcl/.travis.yml | 13 + .../vendor/github.com/hashicorp/hcl/LICENSE | 354 + .../vendor/github.com/hashicorp/hcl/Makefile | 18 + .../vendor/github.com/hashicorp/hcl/README.md | 125 + .../github.com/hashicorp/hcl/appveyor.yml | 19 + .../github.com/hashicorp/hcl/decoder.go | 729 + .../vendor/github.com/hashicorp/hcl/go.mod | 3 + .../vendor/github.com/hashicorp/hcl/go.sum | 2 + .../vendor/github.com/hashicorp/hcl/hcl.go | 11 + .../github.com/hashicorp/hcl/hcl/ast/ast.go | 219 + .../github.com/hashicorp/hcl/hcl/ast/walk.go | 52 + .../hashicorp/hcl/hcl/parser/error.go | 17 + .../hashicorp/hcl/hcl/parser/parser.go | 532 + .../hashicorp/hcl/hcl/printer/nodes.go | 789 + .../hashicorp/hcl/hcl/printer/printer.go | 66 + .../hashicorp/hcl/hcl/scanner/scanner.go | 652 + .../hashicorp/hcl/hcl/strconv/quote.go | 241 + .../hashicorp/hcl/hcl/token/position.go | 46 + .../hashicorp/hcl/hcl/token/token.go | 219 + .../hashicorp/hcl/json/parser/flatten.go | 117 + .../hashicorp/hcl/json/parser/parser.go | 313 + .../hashicorp/hcl/json/scanner/scanner.go | 451 + .../hashicorp/hcl/json/token/position.go | 46 + .../hashicorp/hcl/json/token/token.go | 118 + .../vendor/github.com/hashicorp/hcl/lex.go | 38 + .../vendor/github.com/hashicorp/hcl/parse.go | 39 + .../github.com/json-iterator/go/.codecov.yml | 3 + .../github.com/json-iterator/go/.gitignore | 4 + .../github.com/json-iterator/go/.travis.yml | 14 + .../github.com/json-iterator/go/Gopkg.lock | 21 + .../github.com/json-iterator/go/Gopkg.toml | 26 + .../github.com/json-iterator/go/LICENSE | 21 + .../github.com/json-iterator/go/README.md | 87 + .../github.com/json-iterator/go/adapter.go | 150 + .../vendor/github.com/json-iterator/go/any.go | 325 + .../github.com/json-iterator/go/any_array.go | 278 + .../github.com/json-iterator/go/any_bool.go | 137 + .../github.com/json-iterator/go/any_float.go | 83 + .../github.com/json-iterator/go/any_int32.go | 74 + .../github.com/json-iterator/go/any_int64.go | 74 + .../json-iterator/go/any_invalid.go | 82 + .../github.com/json-iterator/go/any_nil.go | 69 + .../github.com/json-iterator/go/any_number.go | 123 + .../github.com/json-iterator/go/any_object.go | 374 + .../github.com/json-iterator/go/any_str.go | 166 + .../github.com/json-iterator/go/any_uint32.go | 74 + .../github.com/json-iterator/go/any_uint64.go | 74 + .../github.com/json-iterator/go/build.sh | 12 + .../github.com/json-iterator/go/config.go | 375 + .../go/fuzzy_mode_convert_table.md | 7 + .../github.com/json-iterator/go/iter.go | 322 + .../github.com/json-iterator/go/iter_array.go | 58 + .../github.com/json-iterator/go/iter_float.go | 339 + .../github.com/json-iterator/go/iter_int.go | 345 + .../json-iterator/go/iter_object.go | 251 + .../github.com/json-iterator/go/iter_skip.go | 129 + .../json-iterator/go/iter_skip_sloppy.go | 144 + .../json-iterator/go/iter_skip_strict.go | 99 + .../github.com/json-iterator/go/iter_str.go | 215 + .../github.com/json-iterator/go/jsoniter.go | 18 + .../github.com/json-iterator/go/pool.go | 42 + .../github.com/json-iterator/go/reflect.go | 332 + .../json-iterator/go/reflect_array.go | 104 + .../json-iterator/go/reflect_dynamic.go | 70 + 
.../json-iterator/go/reflect_extension.go | 483 + .../json-iterator/go/reflect_json_number.go | 112 + .../go/reflect_json_raw_message.go | 60 + .../json-iterator/go/reflect_map.go | 338 + .../json-iterator/go/reflect_marshaler.go | 217 + .../json-iterator/go/reflect_native.go | 451 + .../json-iterator/go/reflect_optional.go | 133 + .../json-iterator/go/reflect_slice.go | 99 + .../go/reflect_struct_decoder.go | 1048 + .../go/reflect_struct_encoder.go | 210 + .../github.com/json-iterator/go/stream.go | 211 + .../json-iterator/go/stream_float.go | 94 + .../github.com/json-iterator/go/stream_int.go | 190 + .../github.com/json-iterator/go/stream_str.go | 372 + .../github.com/json-iterator/go/test.sh | 12 + .../magiconair/properties/.gitignore | 6 + .../magiconair/properties/.travis.yml | 10 + .../magiconair/properties/CHANGELOG.md | 131 + .../github.com/magiconair/properties/LICENSE | 25 + .../magiconair/properties/README.md | 129 + .../magiconair/properties/decode.go | 289 + .../github.com/magiconair/properties/doc.go | 156 + .../magiconair/properties/integrate.go | 34 + .../github.com/magiconair/properties/lex.go | 407 + .../github.com/magiconair/properties/load.go | 292 + .../magiconair/properties/parser.go | 95 + .../magiconair/properties/properties.go | 833 + .../magiconair/properties/rangecheck.go | 31 + .../github.com/mattn/go-isatty/.travis.yml | 13 + .../vendor/github.com/mattn/go-isatty/LICENSE | 9 + .../github.com/mattn/go-isatty/README.md | 50 + .../vendor/github.com/mattn/go-isatty/doc.go | 2 + .../vendor/github.com/mattn/go-isatty/go.mod | 3 + .../vendor/github.com/mattn/go-isatty/go.sum | 2 + .../mattn/go-isatty/isatty_android.go | 23 + .../github.com/mattn/go-isatty/isatty_bsd.go | 24 + .../mattn/go-isatty/isatty_others.go | 15 + .../mattn/go-isatty/isatty_solaris.go | 22 + .../mattn/go-isatty/isatty_tcgets.go | 19 + .../mattn/go-isatty/isatty_windows.go | 94 + .../mitchellh/mapstructure/.travis.yml | 8 + .../mitchellh/mapstructure/CHANGELOG.md | 21 + .../github.com/mitchellh/mapstructure/LICENSE | 21 + .../mitchellh/mapstructure/README.md | 46 + .../mitchellh/mapstructure/decode_hooks.go | 217 + .../mitchellh/mapstructure/error.go | 50 + .../github.com/mitchellh/mapstructure/go.mod | 3 + .../mitchellh/mapstructure/mapstructure.go | 1149 + .../modern-go/concurrent/.gitignore | 1 + .../modern-go/concurrent/.travis.yml | 14 + .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/concurrent/README.md | 49 + .../modern-go/concurrent/executor.go | 14 + .../modern-go/concurrent/go_above_19.go | 15 + .../modern-go/concurrent/go_below_19.go | 33 + .../github.com/modern-go/concurrent/log.go | 13 + .../github.com/modern-go/concurrent/test.sh | 12 + .../concurrent/unbounded_executor.go | 119 + .../github.com/modern-go/reflect2/.gitignore | 2 + .../github.com/modern-go/reflect2/.travis.yml | 15 + .../github.com/modern-go/reflect2/Gopkg.lock | 15 + .../github.com/modern-go/reflect2/Gopkg.toml | 35 + .../github.com/modern-go/reflect2/LICENSE | 201 + .../github.com/modern-go/reflect2/README.md | 71 + .../modern-go/reflect2/go_above_17.go | 8 + .../modern-go/reflect2/go_above_19.go | 14 + .../modern-go/reflect2/go_below_17.go | 9 + .../modern-go/reflect2/go_below_19.go | 14 + .../github.com/modern-go/reflect2/reflect2.go | 298 + .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 + .../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 
.../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 + .../github.com/modern-go/reflect2/safe_map.go | 101 + .../modern-go/reflect2/safe_slice.go | 92 + .../modern-go/reflect2/safe_struct.go | 29 + .../modern-go/reflect2/safe_type.go | 78 + .../github.com/modern-go/reflect2/test.sh | 12 + .../github.com/modern-go/reflect2/type_map.go | 113 + .../modern-go/reflect2/unsafe_array.go | 65 + .../modern-go/reflect2/unsafe_eface.go | 59 + .../modern-go/reflect2/unsafe_field.go | 74 + .../modern-go/reflect2/unsafe_iface.go | 64 + .../modern-go/reflect2/unsafe_link.go | 70 + .../modern-go/reflect2/unsafe_map.go | 138 + .../modern-go/reflect2/unsafe_ptr.go | 46 + .../modern-go/reflect2/unsafe_slice.go | 177 + .../modern-go/reflect2/unsafe_struct.go | 59 + .../modern-go/reflect2/unsafe_type.go | 85 + .../github.com/pelletier/go-toml/.gitignore | 2 + .../github.com/pelletier/go-toml/.travis.yml | 23 + .../github.com/pelletier/go-toml/LICENSE | 21 + .../github.com/pelletier/go-toml/README.md | 131 + .../pelletier/go-toml/benchmark.json | 164 + .../github.com/pelletier/go-toml/benchmark.sh | 32 + .../pelletier/go-toml/benchmark.toml | 244 + .../pelletier/go-toml/benchmark.yml | 121 + .../github.com/pelletier/go-toml/doc.go | 23 + .../pelletier/go-toml/example-crlf.toml | 29 + .../github.com/pelletier/go-toml/example.toml | 29 + .../github.com/pelletier/go-toml/fuzz.go | 31 + .../github.com/pelletier/go-toml/fuzz.sh | 15 + .../pelletier/go-toml/keysparsing.go | 85 + .../github.com/pelletier/go-toml/lexer.go | 750 + .../github.com/pelletier/go-toml/marshal.go | 609 + .../pelletier/go-toml/marshal_test.toml | 38 + .../github.com/pelletier/go-toml/parser.go | 430 + .../github.com/pelletier/go-toml/position.go | 29 + .../github.com/pelletier/go-toml/test.sh | 88 + .../github.com/pelletier/go-toml/token.go | 144 + .../github.com/pelletier/go-toml/toml.go | 367 + .../pelletier/go-toml/tomltree_create.go | 142 + .../pelletier/go-toml/tomltree_write.go | 333 + .../vendor/github.com/pkg/errors/.gitignore | 24 + .../vendor/github.com/pkg/errors/.travis.yml | 15 + backend/vendor/github.com/pkg/errors/LICENSE | 23 + .../vendor/github.com/pkg/errors/README.md | 52 + .../vendor/github.com/pkg/errors/appveyor.yml | 32 + .../vendor/github.com/pkg/errors/errors.go | 282 + backend/vendor/github.com/pkg/errors/stack.go | 147 + .../vendor/github.com/robfig/cron/.gitignore | 22 + .../vendor/github.com/robfig/cron/.travis.yml | 1 + backend/vendor/github.com/robfig/cron/LICENSE | 21 + .../vendor/github.com/robfig/cron/README.md | 6 + .../github.com/robfig/cron/constantdelay.go | 27 + backend/vendor/github.com/robfig/cron/cron.go | 259 + backend/vendor/github.com/robfig/cron/doc.go | 129 + .../vendor/github.com/robfig/cron/parser.go | 380 + backend/vendor/github.com/robfig/cron/spec.go | 158 + .../github.com/satori/go.uuid/.travis.yml | 23 + .../vendor/github.com/satori/go.uuid/LICENSE | 20 + .../github.com/satori/go.uuid/README.md | 65 + .../vendor/github.com/satori/go.uuid/codec.go | 206 + .../github.com/satori/go.uuid/generator.go | 239 + .../vendor/github.com/satori/go.uuid/sql.go | 78 + .../vendor/github.com/satori/go.uuid/uuid.go | 161 + .../vendor/github.com/spf13/afero/.travis.yml | 21 + .../vendor/github.com/spf13/afero/LICENSE.txt | 174 + .../vendor/github.com/spf13/afero/README.md | 452 + 
.../vendor/github.com/spf13/afero/afero.go | 108 + .../github.com/spf13/afero/appveyor.yml | 15 + .../vendor/github.com/spf13/afero/basepath.go | 180 + .../github.com/spf13/afero/cacheOnReadFs.go | 290 + .../github.com/spf13/afero/const_bsds.go | 22 + .../github.com/spf13/afero/const_win_unix.go | 25 + .../github.com/spf13/afero/copyOnWriteFs.go | 292 + backend/vendor/github.com/spf13/afero/go.mod | 3 + .../vendor/github.com/spf13/afero/httpFs.go | 110 + .../vendor/github.com/spf13/afero/ioutil.go | 230 + .../vendor/github.com/spf13/afero/lstater.go | 27 + .../vendor/github.com/spf13/afero/match.go | 110 + .../vendor/github.com/spf13/afero/mem/dir.go | 37 + .../github.com/spf13/afero/mem/dirmap.go | 43 + .../vendor/github.com/spf13/afero/mem/file.go | 317 + .../vendor/github.com/spf13/afero/memmap.go | 365 + backend/vendor/github.com/spf13/afero/os.go | 101 + backend/vendor/github.com/spf13/afero/path.go | 106 + .../github.com/spf13/afero/readonlyfs.go | 80 + .../vendor/github.com/spf13/afero/regexpfs.go | 214 + .../github.com/spf13/afero/unionFile.go | 305 + backend/vendor/github.com/spf13/afero/util.go | 330 + .../vendor/github.com/spf13/cast/.gitignore | 25 + .../vendor/github.com/spf13/cast/.travis.yml | 15 + backend/vendor/github.com/spf13/cast/LICENSE | 21 + backend/vendor/github.com/spf13/cast/Makefile | 38 + .../vendor/github.com/spf13/cast/README.md | 75 + backend/vendor/github.com/spf13/cast/cast.go | 171 + backend/vendor/github.com/spf13/cast/caste.go | 1249 + backend/vendor/github.com/spf13/cast/go.mod | 7 + backend/vendor/github.com/spf13/cast/go.sum | 6 + .../spf13/jwalterweatherman/.gitignore | 22 + .../spf13/jwalterweatherman/LICENSE | 21 + .../spf13/jwalterweatherman/README.md | 148 + .../jwalterweatherman/default_notepad.go | 113 + .../github.com/spf13/jwalterweatherman/go.mod | 3 + .../spf13/jwalterweatherman/log_counter.go | 55 + .../spf13/jwalterweatherman/notepad.go | 194 + .../vendor/github.com/spf13/pflag/.gitignore | 2 + .../vendor/github.com/spf13/pflag/.travis.yml | 21 + backend/vendor/github.com/spf13/pflag/LICENSE | 28 + .../vendor/github.com/spf13/pflag/README.md | 296 + backend/vendor/github.com/spf13/pflag/bool.go | 94 + .../github.com/spf13/pflag/bool_slice.go | 147 + .../vendor/github.com/spf13/pflag/bytes.go | 209 + .../vendor/github.com/spf13/pflag/count.go | 96 + .../vendor/github.com/spf13/pflag/duration.go | 86 + .../github.com/spf13/pflag/duration_slice.go | 128 + backend/vendor/github.com/spf13/pflag/flag.go | 1227 + .../vendor/github.com/spf13/pflag/float32.go | 88 + .../vendor/github.com/spf13/pflag/float64.go | 84 + .../github.com/spf13/pflag/golangflag.go | 105 + backend/vendor/github.com/spf13/pflag/int.go | 84 + .../vendor/github.com/spf13/pflag/int16.go | 88 + .../vendor/github.com/spf13/pflag/int32.go | 88 + .../vendor/github.com/spf13/pflag/int64.go | 84 + backend/vendor/github.com/spf13/pflag/int8.go | 88 + .../github.com/spf13/pflag/int_slice.go | 128 + backend/vendor/github.com/spf13/pflag/ip.go | 94 + .../vendor/github.com/spf13/pflag/ip_slice.go | 148 + .../vendor/github.com/spf13/pflag/ipmask.go | 122 + .../vendor/github.com/spf13/pflag/ipnet.go | 98 + .../vendor/github.com/spf13/pflag/string.go | 80 + .../github.com/spf13/pflag/string_array.go | 103 + .../github.com/spf13/pflag/string_slice.go | 149 + .../github.com/spf13/pflag/string_to_int.go | 149 + .../spf13/pflag/string_to_string.go | 160 + backend/vendor/github.com/spf13/pflag/uint.go | 88 + .../vendor/github.com/spf13/pflag/uint16.go | 88 + 
.../vendor/github.com/spf13/pflag/uint32.go | 88 + .../vendor/github.com/spf13/pflag/uint64.go | 88 + .../vendor/github.com/spf13/pflag/uint8.go | 88 + .../github.com/spf13/pflag/uint_slice.go | 126 + .../vendor/github.com/spf13/viper/.gitignore | 29 + .../vendor/github.com/spf13/viper/.travis.yml | 31 + backend/vendor/github.com/spf13/viper/LICENSE | 21 + .../vendor/github.com/spf13/viper/README.md | 691 + .../vendor/github.com/spf13/viper/flags.go | 57 + backend/vendor/github.com/spf13/viper/go.mod | 43 + backend/vendor/github.com/spf13/viper/go.sum | 178 + backend/vendor/github.com/spf13/viper/util.go | 221 + .../vendor/github.com/spf13/viper/viper.go | 1868 + backend/vendor/github.com/ugorji/go/LICENSE | 22 + .../ugorji/go/codec/0_importpath.go | 7 + .../vendor/github.com/ugorji/go/codec/0doc.go | 227 + .../github.com/ugorji/go/codec/README.md | 207 + .../vendor/github.com/ugorji/go/codec/binc.go | 1203 + .../github.com/ugorji/go/codec/build.sh | 264 + .../vendor/github.com/ugorji/go/codec/cbor.go | 767 + .../github.com/ugorji/go/codec/codecgen.go | 13 + .../github.com/ugorji/go/codec/decode.go | 3109 ++ .../github.com/ugorji/go/codec/encode.go | 1808 + .../ugorji/go/codec/fast-path.generated.go | 33668 ++++++++++++++++ .../ugorji/go/codec/fast-path.go.tmpl | 491 + .../ugorji/go/codec/fast-path.not.go | 47 + .../ugorji/go/codec/gen-dec-array.go.tmpl | 78 + .../ugorji/go/codec/gen-dec-map.go.tmpl | 42 + .../ugorji/go/codec/gen-enc-chan.go.tmpl | 27 + .../ugorji/go/codec/gen-helper.generated.go | 343 + .../ugorji/go/codec/gen-helper.go.tmpl | 308 + .../ugorji/go/codec/gen.generated.go | 164 + .../vendor/github.com/ugorji/go/codec/gen.go | 2149 + .../go/codec/goversion_arrayof_gte_go15.go | 14 + .../go/codec/goversion_arrayof_lt_go15.go | 14 + .../go/codec/goversion_makemap_gte_go19.go | 15 + .../go/codec/goversion_makemap_lt_go19.go | 12 + ...version_unexportedembeddedptr_gte_go110.go | 8 + ...oversion_unexportedembeddedptr_lt_go110.go | 8 + .../go/codec/goversion_unsupported_lt_go14.go | 17 + .../go/codec/goversion_vendor_eq_go15.go | 10 + .../go/codec/goversion_vendor_eq_go16.go | 10 + .../go/codec/goversion_vendor_gte_go17.go | 8 + .../go/codec/goversion_vendor_lt_go15.go | 8 + .../github.com/ugorji/go/codec/helper.go | 2697 ++ .../ugorji/go/codec/helper_internal.go | 121 + .../ugorji/go/codec/helper_not_unsafe.go | 331 + .../ugorji/go/codec/helper_unsafe.go | 745 + .../vendor/github.com/ugorji/go/codec/json.go | 1508 + .../ugorji/go/codec/mammoth-test.go.tmpl | 154 + .../ugorji/go/codec/mammoth2-test.go.tmpl | 94 + .../github.com/ugorji/go/codec/msgpack.go | 1126 + .../vendor/github.com/ugorji/go/codec/rpc.go | 225 + .../github.com/ugorji/go/codec/simple.go | 666 + .../ugorji/go/codec/test-cbor-goldens.json | 639 + .../vendor/github.com/ugorji/go/codec/test.py | 126 + .../vendor/github.com/ugorji/go/codec/xml.go | 508 + backend/vendor/golang.org/x/sys/AUTHORS | 3 + backend/vendor/golang.org/x/sys/CONTRIBUTORS | 3 + backend/vendor/golang.org/x/sys/LICENSE | 27 + backend/vendor/golang.org/x/sys/PATENTS | 22 + .../vendor/golang.org/x/sys/unix/.gitignore | 2 + .../vendor/golang.org/x/sys/unix/README.md | 173 + .../golang.org/x/sys/unix/affinity_linux.go | 124 + .../vendor/golang.org/x/sys/unix/aliases.go | 14 + .../golang.org/x/sys/unix/asm_aix_ppc64.s | 17 + .../golang.org/x/sys/unix/asm_darwin_386.s | 29 + .../golang.org/x/sys/unix/asm_darwin_amd64.s | 29 + .../golang.org/x/sys/unix/asm_darwin_arm.s | 30 + .../golang.org/x/sys/unix/asm_darwin_arm64.s | 30 + 
.../x/sys/unix/asm_dragonfly_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_386.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_freebsd_arm64.s | 29 + .../golang.org/x/sys/unix/asm_linux_386.s | 65 + .../golang.org/x/sys/unix/asm_linux_amd64.s | 57 + .../golang.org/x/sys/unix/asm_linux_arm.s | 56 + .../golang.org/x/sys/unix/asm_linux_arm64.s | 52 + .../golang.org/x/sys/unix/asm_linux_mips64x.s | 56 + .../golang.org/x/sys/unix/asm_linux_mipsx.s | 54 + .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 44 + .../golang.org/x/sys/unix/asm_linux_s390x.s | 56 + .../golang.org/x/sys/unix/asm_netbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_netbsd_arm64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_386.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_amd64.s | 29 + .../golang.org/x/sys/unix/asm_openbsd_arm.s | 29 + .../golang.org/x/sys/unix/asm_solaris_amd64.s | 17 + .../golang.org/x/sys/unix/bluetooth_linux.go | 35 + .../golang.org/x/sys/unix/cap_freebsd.go | 195 + .../vendor/golang.org/x/sys/unix/constants.go | 13 + .../golang.org/x/sys/unix/dev_aix_ppc.go | 27 + .../golang.org/x/sys/unix/dev_aix_ppc64.go | 29 + .../golang.org/x/sys/unix/dev_darwin.go | 24 + .../golang.org/x/sys/unix/dev_dragonfly.go | 30 + .../golang.org/x/sys/unix/dev_freebsd.go | 30 + .../vendor/golang.org/x/sys/unix/dev_linux.go | 42 + .../golang.org/x/sys/unix/dev_netbsd.go | 29 + .../golang.org/x/sys/unix/dev_openbsd.go | 29 + .../vendor/golang.org/x/sys/unix/dirent.go | 17 + .../golang.org/x/sys/unix/endian_big.go | 9 + .../golang.org/x/sys/unix/endian_little.go | 9 + .../vendor/golang.org/x/sys/unix/env_unix.go | 31 + .../x/sys/unix/errors_freebsd_386.go | 227 + .../x/sys/unix/errors_freebsd_amd64.go | 227 + .../x/sys/unix/errors_freebsd_arm.go | 226 + backend/vendor/golang.org/x/sys/unix/fcntl.go | 32 + .../golang.org/x/sys/unix/fcntl_darwin.go | 18 + .../x/sys/unix/fcntl_linux_32bit.go | 13 + backend/vendor/golang.org/x/sys/unix/gccgo.go | 62 + .../vendor/golang.org/x/sys/unix/gccgo_c.c | 39 + .../x/sys/unix/gccgo_linux_amd64.go | 20 + backend/vendor/golang.org/x/sys/unix/ioctl.go | 30 + backend/vendor/golang.org/x/sys/unix/mkall.sh | 212 + .../golang.org/x/sys/unix/mkasm_darwin.go | 61 + .../vendor/golang.org/x/sys/unix/mkerrors.sh | 659 + .../vendor/golang.org/x/sys/unix/mkpost.go | 106 + .../vendor/golang.org/x/sys/unix/mksyscall.go | 407 + .../x/sys/unix/mksyscall_aix_ppc.go | 404 + .../x/sys/unix/mksyscall_aix_ppc64.go | 602 + .../x/sys/unix/mksyscall_solaris.go | 335 + .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 265 + .../vendor/golang.org/x/sys/unix/mksysnum.go | 190 + .../golang.org/x/sys/unix/openbsd_pledge.go | 166 + .../golang.org/x/sys/unix/openbsd_unveil.go | 44 + .../golang.org/x/sys/unix/pagesize_unix.go | 15 + backend/vendor/golang.org/x/sys/unix/race.go | 30 + backend/vendor/golang.org/x/sys/unix/race0.go | 25 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 36 + .../golang.org/x/sys/unix/sockcmsg_unix.go | 117 + backend/vendor/golang.org/x/sys/unix/str.go | 26 + .../vendor/golang.org/x/sys/unix/syscall.go | 54 + .../golang.org/x/sys/unix/syscall_aix.go | 549 + .../golang.org/x/sys/unix/syscall_aix_ppc.go | 34 + .../x/sys/unix/syscall_aix_ppc64.go | 34 + .../golang.org/x/sys/unix/syscall_bsd.go | 624 + .../golang.org/x/sys/unix/syscall_darwin.go | 706 + .../x/sys/unix/syscall_darwin_386.go | 63 + 
.../x/sys/unix/syscall_darwin_amd64.go | 63 + .../x/sys/unix/syscall_darwin_arm.go | 64 + .../x/sys/unix/syscall_darwin_arm64.go | 66 + .../x/sys/unix/syscall_darwin_libSystem.go | 31 + .../x/sys/unix/syscall_dragonfly.go | 539 + .../x/sys/unix/syscall_dragonfly_amd64.go | 52 + .../golang.org/x/sys/unix/syscall_freebsd.go | 824 + .../x/sys/unix/syscall_freebsd_386.go | 52 + .../x/sys/unix/syscall_freebsd_amd64.go | 52 + .../x/sys/unix/syscall_freebsd_arm.go | 52 + .../x/sys/unix/syscall_freebsd_arm64.go | 52 + .../golang.org/x/sys/unix/syscall_linux.go | 1771 + .../x/sys/unix/syscall_linux_386.go | 386 + .../x/sys/unix/syscall_linux_amd64.go | 190 + .../x/sys/unix/syscall_linux_amd64_gc.go | 13 + .../x/sys/unix/syscall_linux_arm.go | 274 + .../x/sys/unix/syscall_linux_arm64.go | 223 + .../golang.org/x/sys/unix/syscall_linux_gc.go | 14 + .../x/sys/unix/syscall_linux_gc_386.go | 16 + .../x/sys/unix/syscall_linux_gccgo_386.go | 30 + .../x/sys/unix/syscall_linux_gccgo_arm.go | 20 + .../x/sys/unix/syscall_linux_mips64x.go | 222 + .../x/sys/unix/syscall_linux_mipsx.go | 234 + .../x/sys/unix/syscall_linux_ppc64x.go | 152 + .../x/sys/unix/syscall_linux_riscv64.go | 226 + .../x/sys/unix/syscall_linux_s390x.go | 338 + .../x/sys/unix/syscall_linux_sparc64.go | 147 + .../golang.org/x/sys/unix/syscall_netbsd.go | 622 + .../x/sys/unix/syscall_netbsd_386.go | 33 + .../x/sys/unix/syscall_netbsd_amd64.go | 33 + .../x/sys/unix/syscall_netbsd_arm.go | 33 + .../x/sys/unix/syscall_netbsd_arm64.go | 33 + .../golang.org/x/sys/unix/syscall_openbsd.go | 416 + .../x/sys/unix/syscall_openbsd_386.go | 37 + .../x/sys/unix/syscall_openbsd_amd64.go | 37 + .../x/sys/unix/syscall_openbsd_arm.go | 37 + .../golang.org/x/sys/unix/syscall_solaris.go | 737 + .../x/sys/unix/syscall_solaris_amd64.go | 23 + .../golang.org/x/sys/unix/syscall_unix.go | 431 + .../golang.org/x/sys/unix/syscall_unix_gc.go | 15 + .../x/sys/unix/syscall_unix_gc_ppc64x.go | 24 + .../golang.org/x/sys/unix/timestruct.go | 82 + .../vendor/golang.org/x/sys/unix/types_aix.go | 236 + .../golang.org/x/sys/unix/types_darwin.go | 283 + .../golang.org/x/sys/unix/types_dragonfly.go | 263 + .../golang.org/x/sys/unix/types_freebsd.go | 356 + .../golang.org/x/sys/unix/types_netbsd.go | 289 + .../golang.org/x/sys/unix/types_openbsd.go | 282 + .../golang.org/x/sys/unix/types_solaris.go | 266 + .../vendor/golang.org/x/sys/unix/xattr_bsd.go | 240 + .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1372 + .../x/sys/unix/zerrors_aix_ppc64.go | 1373 + .../x/sys/unix/zerrors_darwin_386.go | 1783 + .../x/sys/unix/zerrors_darwin_amd64.go | 1783 + .../x/sys/unix/zerrors_darwin_arm.go | 1783 + .../x/sys/unix/zerrors_darwin_arm64.go | 1783 + .../x/sys/unix/zerrors_dragonfly_amd64.go | 1650 + .../x/sys/unix/zerrors_freebsd_386.go | 1793 + .../x/sys/unix/zerrors_freebsd_amd64.go | 1794 + .../x/sys/unix/zerrors_freebsd_arm.go | 1802 + .../x/sys/unix/zerrors_freebsd_arm64.go | 1794 + .../x/sys/unix/zerrors_linux_386.go | 2835 ++ .../x/sys/unix/zerrors_linux_amd64.go | 2835 ++ .../x/sys/unix/zerrors_linux_arm.go | 2841 ++ .../x/sys/unix/zerrors_linux_arm64.go | 2826 ++ .../x/sys/unix/zerrors_linux_mips.go | 2842 ++ .../x/sys/unix/zerrors_linux_mips64.go | 2842 ++ .../x/sys/unix/zerrors_linux_mips64le.go | 2842 ++ .../x/sys/unix/zerrors_linux_mipsle.go | 2842 ++ .../x/sys/unix/zerrors_linux_ppc64.go | 2897 ++ .../x/sys/unix/zerrors_linux_ppc64le.go | 2897 ++ .../x/sys/unix/zerrors_linux_riscv64.go | 2822 ++ .../x/sys/unix/zerrors_linux_s390x.go | 2895 ++ 
.../x/sys/unix/zerrors_linux_sparc64.go | 2891 ++ .../x/sys/unix/zerrors_netbsd_386.go | 1772 + .../x/sys/unix/zerrors_netbsd_amd64.go | 1762 + .../x/sys/unix/zerrors_netbsd_arm.go | 1751 + .../x/sys/unix/zerrors_netbsd_arm64.go | 1762 + .../x/sys/unix/zerrors_openbsd_386.go | 1654 + .../x/sys/unix/zerrors_openbsd_amd64.go | 1765 + .../x/sys/unix/zerrors_openbsd_arm.go | 1656 + .../x/sys/unix/zerrors_solaris_amd64.go | 1532 + .../golang.org/x/sys/unix/zptrace386_linux.go | 80 + .../golang.org/x/sys/unix/zptracearm_linux.go | 41 + .../x/sys/unix/zptracemips_linux.go | 50 + .../x/sys/unix/zptracemipsle_linux.go | 50 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1450 + .../x/sys/unix/zsyscall_aix_ppc64.go | 1416 + .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1172 + .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1051 + .../x/sys/unix/zsyscall_darwin_386.1_11.go | 1810 + .../x/sys/unix/zsyscall_darwin_386.go | 2505 ++ .../x/sys/unix/zsyscall_darwin_386.s | 284 + .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 1810 + .../x/sys/unix/zsyscall_darwin_amd64.go | 2520 ++ .../x/sys/unix/zsyscall_darwin_amd64.s | 286 + .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 1793 + .../x/sys/unix/zsyscall_darwin_arm.go | 2483 ++ .../x/sys/unix/zsyscall_darwin_arm.s | 282 + .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 1793 + .../x/sys/unix/zsyscall_darwin_arm64.go | 2483 ++ .../x/sys/unix/zsyscall_darwin_arm64.s | 282 + .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1659 + .../x/sys/unix/zsyscall_freebsd_386.go | 2015 + .../x/sys/unix/zsyscall_freebsd_amd64.go | 2015 + .../x/sys/unix/zsyscall_freebsd_arm.go | 2015 + .../x/sys/unix/zsyscall_freebsd_arm64.go | 2015 + .../x/sys/unix/zsyscall_linux_386.go | 2220 + .../x/sys/unix/zsyscall_linux_amd64.go | 2387 ++ .../x/sys/unix/zsyscall_linux_arm.go | 2342 ++ .../x/sys/unix/zsyscall_linux_arm64.go | 2244 + .../x/sys/unix/zsyscall_linux_mips.go | 2400 ++ .../x/sys/unix/zsyscall_linux_mips64.go | 2371 ++ .../x/sys/unix/zsyscall_linux_mips64le.go | 2371 ++ .../x/sys/unix/zsyscall_linux_mipsle.go | 2400 ++ .../x/sys/unix/zsyscall_linux_ppc64.go | 2449 ++ .../x/sys/unix/zsyscall_linux_ppc64le.go | 2449 ++ .../x/sys/unix/zsyscall_linux_riscv64.go | 2224 + .../x/sys/unix/zsyscall_linux_s390x.go | 2219 + .../x/sys/unix/zsyscall_linux_sparc64.go | 2382 ++ .../x/sys/unix/zsyscall_netbsd_386.go | 1826 + .../x/sys/unix/zsyscall_netbsd_amd64.go | 1826 + .../x/sys/unix/zsyscall_netbsd_arm.go | 1826 + .../x/sys/unix/zsyscall_netbsd_arm64.go | 1826 + .../x/sys/unix/zsyscall_openbsd_386.go | 1692 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 1692 + .../x/sys/unix/zsyscall_openbsd_arm.go | 1692 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1953 + .../x/sys/unix/zsysctl_openbsd_386.go | 270 + .../x/sys/unix/zsysctl_openbsd_amd64.go | 270 + .../x/sys/unix/zsysctl_openbsd_arm.go | 270 + .../x/sys/unix/zsysnum_darwin_386.go | 436 + .../x/sys/unix/zsysnum_darwin_amd64.go | 438 + .../x/sys/unix/zsysnum_darwin_arm.go | 436 + .../x/sys/unix/zsysnum_darwin_arm64.go | 436 + .../x/sys/unix/zsysnum_dragonfly_amd64.go | 315 + .../x/sys/unix/zsysnum_freebsd_386.go | 403 + .../x/sys/unix/zsysnum_freebsd_amd64.go | 403 + .../x/sys/unix/zsysnum_freebsd_arm.go | 403 + .../x/sys/unix/zsysnum_freebsd_arm64.go | 395 + .../x/sys/unix/zsysnum_linux_386.go | 392 + .../x/sys/unix/zsysnum_linux_amd64.go | 344 + .../x/sys/unix/zsysnum_linux_arm.go | 364 + .../x/sys/unix/zsysnum_linux_arm64.go | 289 + .../x/sys/unix/zsysnum_linux_mips.go | 377 + .../x/sys/unix/zsysnum_linux_mips64.go | 337 + 
.../x/sys/unix/zsysnum_linux_mips64le.go | 337 + .../x/sys/unix/zsysnum_linux_mipsle.go | 377 + .../x/sys/unix/zsysnum_linux_ppc64.go | 375 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 375 + .../x/sys/unix/zsysnum_linux_riscv64.go | 288 + .../x/sys/unix/zsysnum_linux_s390x.go | 337 + .../x/sys/unix/zsysnum_linux_sparc64.go | 351 + .../x/sys/unix/zsysnum_netbsd_386.go | 274 + .../x/sys/unix/zsysnum_netbsd_amd64.go | 274 + .../x/sys/unix/zsysnum_netbsd_arm.go | 274 + .../x/sys/unix/zsysnum_netbsd_arm64.go | 274 + .../x/sys/unix/zsysnum_openbsd_386.go | 218 + .../x/sys/unix/zsysnum_openbsd_amd64.go | 218 + .../x/sys/unix/zsysnum_openbsd_arm.go | 218 + .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 345 + .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 354 + .../x/sys/unix/ztypes_darwin_386.go | 499 + .../x/sys/unix/ztypes_darwin_amd64.go | 509 + .../x/sys/unix/ztypes_darwin_arm.go | 500 + .../x/sys/unix/ztypes_darwin_arm64.go | 509 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 469 + .../x/sys/unix/ztypes_freebsd_386.go | 603 + .../x/sys/unix/ztypes_freebsd_amd64.go | 602 + .../x/sys/unix/ztypes_freebsd_arm.go | 602 + .../x/sys/unix/ztypes_freebsd_arm64.go | 602 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 2135 + .../x/sys/unix/ztypes_linux_amd64.go | 2148 + .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2126 + .../x/sys/unix/ztypes_linux_arm64.go | 2127 + .../x/sys/unix/ztypes_linux_mips.go | 2132 + .../x/sys/unix/ztypes_linux_mips64.go | 2129 + .../x/sys/unix/ztypes_linux_mips64le.go | 2129 + .../x/sys/unix/ztypes_linux_mipsle.go | 2132 + .../x/sys/unix/ztypes_linux_ppc64.go | 2137 + .../x/sys/unix/ztypes_linux_ppc64le.go | 2137 + .../x/sys/unix/ztypes_linux_riscv64.go | 2154 + .../x/sys/unix/ztypes_linux_s390x.go | 2151 + .../x/sys/unix/ztypes_linux_sparc64.go | 2132 + .../x/sys/unix/ztypes_netbsd_386.go | 465 + .../x/sys/unix/ztypes_netbsd_amd64.go | 472 + .../x/sys/unix/ztypes_netbsd_arm.go | 470 + .../x/sys/unix/ztypes_netbsd_arm64.go | 472 + .../x/sys/unix/ztypes_openbsd_386.go | 570 + .../x/sys/unix/ztypes_openbsd_amd64.go | 570 + .../x/sys/unix/ztypes_openbsd_arm.go | 571 + .../x/sys/unix/ztypes_solaris_amd64.go | 442 + backend/vendor/golang.org/x/text/AUTHORS | 3 + backend/vendor/golang.org/x/text/CONTRIBUTORS | 3 + backend/vendor/golang.org/x/text/LICENSE | 27 + backend/vendor/golang.org/x/text/PATENTS | 22 + .../golang.org/x/text/transform/transform.go | 705 + .../x/text/unicode/norm/composition.go | 508 + .../x/text/unicode/norm/forminfo.go | 259 + .../golang.org/x/text/unicode/norm/input.go | 109 + .../golang.org/x/text/unicode/norm/iter.go | 457 + .../x/text/unicode/norm/maketables.go | 976 + .../x/text/unicode/norm/normalize.go | 609 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/tables10.0.0.go | 7653 ++++ .../x/text/unicode/norm/tables9.0.0.go | 7633 ++++ .../x/text/unicode/norm/transform.go | 88 + .../golang.org/x/text/unicode/norm/trie.go | 54 + .../golang.org/x/text/unicode/norm/triegen.go | 117 + .../go-playground/validator.v8/.gitignore | 29 + .../go-playground/validator.v8/LICENSE | 22 + .../go-playground/validator.v8/README.md | 366 + .../go-playground/validator.v8/baked_in.go | 1410 + .../go-playground/validator.v8/cache.go | 263 + .../go-playground/validator.v8/doc.go | 852 + .../go-playground/validator.v8/logo.png | Bin 0 -> 13443 bytes .../go-playground/validator.v8/regexes.go | 59 + .../go-playground/validator.v8/util.go | 252 + .../go-playground/validator.v8/validator.go | 782 + backend/vendor/gopkg.in/yaml.v2/.travis.yml | 12 + 
backend/vendor/gopkg.in/yaml.v2/LICENSE | 201 + .../vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + backend/vendor/gopkg.in/yaml.v2/NOTICE | 13 + backend/vendor/gopkg.in/yaml.v2/README.md | 133 + backend/vendor/gopkg.in/yaml.v2/apic.go | 739 + backend/vendor/gopkg.in/yaml.v2/decode.go | 775 + backend/vendor/gopkg.in/yaml.v2/emitterc.go | 1685 + backend/vendor/gopkg.in/yaml.v2/encode.go | 390 + backend/vendor/gopkg.in/yaml.v2/go.mod | 3 + backend/vendor/gopkg.in/yaml.v2/go.sum | 1 + backend/vendor/gopkg.in/yaml.v2/parserc.go | 1095 + backend/vendor/gopkg.in/yaml.v2/readerc.go | 412 + backend/vendor/gopkg.in/yaml.v2/resolve.go | 258 + backend/vendor/gopkg.in/yaml.v2/scannerc.go | 2696 ++ backend/vendor/gopkg.in/yaml.v2/sorter.go | 113 + backend/vendor/gopkg.in/yaml.v2/writerc.go | 26 + backend/vendor/gopkg.in/yaml.v2/yaml.go | 466 + backend/vendor/gopkg.in/yaml.v2/yamlh.go | 738 + .../vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 + backend/vendor/modules.txt | 75 + 897 files changed, 398043 insertions(+), 1 deletion(-) delete mode 160000 backend create mode 100644 backend/Dockerfile create mode 100644 backend/Makefile create mode 100644 backend/README.md create mode 100644 backend/conf/config.yml create mode 100644 backend/config/config.go create mode 100644 backend/constants/message.go create mode 100644 backend/constants/model.go create mode 100644 backend/constants/node.go create mode 100644 backend/constants/spider.go create mode 100644 backend/constants/system.go create mode 100644 backend/constants/task.go create mode 100644 backend/database/mongo.go create mode 100644 backend/database/pubsub.go create mode 100644 backend/database/redis.go create mode 100644 backend/dump.rdb create mode 100644 backend/go.mod create mode 100644 backend/go.sum create mode 100644 backend/lib/cron/.gitignore create mode 100644 backend/lib/cron/.travis.yml create mode 100644 backend/lib/cron/LICENSE create mode 100644 backend/lib/cron/README.md create mode 100644 backend/lib/cron/chain.go create mode 100644 backend/lib/cron/chain_test.go create mode 100644 backend/lib/cron/constantdelay.go create mode 100644 backend/lib/cron/constantdelay_test.go create mode 100644 backend/lib/cron/cron.go create mode 100644 backend/lib/cron/cron_test.go create mode 100644 backend/lib/cron/doc.go create mode 100644 backend/lib/cron/logger.go create mode 100644 backend/lib/cron/option.go create mode 100644 backend/lib/cron/option_test.go create mode 100644 backend/lib/cron/parser.go create mode 100644 backend/lib/cron/parser_test.go create mode 100644 backend/lib/cron/spec.go create mode 100644 backend/lib/cron/spec_test.go create mode 100644 backend/main.go create mode 100644 backend/middlewares/cors.go create mode 100644 backend/model/base.go create mode 100644 backend/model/node.go create mode 100644 backend/model/schedule.go create mode 100644 backend/model/spider.go create mode 100644 backend/model/task.go create mode 100644 backend/routes/base.go create mode 100644 backend/routes/node.go create mode 100644 backend/routes/schedule.go create mode 100644 backend/routes/spider.go create mode 100644 backend/routes/task.go create mode 100644 backend/routes/utils.go create mode 100644 backend/services/log.go create mode 100644 backend/services/node.go create mode 100644 backend/services/schedule.go create mode 100644 backend/services/spider.go create mode 100644 backend/services/task.go create mode 100644 backend/test/test.http create mode 100644 backend/utils/chan.go create mode 100644 backend/utils/file.go create mode 100644 
backend/utils/model.go create mode 100644 backend/vendor/github.com/apex/log/.gitignore create mode 100644 backend/vendor/github.com/apex/log/History.md create mode 100644 backend/vendor/github.com/apex/log/LICENSE create mode 100644 backend/vendor/github.com/apex/log/Makefile create mode 100644 backend/vendor/github.com/apex/log/Readme.md create mode 100644 backend/vendor/github.com/apex/log/default.go create mode 100644 backend/vendor/github.com/apex/log/doc.go create mode 100644 backend/vendor/github.com/apex/log/entry.go create mode 100644 backend/vendor/github.com/apex/log/go.mod create mode 100644 backend/vendor/github.com/apex/log/go.sum create mode 100644 backend/vendor/github.com/apex/log/interface.go create mode 100644 backend/vendor/github.com/apex/log/levels.go create mode 100644 backend/vendor/github.com/apex/log/logger.go create mode 100644 backend/vendor/github.com/apex/log/pkg.go create mode 100644 backend/vendor/github.com/apex/log/stack.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/.editorconfig create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/.gitignore create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/.travis.yml create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/AUTHORS create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/LICENSE create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/README.md create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/fen.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/fsnotify.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/inotify.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/inotify_poller.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/kqueue.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go create mode 100644 backend/vendor/github.com/fsnotify/fsnotify/windows.go create mode 100644 backend/vendor/github.com/gin-contrib/sse/.travis.yml create mode 100644 backend/vendor/github.com/gin-contrib/sse/LICENSE create mode 100644 backend/vendor/github.com/gin-contrib/sse/README.md create mode 100644 backend/vendor/github.com/gin-contrib/sse/sse-decoder.go create mode 100644 backend/vendor/github.com/gin-contrib/sse/sse-encoder.go create mode 100644 backend/vendor/github.com/gin-contrib/sse/writer.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/.gitignore create mode 100644 backend/vendor/github.com/gin-gonic/gin/.travis.yml create mode 100644 backend/vendor/github.com/gin-gonic/gin/AUTHORS.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/BENCHMARKS.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/CHANGELOG.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/LICENSE create mode 100644 backend/vendor/github.com/gin-gonic/gin/Makefile create mode 100644 backend/vendor/github.com/gin-gonic/gin/README.md create mode 100644 backend/vendor/github.com/gin-gonic/gin/auth.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/binding.go create mode 100644 
backend/vendor/github.com/gin-gonic/gin/binding/default_validator.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/form.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/form_mapping.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/json.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/msgpack.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/protobuf.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/query.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/uri.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/xml.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/binding/yaml.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/codecov.yml create mode 100644 backend/vendor/github.com/gin-gonic/gin/context.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/context_appengine.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/debug.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/deprecated.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/doc.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/errors.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/fs.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/gin.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/go.mod create mode 100644 backend/vendor/github.com/gin-gonic/gin/go.sum create mode 100644 backend/vendor/github.com/gin-gonic/gin/internal/json/json.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/logger.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/mode.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/path.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/recovery.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/data.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/html.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/json.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/msgpack.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/protobuf.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/reader.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/redirect.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/render.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/text.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/xml.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/render/yaml.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/response_writer.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/routergroup.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/test_helpers.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/tree.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/utils.go create mode 100644 backend/vendor/github.com/gin-gonic/gin/version.go create mode 100644 backend/vendor/github.com/globalsign/mgo/.gitignore create mode 100644 backend/vendor/github.com/globalsign/mgo/.travis.yml create mode 100644 backend/vendor/github.com/globalsign/mgo/CONTRIBUTING.md create mode 100644 backend/vendor/github.com/globalsign/mgo/LICENSE create mode 100644 
backend/vendor/github.com/globalsign/mgo/Makefile create mode 100644 backend/vendor/github.com/globalsign/mgo/README.md create mode 100644 backend/vendor/github.com/globalsign/mgo/auth.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/LICENSE create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/README.md create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/bson.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/compatibility.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/decimal.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/decode.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/encode.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/json.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bson/stream.go create mode 100644 backend/vendor/github.com/globalsign/mgo/bulk.go create mode 100644 backend/vendor/github.com/globalsign/mgo/changestreams.go create mode 100644 backend/vendor/github.com/globalsign/mgo/cluster.go create mode 100644 backend/vendor/github.com/globalsign/mgo/coarse_time.go create mode 100644 backend/vendor/github.com/globalsign/mgo/doc.go create mode 100644 backend/vendor/github.com/globalsign/mgo/gridfs.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/LICENSE create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/decode.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/encode.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/extension.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/fold.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/indent.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/scanner.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/stream.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/json/tags.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sasl.c create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sasl.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sasl_windows.c create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sasl_windows.go create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sasl_windows.h create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sspi_windows.c create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/sasl/sspi_windows.h create mode 100644 backend/vendor/github.com/globalsign/mgo/internal/scram/scram.go create mode 100644 backend/vendor/github.com/globalsign/mgo/log.go create mode 100644 backend/vendor/github.com/globalsign/mgo/queue.go create mode 100644 backend/vendor/github.com/globalsign/mgo/raceoff.go create mode 100644 backend/vendor/github.com/globalsign/mgo/raceon.go create mode 100644 backend/vendor/github.com/globalsign/mgo/saslimpl.go create mode 100644 backend/vendor/github.com/globalsign/mgo/saslstub.go create mode 100644 backend/vendor/github.com/globalsign/mgo/server.go create mode 100644 backend/vendor/github.com/globalsign/mgo/session.go create mode 100644 backend/vendor/github.com/globalsign/mgo/socket.go create mode 100644 backend/vendor/github.com/globalsign/mgo/stats.go create mode 
100644 backend/vendor/github.com/golang/protobuf/AUTHORS create mode 100644 backend/vendor/github.com/golang/protobuf/CONTRIBUTORS create mode 100644 backend/vendor/github.com/golang/protobuf/LICENSE create mode 100644 backend/vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/deprecated.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/discard.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/table_marshal.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/table_merge.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/table_unmarshal.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/text.go create mode 100644 backend/vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 backend/vendor/github.com/gomodule/redigo/LICENSE create mode 100644 backend/vendor/github.com/gomodule/redigo/internal/commandinfo.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/conn.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/doc.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/go16.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/go17.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/go18.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/log.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/pool.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/pool17.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/pubsub.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/redis.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/reply.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/scan.go create mode 100644 backend/vendor/github.com/gomodule/redigo/redis/script.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/.gitignore create mode 100644 backend/vendor/github.com/hashicorp/hcl/.travis.yml create mode 100644 backend/vendor/github.com/hashicorp/hcl/LICENSE create mode 100644 backend/vendor/github.com/hashicorp/hcl/Makefile create mode 100644 backend/vendor/github.com/hashicorp/hcl/README.md create mode 100644 backend/vendor/github.com/hashicorp/hcl/appveyor.yml create mode 100644 backend/vendor/github.com/hashicorp/hcl/decoder.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/go.mod create mode 100644 backend/vendor/github.com/hashicorp/hcl/go.sum create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go create mode 100644 
backend/vendor/github.com/hashicorp/hcl/hcl/parser/error.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/token/position.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/hcl/token/token.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/json/parser/flatten.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/json/parser/parser.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/json/token/position.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/json/token/token.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/lex.go create mode 100644 backend/vendor/github.com/hashicorp/hcl/parse.go create mode 100644 backend/vendor/github.com/json-iterator/go/.codecov.yml create mode 100644 backend/vendor/github.com/json-iterator/go/.gitignore create mode 100644 backend/vendor/github.com/json-iterator/go/.travis.yml create mode 100644 backend/vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 backend/vendor/github.com/json-iterator/go/Gopkg.toml create mode 100644 backend/vendor/github.com/json-iterator/go/LICENSE create mode 100644 backend/vendor/github.com/json-iterator/go/README.md create mode 100644 backend/vendor/github.com/json-iterator/go/adapter.go create mode 100644 backend/vendor/github.com/json-iterator/go/any.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_array.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_bool.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_float.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_int32.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_int64.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_invalid.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_nil.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_number.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_object.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_str.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_uint32.go create mode 100644 backend/vendor/github.com/json-iterator/go/any_uint64.go create mode 100644 backend/vendor/github.com/json-iterator/go/build.sh create mode 100644 backend/vendor/github.com/json-iterator/go/config.go create mode 100644 backend/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 backend/vendor/github.com/json-iterator/go/iter.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_array.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_float.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_int.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_object.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_skip.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_skip_sloppy.go create mode 100644 
backend/vendor/github.com/json-iterator/go/iter_skip_strict.go create mode 100644 backend/vendor/github.com/json-iterator/go/iter_str.go create mode 100644 backend/vendor/github.com/json-iterator/go/jsoniter.go create mode 100644 backend/vendor/github.com/json-iterator/go/pool.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_array.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_dynamic.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_extension.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_json_number.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_json_raw_message.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_map.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_marshaler.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_native.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_optional.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_slice.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_struct_decoder.go create mode 100644 backend/vendor/github.com/json-iterator/go/reflect_struct_encoder.go create mode 100644 backend/vendor/github.com/json-iterator/go/stream.go create mode 100644 backend/vendor/github.com/json-iterator/go/stream_float.go create mode 100644 backend/vendor/github.com/json-iterator/go/stream_int.go create mode 100644 backend/vendor/github.com/json-iterator/go/stream_str.go create mode 100644 backend/vendor/github.com/json-iterator/go/test.sh create mode 100644 backend/vendor/github.com/magiconair/properties/.gitignore create mode 100644 backend/vendor/github.com/magiconair/properties/.travis.yml create mode 100644 backend/vendor/github.com/magiconair/properties/CHANGELOG.md create mode 100644 backend/vendor/github.com/magiconair/properties/LICENSE create mode 100644 backend/vendor/github.com/magiconair/properties/README.md create mode 100644 backend/vendor/github.com/magiconair/properties/decode.go create mode 100644 backend/vendor/github.com/magiconair/properties/doc.go create mode 100644 backend/vendor/github.com/magiconair/properties/integrate.go create mode 100644 backend/vendor/github.com/magiconair/properties/lex.go create mode 100644 backend/vendor/github.com/magiconair/properties/load.go create mode 100644 backend/vendor/github.com/magiconair/properties/parser.go create mode 100644 backend/vendor/github.com/magiconair/properties/properties.go create mode 100644 backend/vendor/github.com/magiconair/properties/rangecheck.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/.travis.yml create mode 100644 backend/vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 backend/vendor/github.com/mattn/go-isatty/README.md create mode 100644 backend/vendor/github.com/mattn/go-isatty/doc.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/go.mod create mode 100644 backend/vendor/github.com/mattn/go-isatty/go.sum create mode 100644 backend/vendor/github.com/mattn/go-isatty/isatty_android.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 backend/vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 
backend/vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/.travis.yml create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/LICENSE create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/README.md create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/decode_hooks.go create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/error.go create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/go.mod create mode 100644 backend/vendor/github.com/mitchellh/mapstructure/mapstructure.go create mode 100644 backend/vendor/github.com/modern-go/concurrent/.gitignore create mode 100644 backend/vendor/github.com/modern-go/concurrent/.travis.yml create mode 100644 backend/vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 backend/vendor/github.com/modern-go/concurrent/README.md create mode 100644 backend/vendor/github.com/modern-go/concurrent/executor.go create mode 100644 backend/vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 backend/vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 backend/vendor/github.com/modern-go/concurrent/log.go create mode 100644 backend/vendor/github.com/modern-go/concurrent/test.sh create mode 100644 backend/vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/.gitignore create mode 100644 backend/vendor/github.com/modern-go/reflect2/.travis.yml create mode 100644 backend/vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 backend/vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 backend/vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 backend/vendor/github.com/modern-go/reflect2/README.md create mode 100644 backend/vendor/github.com/modern-go/reflect2/go_above_17.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/go_below_17.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/go_below_19.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/reflect2.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 backend/vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/safe_type.go create mode 100644 
backend/vendor/github.com/modern-go/reflect2/test.sh create mode 100644 backend/vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 backend/vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/.gitignore create mode 100644 backend/vendor/github.com/pelletier/go-toml/.travis.yml create mode 100644 backend/vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 backend/vendor/github.com/pelletier/go-toml/README.md create mode 100644 backend/vendor/github.com/pelletier/go-toml/benchmark.json create mode 100644 backend/vendor/github.com/pelletier/go-toml/benchmark.sh create mode 100644 backend/vendor/github.com/pelletier/go-toml/benchmark.toml create mode 100644 backend/vendor/github.com/pelletier/go-toml/benchmark.yml create mode 100644 backend/vendor/github.com/pelletier/go-toml/doc.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/example-crlf.toml create mode 100644 backend/vendor/github.com/pelletier/go-toml/example.toml create mode 100644 backend/vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/fuzz.sh create mode 100644 backend/vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/marshal_test.toml create mode 100644 backend/vendor/github.com/pelletier/go-toml/parser.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/position.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/test.sh create mode 100644 backend/vendor/github.com/pelletier/go-toml/token.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/toml.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 backend/vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 backend/vendor/github.com/pkg/errors/.gitignore create mode 100644 backend/vendor/github.com/pkg/errors/.travis.yml create mode 100644 backend/vendor/github.com/pkg/errors/LICENSE create mode 100644 backend/vendor/github.com/pkg/errors/README.md create mode 100644 backend/vendor/github.com/pkg/errors/appveyor.yml create mode 100644 backend/vendor/github.com/pkg/errors/errors.go create mode 100644 backend/vendor/github.com/pkg/errors/stack.go create mode 100644 backend/vendor/github.com/robfig/cron/.gitignore create mode 100644 backend/vendor/github.com/robfig/cron/.travis.yml create mode 100644 backend/vendor/github.com/robfig/cron/LICENSE create mode 100644 backend/vendor/github.com/robfig/cron/README.md create mode 100644 backend/vendor/github.com/robfig/cron/constantdelay.go create mode 100644 
backend/vendor/github.com/robfig/cron/cron.go create mode 100644 backend/vendor/github.com/robfig/cron/doc.go create mode 100644 backend/vendor/github.com/robfig/cron/parser.go create mode 100644 backend/vendor/github.com/robfig/cron/spec.go create mode 100644 backend/vendor/github.com/satori/go.uuid/.travis.yml create mode 100644 backend/vendor/github.com/satori/go.uuid/LICENSE create mode 100644 backend/vendor/github.com/satori/go.uuid/README.md create mode 100644 backend/vendor/github.com/satori/go.uuid/codec.go create mode 100644 backend/vendor/github.com/satori/go.uuid/generator.go create mode 100644 backend/vendor/github.com/satori/go.uuid/sql.go create mode 100644 backend/vendor/github.com/satori/go.uuid/uuid.go create mode 100644 backend/vendor/github.com/spf13/afero/.travis.yml create mode 100644 backend/vendor/github.com/spf13/afero/LICENSE.txt create mode 100644 backend/vendor/github.com/spf13/afero/README.md create mode 100644 backend/vendor/github.com/spf13/afero/afero.go create mode 100644 backend/vendor/github.com/spf13/afero/appveyor.yml create mode 100644 backend/vendor/github.com/spf13/afero/basepath.go create mode 100644 backend/vendor/github.com/spf13/afero/cacheOnReadFs.go create mode 100644 backend/vendor/github.com/spf13/afero/const_bsds.go create mode 100644 backend/vendor/github.com/spf13/afero/const_win_unix.go create mode 100644 backend/vendor/github.com/spf13/afero/copyOnWriteFs.go create mode 100644 backend/vendor/github.com/spf13/afero/go.mod create mode 100644 backend/vendor/github.com/spf13/afero/httpFs.go create mode 100644 backend/vendor/github.com/spf13/afero/ioutil.go create mode 100644 backend/vendor/github.com/spf13/afero/lstater.go create mode 100644 backend/vendor/github.com/spf13/afero/match.go create mode 100644 backend/vendor/github.com/spf13/afero/mem/dir.go create mode 100644 backend/vendor/github.com/spf13/afero/mem/dirmap.go create mode 100644 backend/vendor/github.com/spf13/afero/mem/file.go create mode 100644 backend/vendor/github.com/spf13/afero/memmap.go create mode 100644 backend/vendor/github.com/spf13/afero/os.go create mode 100644 backend/vendor/github.com/spf13/afero/path.go create mode 100644 backend/vendor/github.com/spf13/afero/readonlyfs.go create mode 100644 backend/vendor/github.com/spf13/afero/regexpfs.go create mode 100644 backend/vendor/github.com/spf13/afero/unionFile.go create mode 100644 backend/vendor/github.com/spf13/afero/util.go create mode 100644 backend/vendor/github.com/spf13/cast/.gitignore create mode 100644 backend/vendor/github.com/spf13/cast/.travis.yml create mode 100644 backend/vendor/github.com/spf13/cast/LICENSE create mode 100644 backend/vendor/github.com/spf13/cast/Makefile create mode 100644 backend/vendor/github.com/spf13/cast/README.md create mode 100644 backend/vendor/github.com/spf13/cast/cast.go create mode 100644 backend/vendor/github.com/spf13/cast/caste.go create mode 100644 backend/vendor/github.com/spf13/cast/go.mod create mode 100644 backend/vendor/github.com/spf13/cast/go.sum create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/.gitignore create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/LICENSE create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/README.md create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/default_notepad.go create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/go.mod create mode 100644 backend/vendor/github.com/spf13/jwalterweatherman/log_counter.go create mode 100644 
backend/vendor/github.com/spf13/jwalterweatherman/notepad.go create mode 100644 backend/vendor/github.com/spf13/pflag/.gitignore create mode 100644 backend/vendor/github.com/spf13/pflag/.travis.yml create mode 100644 backend/vendor/github.com/spf13/pflag/LICENSE create mode 100644 backend/vendor/github.com/spf13/pflag/README.md create mode 100644 backend/vendor/github.com/spf13/pflag/bool.go create mode 100644 backend/vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 backend/vendor/github.com/spf13/pflag/bytes.go create mode 100644 backend/vendor/github.com/spf13/pflag/count.go create mode 100644 backend/vendor/github.com/spf13/pflag/duration.go create mode 100644 backend/vendor/github.com/spf13/pflag/duration_slice.go create mode 100644 backend/vendor/github.com/spf13/pflag/flag.go create mode 100644 backend/vendor/github.com/spf13/pflag/float32.go create mode 100644 backend/vendor/github.com/spf13/pflag/float64.go create mode 100644 backend/vendor/github.com/spf13/pflag/golangflag.go create mode 100644 backend/vendor/github.com/spf13/pflag/int.go create mode 100644 backend/vendor/github.com/spf13/pflag/int16.go create mode 100644 backend/vendor/github.com/spf13/pflag/int32.go create mode 100644 backend/vendor/github.com/spf13/pflag/int64.go create mode 100644 backend/vendor/github.com/spf13/pflag/int8.go create mode 100644 backend/vendor/github.com/spf13/pflag/int_slice.go create mode 100644 backend/vendor/github.com/spf13/pflag/ip.go create mode 100644 backend/vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 backend/vendor/github.com/spf13/pflag/ipmask.go create mode 100644 backend/vendor/github.com/spf13/pflag/ipnet.go create mode 100644 backend/vendor/github.com/spf13/pflag/string.go create mode 100644 backend/vendor/github.com/spf13/pflag/string_array.go create mode 100644 backend/vendor/github.com/spf13/pflag/string_slice.go create mode 100644 backend/vendor/github.com/spf13/pflag/string_to_int.go create mode 100644 backend/vendor/github.com/spf13/pflag/string_to_string.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint16.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint32.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint64.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint8.go create mode 100644 backend/vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 backend/vendor/github.com/spf13/viper/.gitignore create mode 100644 backend/vendor/github.com/spf13/viper/.travis.yml create mode 100644 backend/vendor/github.com/spf13/viper/LICENSE create mode 100644 backend/vendor/github.com/spf13/viper/README.md create mode 100644 backend/vendor/github.com/spf13/viper/flags.go create mode 100644 backend/vendor/github.com/spf13/viper/go.mod create mode 100644 backend/vendor/github.com/spf13/viper/go.sum create mode 100644 backend/vendor/github.com/spf13/viper/util.go create mode 100644 backend/vendor/github.com/spf13/viper/viper.go create mode 100644 backend/vendor/github.com/ugorji/go/LICENSE create mode 100644 backend/vendor/github.com/ugorji/go/codec/0_importpath.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/0doc.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/README.md create mode 100644 backend/vendor/github.com/ugorji/go/codec/binc.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/build.sh create mode 100644 backend/vendor/github.com/ugorji/go/codec/cbor.go create mode 100644 
backend/vendor/github.com/ugorji/go/codec/codecgen.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/decode.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/encode.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/fast-path.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen-enc-chan.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen.generated.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/gen.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/helper.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/helper_internal.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/json.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/mammoth-test.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/mammoth2-test.go.tmpl create mode 100644 backend/vendor/github.com/ugorji/go/codec/msgpack.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/rpc.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/simple.go create mode 100644 backend/vendor/github.com/ugorji/go/codec/test-cbor-goldens.json create mode 100644 backend/vendor/github.com/ugorji/go/codec/test.py create mode 100644 backend/vendor/github.com/ugorji/go/codec/xml.go create mode 100644 backend/vendor/golang.org/x/sys/AUTHORS create mode 100644 backend/vendor/golang.org/x/sys/CONTRIBUTORS create mode 100644 backend/vendor/golang.org/x/sys/LICENSE create mode 100644 backend/vendor/golang.org/x/sys/PATENTS create mode 100644 backend/vendor/golang.org/x/sys/unix/.gitignore create mode 100644 backend/vendor/golang.org/x/sys/unix/README.md create mode 100644 backend/vendor/golang.org/x/sys/unix/affinity_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/aliases.go create mode 100644 
backend/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_darwin_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_darwin_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_freebsd_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_arm64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_linux_s390x.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_netbsd_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_openbsd_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_openbsd_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/bluetooth_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/cap_freebsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/constants.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_darwin.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_dragonfly.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_freebsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_netbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dev_openbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/dirent.go create mode 100644 backend/vendor/golang.org/x/sys/unix/endian_big.go create mode 100644 backend/vendor/golang.org/x/sys/unix/endian_little.go create mode 100644 backend/vendor/golang.org/x/sys/unix/env_unix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/errors_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/errors_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/fcntl.go create mode 100644 backend/vendor/golang.org/x/sys/unix/fcntl_darwin.go create mode 100644 backend/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go create mode 100644 backend/vendor/golang.org/x/sys/unix/gccgo.go create mode 100644 backend/vendor/golang.org/x/sys/unix/gccgo_c.c 
create mode 100644 backend/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ioctl.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mkall.sh create mode 100644 backend/vendor/golang.org/x/sys/unix/mkasm_darwin.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mkerrors.sh create mode 100644 backend/vendor/golang.org/x/sys/unix/mkpost.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mksyscall.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mksyscall_solaris.go create mode 100644 backend/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl create mode 100644 backend/vendor/golang.org/x/sys/unix/mksysnum.go create mode 100644 backend/vendor/golang.org/x/sys/unix/openbsd_pledge.go create mode 100644 backend/vendor/golang.org/x/sys/unix/openbsd_unveil.go create mode 100644 backend/vendor/golang.org/x/sys/unix/pagesize_unix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/race.go create mode 100644 backend/vendor/golang.org/x/sys/unix/race0.go create mode 100644 backend/vendor/golang.org/x/sys/unix/sockcmsg_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/sockcmsg_unix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/str.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_aix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_bsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_dragonfly.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_freebsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_gc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go create mode 100644 
backend/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_netbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_openbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_solaris.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_unix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_unix_gc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/timestruct.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_aix.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_darwin.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_dragonfly.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_freebsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_netbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_openbsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 backend/vendor/golang.org/x/sys/unix/xattr_bsd.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go create mode 
100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zptrace386_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zptracearm_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zptracemips_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go create mode 100644 
backend/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go create mode 100644 
backend/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go create mode 100644 backend/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go create mode 100644 backend/vendor/golang.org/x/text/AUTHORS create mode 100644 backend/vendor/golang.org/x/text/CONTRIBUTORS create mode 100644 backend/vendor/golang.org/x/text/LICENSE create mode 100644 backend/vendor/golang.org/x/text/PATENTS create mode 100644 backend/vendor/golang.org/x/text/transform/transform.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 
backend/vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 backend/vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/.gitignore create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/LICENSE create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/README.md create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/baked_in.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/cache.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/doc.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/logo.png create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/regexes.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/util.go create mode 100644 backend/vendor/gopkg.in/go-playground/validator.v8/validator.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 backend/vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 backend/vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 backend/vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 backend/vendor/gopkg.in/yaml.v2/README.md create mode 100644 backend/vendor/gopkg.in/yaml.v2/apic.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/decode.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/encode.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/go.mod create mode 100644 backend/vendor/gopkg.in/yaml.v2/go.sum create mode 100644 backend/vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 backend/vendor/gopkg.in/yaml.v2/yamlprivateh.go create mode 100644 backend/vendor/modules.txt
diff --git a/backend b/backend
deleted file mode 160000
index 6e4a8ab1..00000000
--- a/backend
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 6e4a8ab152f168874c6c769dbe0263f1d21b8613
diff --git a/backend/Dockerfile b/backend/Dockerfile
new file mode 100644
index 00000000..49e86de1
--- /dev/null
+++ b/backend/Dockerfile
@@ -0,0 +1,19 @@
+FROM golang:1.12-alpine AS build
+
+WORKDIR /go/src/app
+COPY . .
+
+ENV GO111MODULE on
+ENV GOPROXY https://mirrors.aliyun.com/goproxy/
+
+RUN go mod vendor
+RUN go install -v ./...
+
+FROM alpine:latest
+WORKDIR /root
+COPY --from=build /go/src/app .
+COPY --from=build /go/bin/crawlab /usr/local/bin
+RUN ls
+
+EXPOSE 8000
+CMD ["crawlab"]
diff --git a/backend/Makefile b/backend/Makefile
new file mode 100644
index 00000000..a2fb5ef1
--- /dev/null
+++ b/backend/Makefile
@@ -0,0 +1,32 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+
+# build with version info
+versionDir = "apiserver/pkg/version"
+gitTag = $(shell if [ "`git describe --tags --abbrev=0 2>/dev/null`" != "" ];then git describe --tags --abbrev=0; else git log --pretty=format:'%h' -n 1; fi)
+buildDate = $(shell TZ=Asia/Shanghai date +%FT%T%z)
+gitCommit = $(shell git log --pretty=format:'%H' -n 1)
+gitTreeState = $(shell if git status|grep -q 'clean';then echo clean; else echo dirty; fi)
+
+ldflags="-w -X ${versionDir}.gitTag=${gitTag} -X ${versionDir}.buildDate=${buildDate} -X ${versionDir}.gitCommit=${gitCommit} -X ${versionDir}.gitTreeState=${gitTreeState}"
+
+all: gotool
+	@go build -v -ldflags ${ldflags} .
+clean:
+	rm -f apiserver
+	find . -name "[._]*.s[a-w][a-z]" | xargs -i rm -f {}
+gotool:
+	gofmt -w .
+	go tool vet . |& grep -v vendor;true
+ca:
+	openssl req -new -nodes -x509 -out conf/server.crt -keyout conf/server.key -days 3650 -subj "/C=DE/ST=NRW/L=Earth/O=Random Company/OU=IT/CN=127.0.0.1/emailAddress=xxxxx@qq.com"
+
+help:
+	@echo "make - compile the source code"
+	@echo "make clean - remove binary file and vim swp files"
+	@echo "make gotool - run go tool 'fmt' and 'vet'"
+	@echo "make ca - generate ca files"
+
+.PHONY: clean gotool ca help
+
+
diff --git a/backend/README.md b/backend/README.md
new file mode 100644
index 00000000..bd6eb122
--- /dev/null
+++ b/backend/README.md
@@ -0,0 +1,3 @@
+# crawlab-backend
+
+Backend (Golang) for Crawlab
\ No newline at end of file
diff --git a/backend/conf/config.yml b/backend/conf/config.yml
new file mode 100644
index 00000000..8170b994
--- /dev/null
+++ b/backend/conf/config.yml
@@ -0,0 +1,20 @@
+mongo:
+  host: localhost
+  port: 27017
+  db: crawlab_test
+redis:
+  network: tcp
+  address: "localhost:6379"
+log:
+  level: info
+  path: "/var/logs/crawlab"
+server:
+  host: 0.0.0.0
+  port: 8000
+  master: "Y"
+spider:
+  path: "/Users/yeqing/projects/crawlab/spiders"
+task:
+  workers: 4
+other:
+  tmppath: "/tmp"
diff --git a/backend/config/config.go b/backend/config/config.go
new file mode 100644
index 00000000..4d83c0f7
--- /dev/null
+++ b/backend/config/config.go
@@ -0,0 +1,55 @@
+package config
+
+import (
+	"github.com/fsnotify/fsnotify"
+	"github.com/spf13/viper"
+	"log"
+	"strings"
+)
+
+type Config struct {
+	Name string
+}
+
+// WatchConfig watches the config file for changes and hot-reloads it
+func (c *Config) WatchConfig() {
+	viper.WatchConfig()
+	viper.OnConfigChange(func(e fsnotify.Event) {
+		log.Printf("Config file changed: %s", e.Name)
+	})
+}
+
+func (c *Config) Init() error {
+	if c.Name != "" {
+		viper.SetConfigFile(c.Name) // if a config file was specified, parse that file
+	} else {
+		viper.AddConfigPath("./conf") // otherwise fall back to the default config file
+		viper.SetConfigName("config")
+	}
+	viper.SetConfigType("yaml")   // the config file format is YAML
+	viper.AutomaticEnv()          // read in matching environment variables
+	viper.SetEnvPrefix("CRAWLAB") // environment variables use the CRAWLAB prefix
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	if err := viper.ReadInConfig(); err != nil { // parse the config file
+		return err
+	}
+
+	return nil
+}
+
+func InitConfig(cfg string) error {
+	c := Config{
+		Name: cfg,
+	}
+
+	// initialize the config file
+	if err := c.Init(); err != nil {
+		return err
+	}
+
+	// watch the config file and hot-reload on change
+	c.WatchConfig()
+
+	return nil
+}
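For orientation, here is a minimal usage sketch (not part of the patch) of how the config package above would typically be wired up, assuming the module path crawlab declared in go.mod; keys such as mongo.host come from conf/config.yml.

package main

import (
	"log"

	"crawlab/config"
	"github.com/spf13/viper"
)

func main() {
	// Passing "" makes Init fall back to ./conf/config.yml.
	if err := config.InitConfig(""); err != nil {
		log.Fatalf("init config error: %v", err)
	}
	// Values are then read through viper anywhere in the program.
	log.Printf("mongo host: %s", viper.GetString("mongo.host"))
	// Environment overrides use the CRAWLAB_ prefix, e.g. CRAWLAB_MONGO_HOST.
}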
diff --git a/backend/constants/message.go b/backend/constants/message.go
new file mode 100644
index 00000000..34974e1e
--- /dev/null
+++ b/backend/constants/message.go
@@ -0,0 +1,7 @@
+package constants
+
+const (
+	MsgTypeGetLog = "get-log"
+	MsgTypeGetEnv = "get-env"
+	MsgTypeCancelTask = "cancel-task"
+)
diff --git a/backend/constants/model.go b/backend/constants/model.go
new file mode 100644
index 00000000..da66b15f
--- /dev/null
+++ b/backend/constants/model.go
@@ -0,0 +1,6 @@
+package constants
+
+const (
+	ObjectIdNull = "000000000000000000000000"
+	Infinite = 999999999
+)
diff --git a/backend/constants/node.go b/backend/constants/node.go
new file mode 100644
index 00000000..29b0b7c1
--- /dev/null
+++ b/backend/constants/node.go
@@ -0,0 +1,6 @@
+package constants
+
+const (
+	StatusOnline = "online"
+	StatusOffline = "offline"
+)
diff --git a/backend/constants/spider.go b/backend/constants/spider.go
new file mode 100644
index 00000000..b4b7f65e
--- /dev/null
+++ b/backend/constants/spider.go
@@ -0,0 +1,6 @@
+package constants
+
+const (
+	Customized = "customized"
+	Configurable = "configurable"
+)
diff --git a/backend/constants/system.go b/backend/constants/system.go
new file mode 100644
index 00000000..59c39787
--- /dev/null
+++ b/backend/constants/system.go
@@ -0,0 +1,7 @@
+package constants
+
+const (
+	Windows = "windows"
+	Linux = "linux"
+	Darwin = "darwin"
+)
diff --git a/backend/constants/task.go b/backend/constants/task.go
new file mode 100644
index 00000000..5eeee967
--- /dev/null
+++ b/backend/constants/task.go
@@ -0,0 +1,14 @@
+package constants
+
+const (
+	StatusPending string = "pending"
+	StatusRunning string = "running"
+	StatusFinished string = "finished"
+	StatusError string = "error"
+	StatusCancelled string = "cancelled"
+)
+
+const (
+	TaskFinish string = "finish"
+	TaskCancel string = "cancel"
+)
diff --git a/backend/database/mongo.go b/backend/database/mongo.go
new file mode 100644
index 00000000..ecf3ba22
--- /dev/null
+++ b/backend/database/mongo.go
@@ -0,0 +1,45 @@
+package database
+
+import (
+	"github.com/globalsign/mgo"
+	"github.com/spf13/viper"
+)
+
+var Session *mgo.Session
+
+func GetSession() *mgo.Session {
+	return Session.Copy()
+}
+
+func GetDb() (*mgo.Session, *mgo.Database) {
+	s := GetSession()
+	return s, s.DB(viper.GetString("mongo.db"))
+}
+
+func GetCol(collectionName string) (*mgo.Session, *mgo.Collection) {
+	s := GetSession()
+	db := s.DB(viper.GetString("mongo.db"))
+	col := db.C(collectionName)
+	return s, col
+}
+
+func GetGridFs(prefix string) (*mgo.Session, *mgo.GridFS) {
+	s, db := GetDb()
+	gf := db.GridFS(prefix)
+	return s, gf
+}
+
+func InitMongo() error {
+	var mongoHost = viper.GetString("mongo.host")
+	var mongoPort = viper.GetString("mongo.port")
+	var mongoDb = viper.GetString("mongo.db")
+
+	if Session == nil {
+		sess, err := mgo.Dial("mongodb://" + mongoHost + ":" + mongoPort + "/" + mongoDb)
+		if err != nil {
+			return err
+		}
+		Session = sess
+	}
+	return nil
+}
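Purely as an illustration (not part of the patch), a sketch of how the mgo helpers above are meant to be consumed once the configuration has been loaded; the "nodes" collection name is only an example.

package main

import (
	"log"

	"crawlab/database"
	"github.com/globalsign/mgo/bson"
)

func main() {
	if err := database.InitMongo(); err != nil {
		log.Fatalf("mongo init error: %v", err)
	}
	// GetCol copies the global session; the caller must close the copy.
	s, col := database.GetCol("nodes")
	defer s.Close()

	var docs []bson.M
	if err := col.Find(bson.M{}).All(&docs); err != nil {
		log.Fatalf("query error: %v", err)
	}
	log.Printf("found %d documents", len(docs))
}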
diff --git a/backend/database/pubsub.go b/backend/database/pubsub.go
new file mode 100644
index 00000000..4570e7b4
--- /dev/null
+++ b/backend/database/pubsub.go
@@ -0,0 +1,72 @@
+package database
+
+import (
+	"fmt"
+	"github.com/apex/log"
+	"github.com/gomodule/redigo/redis"
+	"unsafe"
+)
+
+type SubscribeCallback func(channel, message string)
+
+type Subscriber struct {
+	client redis.PubSubConn
+	cbMap map[string]SubscribeCallback
+}
+
+func (c *Subscriber) Connect() {
+	conn, err := GetRedisConn()
+	if err != nil {
+		log.Fatalf("redis dial failed.")
+	}
+
+	c.client = redis.PubSubConn{Conn: conn}
+	c.cbMap = make(map[string]SubscribeCallback)
+
+	go func() {
+		for {
+			log.Debug("wait...")
+			switch res := c.client.Receive().(type) {
+			case redis.Message:
+				channel := (*string)(unsafe.Pointer(&res.Channel))
+				message := (*string)(unsafe.Pointer(&res.Data))
+				c.cbMap[*channel](*channel, *message)
+			case redis.Subscription:
+				fmt.Printf("%s: %s %d\n", res.Channel, res.Kind, res.Count)
+			case error:
+				log.Error("error handle...")
+				continue
+			}
+		}
+	}()
+
+}
+
+func (c *Subscriber) Close() {
+	err := c.client.Close()
+	if err != nil {
+		log.Errorf("redis close error.")
+	}
+}
+
+func (c *Subscriber) Subscribe(channel interface{}, cb SubscribeCallback) {
+	err := c.client.Subscribe(channel)
+	if err != nil {
+		log.Fatalf("redis Subscribe error.")
+	}
+
+	c.cbMap[channel.(string)] = cb
+}
+
+func Publish(channel string, msg string) error {
+	c, err := GetRedisConn()
+	if err != nil {
+		return err
+	}
+
+	if _, err := c.Do("PUBLISH", channel, msg); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/backend/database/redis.go b/backend/database/redis.go
new file mode 100644
index 00000000..17382122
--- /dev/null
+++ b/backend/database/redis.go
@@ -0,0 +1,119 @@
+package database
+
+import (
+	"github.com/gomodule/redigo/redis"
+	"github.com/spf13/viper"
+	"runtime/debug"
+)
+
+var RedisClient = Redis{}
+
+type ConsumeFunc func(channel string, message []byte) error
+
+type Redis struct {
+}
+
+func (r *Redis) RPush(collection string, value interface{}) error {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return err
+	}
+	defer c.Close()
+
+	if _, err := c.Do("RPUSH", collection, value); err != nil {
+		debug.PrintStack()
+		return err
+	}
+	return nil
+}
+
+func (r *Redis) LPop(collection string) (string, error) {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return "", err
+	}
+	defer c.Close()
+
+	value, err2 := redis.String(c.Do("LPOP", collection))
+	if err2 != nil {
+		return value, err2
+	}
+	return value, nil
+}
+
+func (r *Redis) HSet(collection string, key string, value string) error {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return err
+	}
+	defer c.Close()
+
+	if _, err := c.Do("HSET", collection, key, value); err != nil {
+		debug.PrintStack()
+		return err
+	}
+	return nil
+}
+
+func (r *Redis) HGet(collection string, key string) (string, error) {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return "", err
+	}
+	defer c.Close()
+
+	value, err2 := redis.String(c.Do("HGET", collection, key))
+	if err2 != nil {
+		return value, err2
+	}
+	return value, nil
+}
+
+func (r *Redis) HDel(collection string, key string) error {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return err
+	}
+	defer c.Close()
+
+	if _, err := c.Do("HDEL", collection, key); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *Redis) HKeys(collection string) ([]string, error) {
+	c, err := GetRedisConn()
+	if err != nil {
+		debug.PrintStack()
+		return []string{}, err
+	}
+	defer c.Close()
+
+	value, err2 := redis.Strings(c.Do("HKeys", collection))
+	if err2 != nil {
+		return []string{}, err2
+	}
+	return value, nil
+}
+
+func GetRedisConn() (redis.Conn, error) {
+	c, err := redis.Dial(
+		viper.GetString("redis.network"),
+		viper.GetString("redis.address"),
+	)
+	if err != nil {
+		debug.PrintStack()
+		return c, err
+	}
+	return c, nil
+}
+
+func InitRedis() error {
+	return nil
+}
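Again illustrative only (not part of the patch): a sketch of the intended use of the Redis helper and the Subscriber above, assuming the configuration has been loaded; the hash name "nodes" and the channel name "nodes:master" are hypothetical.

package main

import (
	"log"
	"time"

	"crawlab/database"
)

func main() {
	// Hash-style access through the shared helper.
	if err := database.RedisClient.HSet("nodes", "node1", "online"); err != nil {
		log.Fatal(err)
	}
	status, err := database.RedisClient.HGet("nodes", "node1")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("node1 status: %s", status)

	// Pub/sub: register a callback for a channel, then publish to it.
	var sub database.Subscriber
	sub.Connect()
	sub.Subscribe("nodes:master", func(channel, message string) {
		log.Printf("received on %s: %s", channel, message)
	})
	if err := database.Publish("nodes:master", "ping"); err != nil {
		log.Fatal(err)
	}
	// Crude wait so the receive goroutine has a chance to deliver the message.
	time.Sleep(time.Second)
}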
binary patch literal 155 zcmWG?b@2=~FfcIt$H2mxm!Fba%qW;>Vr80YWoBTNVq}$MX_b=XkX@~mo0zO*rGz1+ zq@$Es0F*E^H_|gQ&@(j9GXe_bCKi{Z7AaYk6qTmxD3unZB$lMcmlOll85tN_>Kd5q o8X1Kc8d(_|S{a$@85kIuSr}^@SXdbtDAoSA{lxkEoA|$T0MiRBZvX%Q literal 0 HcmV?d00001 diff --git a/backend/go.mod b/backend/go.mod new file mode 100644 index 00000000..9e8515d9 --- /dev/null +++ b/backend/go.mod @@ -0,0 +1,14 @@ +module crawlab + +go 1.12 + +require ( + github.com/apex/log v1.1.1 + github.com/fsnotify/fsnotify v1.4.7 + github.com/gin-gonic/gin v1.4.0 + github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 + github.com/gomodule/redigo v2.0.0+incompatible + github.com/pkg/errors v0.8.1 + github.com/satori/go.uuid v1.2.0 + github.com/spf13/viper v1.4.0 +) diff --git a/backend/go.sum b/backend/go.sum new file mode 100644 index 00000000..9f763036 --- /dev/null +++ b/backend/go.sum @@ -0,0 +1,193 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apex/log v1.1.1 h1:BwhRZ0qbjYtTob0I+2M+smavV0kOC8XgcnGZcyL9liA= +github.com/apex/log v1.1.1/go.mod h1:Ls949n1HFtXfbDcjiTTFQqkVUrte0puoIBfO3SVgwOA= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= +github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod 
h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common 
v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/backend/lib/cron/.gitignore b/backend/lib/cron/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/backend/lib/cron/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/backend/lib/cron/.travis.yml b/backend/lib/cron/.travis.yml new file mode 100644 index 00000000..4f2ee4d9 --- /dev/null +++ b/backend/lib/cron/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/backend/lib/cron/LICENSE b/backend/lib/cron/LICENSE new file mode 100644 index 00000000..3a0f627f --- /dev/null +++ b/backend/lib/cron/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. 
+ +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/backend/lib/cron/README.md b/backend/lib/cron/README.md new file mode 100644 index 00000000..979f71e6 --- /dev/null +++ b/backend/lib/cron/README.md @@ -0,0 +1,125 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Cron V3 has been released! + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Refer to the documentation here: +http://godoc.org/github.com/robfig/cron + +The rest of this document describes the the advances in v3 and a list of +breaking changes for users that wish to upgrade from an earlier version. + +## Upgrading to v3 (June 2019) + +cron v3 is a major upgrade to the library that addresses all outstanding bugs, +feature requests, and rough edges. It is based on a merge of master which +contains various fixes to issues found over the years and the v2 branch which +contains some backwards-incompatible features like the ability to remove cron +jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like +the timezone support, and fixes a number of bugs. + +New features: + +- Support for Go modules. Callers must now import this library as + `github.com/robfig/cron/v3`, instead of `gopkg.in/...` + +- Fixed bugs: + - 0f01e6b parser: fix combining of Dow and Dom (#70) + - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) + - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) + - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) + - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + +- Standard cron spec parsing by default (first field is "minute"), with an easy + way to opt into the seconds field (quartz-compatible). Although, note that the + year field (optional in Quartz) is not supported. + +- Extensible, key/value logging via an interface that complies with + the https://github.com/go-logr/logr project. 
+ +- The new Chain & JobWrapper types allow you to install "interceptors" to add + cross-cutting behavior like the following: + - Recover any panics from jobs + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + - Notification when jobs are completed + +It is backwards incompatible with both v1 and v2. These updates are required: + +- The v1 branch accepted an optional seconds field at the beginning of the cron + spec. This is non-standard and has led to a lot of confusion. The new default + parser conforms to the standard as described by [the Cron wikipedia page]. + + UPDATING: To retain the old behavior, construct your Cron with a custom + parser: + + // Seconds field, required + cron.New(cron.WithSeconds()) + + // Seconds field, optional + cron.New( + cron.WithParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) + +- The Cron type now accepts functional options on construction rather than the + previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). + + UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be + updated to provide those values on construction. + +- CRON_TZ is now the recommended way to specify the timezone of a single + schedule, which is sanctioned by the specification. The legacy "TZ=" prefix + will continue to be supported since it is unambiguous and easy to do so. + + UPDATING: No update is required. + +- By default, cron will no longer recover panics in jobs that it runs. + Recovering can be surprising (see issue #192) and seems to be at odds with + typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option + has been removed to accommodate the more general JobWrapper type. + + UPDATING: To opt into panic recovery and configure the panic logger: + + cron.New(cron.WithChain( + cron.Recover(logger), // or use cron.DefaultLogger + )) + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + removed, since it is duplicative with the leveled logging. + + UPDATING: Callers should use `WithLogger` and specify a logger that does not + discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: + + cron.New( + cron.WithLogger(cron.VerbosePrintfLogger(logger))) + + +### Background - Cron spec format + +There are two cron spec formats in common usage: + +- The "standard" cron format, described on [the Cron wikipedia page] and used by + the cron Linux system utility. + +- The cron format used by [the Quartz Scheduler], commonly used for scheduled + jobs in Java software + +[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron +[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +The original version of this package included an optional "seconds" field, which +made it incompatible with both of these formats. Now, the "standard" format is +the default format accepted, and the Quartz format is opt-in. diff --git a/backend/lib/cron/chain.go b/backend/lib/cron/chain.go new file mode 100644 index 00000000..118e5bbe --- /dev/null +++ b/backend/lib/cron/chain.go @@ -0,0 +1,92 @@ +package cron + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// JobWrapper decorates the given Job with some behavior. 
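+// For illustration only, a wrapper that logs before every invocation could
+// look roughly like the sketch below (LogStart is not part of this package;
+// it only shows the shape a JobWrapper takes):
+//
+//	func LogStart(logger Logger) JobWrapper {
+//		return func(j Job) Job {
+//			return FuncJob(func() {
+//				logger.Info("starting job")
+//				j.Run()
+//			})
+//		}
+//	}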
+type JobWrapper func(Job) Job + +// Chain is a sequence of JobWrappers that decorates submitted jobs with +// cross-cutting behaviors like logging or synchronization. +type Chain struct { + wrappers []JobWrapper +} + +// NewChain returns a Chain consisting of the given JobWrappers. +func NewChain(c ...JobWrapper) Chain { + return Chain{c} +} + +// Then decorates the given job with all JobWrappers in the chain. +// +// This: +// NewChain(m1, m2, m3).Then(job) +// is equivalent to: +// m1(m2(m3(job))) +func (c Chain) Then(j Job) Job { + for i := range c.wrappers { + j = c.wrappers[len(c.wrappers)-i-1](j) + } + return j +} + +// Recover panics in wrapped jobs and log them with the provided logger. +func Recover(logger Logger) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%v", r) + } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) + } + }() + j.Run() + }) + } +} + +// DelayIfStillRunning serializes jobs, delaying subsequent runs until the +// previous one is complete. Jobs running after a delay of more than a minute +// have the delay logged at Info. +func DelayIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var mu sync.Mutex + return FuncJob(func() { + start := time.Now() + mu.Lock() + defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { + logger.Info("delay", "duration", dur) + } + j.Run() + }) + } +} + +// SkipIfStillRunning skips an invocation of the Job if a previous invocation is +// still running. It logs skips to the given logger at Info level. +func SkipIfStillRunning(logger Logger) JobWrapper { + var ch = make(chan struct{}, 1) + ch <- struct{}{} + return func(j Job) Job { + return FuncJob(func() { + select { + case v := <-ch: + j.Run() + ch <- v + default: + logger.Info("skip") + } + }) + } +} diff --git a/backend/lib/cron/chain_test.go b/backend/lib/cron/chain_test.go new file mode 100644 index 00000000..2561bd7f --- /dev/null +++ b/backend/lib/cron/chain_test.go @@ -0,0 +1,221 @@ +package cron + +import ( + "io/ioutil" + "log" + "reflect" + "sync" + "testing" + "time" +) + +func appendingJob(slice *[]int, value int) Job { + var m sync.Mutex + return FuncJob(func() { + m.Lock() + *slice = append(*slice, value) + m.Unlock() + }) +} + +func appendingWrapper(slice *[]int, value int) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + appendingJob(slice, value).Run() + j.Run() + }) + } +} + +func TestChain(t *testing.T) { + var nums []int + var ( + append1 = appendingWrapper(&nums, 1) + append2 = appendingWrapper(&nums, 2) + append3 = appendingWrapper(&nums, 3) + append4 = appendingJob(&nums, 4) + ) + NewChain(append1, append2, append3).Then(append4).Run() + if !reflect.DeepEqual(nums, []int{1, 2, 3, 4}) { + t.Error("unexpected order of calls:", nums) + } +} + +func TestChainRecover(t *testing.T) { + panickingJob := FuncJob(func() { + panic("panickingJob panics") + }) + + t.Run("panic exits job by default", func(t *testing.T) { + defer func() { + if err := recover(); err == nil { + t.Errorf("panic expected, but none received") + } + }() + NewChain().Then(panickingJob). + Run() + }) + + t.Run("Recovering JobWrapper recovers", func(t *testing.T) { + NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))). + Then(panickingJob). 
+ Run() + }) + + t.Run("composed with the *IfStillRunning wrappers", func(t *testing.T) { + NewChain(Recover(PrintfLogger(log.New(ioutil.Discard, "", 0)))). + Then(panickingJob). + Run() + }) +} + +type countJob struct { + m sync.Mutex + started int + done int + delay time.Duration +} + +func (j *countJob) Run() { + j.m.Lock() + j.started++ + j.m.Unlock() + time.Sleep(j.delay) + j.m.Lock() + j.done++ + j.m.Unlock() +} + +func (j *countJob) Started() int { + defer j.m.Unlock() + j.m.Lock() + return j.started +} + +func (j *countJob) Done() int { + defer j.m.Unlock() + j.m.Lock() + return j.done +} + +func TestChainDelayIfStillRunning(t *testing.T) { + + t.Run("runs immediately", func(t *testing.T) { + var j countJob + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + go wrappedJob.Run() + time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete. + if c := j.Done(); c != 1 { + t.Errorf("expected job run once, immediately, got %d", c) + } + }) + + t.Run("second run immediate if first done", func(t *testing.T) { + var j countJob + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + go func() { + go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() + }() + time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete. + if c := j.Done(); c != 2 { + t.Errorf("expected job run twice, immediately, got %d", c) + } + }) + + t.Run("second run delayed if first not done", func(t *testing.T) { + var j countJob + j.delay = 10 * time.Millisecond + wrappedJob := NewChain(DelayIfStillRunning(DiscardLogger)).Then(&j) + go func() { + go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() + }() + + // After 5ms, the first job is still in progress, and the second job was + // run but should be waiting for it to finish. + time.Sleep(5 * time.Millisecond) + started, done := j.Started(), j.Done() + if started != 1 || done != 0 { + t.Error("expected first job started, but not finished, got", started, done) + } + + // Verify that the second job completes. + time.Sleep(25 * time.Millisecond) + started, done = j.Started(), j.Done() + if started != 2 || done != 2 { + t.Error("expected both jobs done, got", started, done) + } + }) + +} + +func TestChainSkipIfStillRunning(t *testing.T) { + + t.Run("runs immediately", func(t *testing.T) { + var j countJob + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) + go wrappedJob.Run() + time.Sleep(2 * time.Millisecond) // Give the job 2ms to complete. + if c := j.Done(); c != 1 { + t.Errorf("expected job run once, immediately, got %d", c) + } + }) + + t.Run("second run immediate if first done", func(t *testing.T) { + var j countJob + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) + go func() { + go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() + }() + time.Sleep(3 * time.Millisecond) // Give both jobs 3ms to complete. + if c := j.Done(); c != 2 { + t.Errorf("expected job run twice, immediately, got %d", c) + } + }) + + t.Run("second run skipped if first not done", func(t *testing.T) { + var j countJob + j.delay = 10 * time.Millisecond + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) + go func() { + go wrappedJob.Run() + time.Sleep(time.Millisecond) + go wrappedJob.Run() + }() + + // After 5ms, the first job is still in progress, and the second job was + // aleady skipped. 
+ time.Sleep(5 * time.Millisecond) + started, done := j.Started(), j.Done() + if started != 1 || done != 0 { + t.Error("expected first job started, but not finished, got", started, done) + } + + // Verify that the first job completes and second does not run. + time.Sleep(25 * time.Millisecond) + started, done = j.Started(), j.Done() + if started != 1 || done != 1 { + t.Error("expected second job skipped, got", started, done) + } + }) + + t.Run("skip 10 jobs on rapid fire", func(t *testing.T) { + var j countJob + j.delay = 10 * time.Millisecond + wrappedJob := NewChain(SkipIfStillRunning(DiscardLogger)).Then(&j) + for i := 0; i < 11; i++ { + go wrappedJob.Run() + } + time.Sleep(200 * time.Millisecond) + done := j.Done() + if done != 1 { + t.Error("expected 1 jobs executed, 10 jobs dropped, got", done) + } + }) + +} diff --git a/backend/lib/cron/constantdelay.go b/backend/lib/cron/constantdelay.go new file mode 100644 index 00000000..cd6e7b1b --- /dev/null +++ b/backend/lib/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/backend/lib/cron/constantdelay_test.go b/backend/lib/cron/constantdelay_test.go new file mode 100644 index 00000000..f43a58ad --- /dev/null +++ b/backend/lib/cron/constantdelay_test.go @@ -0,0 +1,54 @@ +package cron + +import ( + "testing" + "time" +) + +func TestConstantDelayNext(t *testing.T) { + tests := []struct { + time string + delay time.Duration + expected string + }{ + // Simple cases + {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:59 2012", 15 * time.Minute, "Mon Jul 9 15:14 2012"}, + {"Mon Jul 9 14:59:59 2012", 15 * time.Minute, "Mon Jul 9 15:14:59 2012"}, + + // Wrap around hours + {"Mon Jul 9 15:45 2012", 35 * time.Minute, "Mon Jul 9 16:20 2012"}, + + // Wrap around days + {"Mon Jul 9 23:46 2012", 14 * time.Minute, "Tue Jul 10 00:00 2012"}, + {"Mon Jul 9 23:45 2012", 35 * time.Minute, "Tue Jul 10 00:20 2012"}, + {"Mon Jul 9 23:35:51 2012", 44*time.Minute + 24*time.Second, "Tue Jul 10 00:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", 25*time.Hour + 44*time.Minute + 24*time.Second, "Thu Jul 11 01:20:15 2012"}, + + // Wrap around months + {"Mon Jul 9 23:35 2012", 91*24*time.Hour + 25*time.Minute, "Thu Oct 9 00:00 2012"}, + + // Wrap around minute, hour, day, month, and year + {"Mon Dec 31 23:59:45 2012", 15 * time.Second, "Tue Jan 1 00:00:00 2013"}, + + // Round to nearest second on the delay + {"Mon Jul 9 14:45 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, + + // Round up to 1 second if the duration is less. 
+ {"Mon Jul 9 14:45:00 2012", 15 * time.Millisecond, "Mon Jul 9 14:45:01 2012"}, + + // Round to nearest second when calculating the next time. + {"Mon Jul 9 14:45:00.005 2012", 15 * time.Minute, "Mon Jul 9 15:00 2012"}, + + // Round to nearest second for both. + {"Mon Jul 9 14:45:00.005 2012", 15*time.Minute + 50*time.Nanosecond, "Mon Jul 9 15:00 2012"}, + } + + for _, c := range tests { + actual := Every(c.delay).Next(getTime(c.time)) + expected := getTime(c.expected) + if actual != expected { + t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.delay, expected, actual) + } + } +} diff --git a/backend/lib/cron/cron.go b/backend/lib/cron/cron.go new file mode 100644 index 00000000..f6e451db --- /dev/null +++ b/backend/lib/cron/cron.go @@ -0,0 +1,350 @@ +package cron + +import ( + "context" + "sort" + "sync" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + chain Chain + stop chan struct{} + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running bool + logger Logger + runningMu sync.Mutex + location *time.Location + parser Parser + nextID EntryID + jobWaiter sync.WaitGroup +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// Schedule describes a job's duty cycle. +type Schedule interface { + // Next returns the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// EntryID identifies an entry within a Cron instance +type EntryID int + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // ID is the cron-assigned ID of this entry, which may be used to look up a + // snapshot or remove it. + ID EntryID + + // Schedule on which this job should be run. + Schedule Schedule + + // Next time the job will run, or the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // Prev is the last time this job was run, or the zero time if never. + Prev time.Time + + // WrappedJob is the thing to run when the Schedule is activated. + WrappedJob Job + + // Job is the thing that was submitted to cron. + // It is kept around so that user code that needs to get at the job later, + // e.g. via Entries() can do so. + Job Job +} + +// Valid returns true if this is not the zero entry. +func (e Entry) Valid() bool { return e.ID != 0 } + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, modified by the given options. +// +// Available Settings +// +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local +// +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. 
+// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. +// +// See "cron.With*" to modify the default behavior. +func New(opts ...Option) *Cron { + c := &Cron{ + entries: nil, + chain: NewChain(), + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan chan []Entry), + remove: make(chan EntryID), + running: false, + runningMu: sync.Mutex{}, + logger: DefaultLogger, + location: time.Local, + parser: standardParser, + } + for _, opt := range opts { + opt(c) + } + return c +} + +// FuncJob is a wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { + schedule, err := c.parser.Parse(spec) + if err != nil { + return 0, err + } + return c.Schedule(schedule, cmd), nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +// The job is wrapped with the configured Chain. +func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { + c.runningMu.Lock() + defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ + ID: c.nextID, + Schedule: schedule, + WrappedJob: c.chain.Then(cmd), + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + } else { + c.add <- entry + } + return entry.ID +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []Entry { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + replyChan := make(chan []Entry, 1) + c.snapshot <- replyChan + return <-replyChan + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Entry returns a snapshot of the given entry, or nil if it couldn't be found. +func (c *Cron) Entry(id EntryID) Entry { + for _, entry := range c.Entries() { + if id == entry.ID { + return entry + } + } + return Entry{} +} + +// Remove an entry from being run in the future. +func (c *Cron) Remove(id EntryID) { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.remove <- id + } else { + c.removeEntry(id) + } +} + +// Start the cron scheduler in its own goroutine, or no-op if already started. +func (c *Cron) Start() { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + c.runningMu.Lock() + if c.running { + c.runningMu.Unlock() + return + } + c.running = true + c.runningMu.Unlock() + c.run() +} + +// run the scheduler.. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + c.logger.Info("start") + + // Figure out the next activation times for each entry. 
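+	// A schedule that can never fire yields the zero time here; byTime sorts
+	// such entries to the end, and the run loop below skips them.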
+ now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + c.logger.Info("wake", "now", now) + + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + c.startJob(e.WrappedJob) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + c.logger.Info("stop") + return + + case id := <-c.remove: + timer.Stop() + now = c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + } + + break + } + } +} + +// startJob runs the given job in a new goroutine. +func (c *Cron) startJob(j Job) { + c.jobWaiter.Add(1) + go func() { + defer c.jobWaiter.Done() + j.Run() + }() +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.stop <- struct{}{} + c.running = false + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.jobWaiter.Wait() + cancel() + }() + return ctx +} + +// entrySnapshot returns a copy of the current cron entry list. +func (c *Cron) entrySnapshot() []Entry { + var entries = make([]Entry, len(c.entries)) + for i, e := range c.entries { + entries[i] = *e + } + return entries +} + +func (c *Cron) removeEntry(id EntryID) { + var entries []*Entry + for _, e := range c.entries { + if e.ID != id { + entries = append(entries, e) + } + } + c.entries = entries +} diff --git a/backend/lib/cron/cron_test.go b/backend/lib/cron/cron_test.go new file mode 100644 index 00000000..36f06bf7 --- /dev/null +++ b/backend/lib/cron/cron_test.go @@ -0,0 +1,702 @@ +package cron + +import ( + "bytes" + "fmt" + "log" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +// Many tests schedule a job for every second, and then wait at most a second +// for it to run. This amount is just slightly larger than 1 second to +// compensate for a few milliseconds of runtime. 
+const OneSecond = 1*time.Second + 50*time.Millisecond + +type syncWriter struct { + wr bytes.Buffer + m sync.Mutex +} + +func (sw *syncWriter) Write(data []byte) (n int, err error) { + sw.m.Lock() + n, err = sw.wr.Write(data) + sw.m.Unlock() + return +} + +func (sw *syncWriter) String() string { + sw.m.Lock() + defer sw.m.Unlock() + return sw.wr.String() +} + +func newBufLogger(sw *syncWriter) Logger { + return PrintfLogger(log.New(sw, "", log.LstdFlags)) +} + +func TestFuncPanicRecovery(t *testing.T) { + var buf syncWriter + cron := New(WithParser(secondParser), + WithChain(Recover(newBufLogger(&buf)))) + cron.Start() + defer cron.Stop() + cron.AddFunc("* * * * * ?", func() { + panic("YOLO") + }) + + select { + case <-time.After(OneSecond): + if !strings.Contains(buf.String(), "YOLO") { + t.Error("expected a panic to be logged, got none") + } + return + } +} + +type DummyJob struct{} + +func (d DummyJob) Run() { + panic("YOLO") +} + +func TestJobPanicRecovery(t *testing.T) { + var job DummyJob + + var buf syncWriter + cron := New(WithParser(secondParser), + WithChain(Recover(newBufLogger(&buf)))) + cron.Start() + defer cron.Stop() + cron.AddJob("* * * * * ?", job) + + select { + case <-time.After(OneSecond): + if !strings.Contains(buf.String(), "YOLO") { + t.Error("expected a panic to be logged, got none") + } + return + } +} + +// Start and stop cron with no entries. +func TestNoEntries(t *testing.T) { + cron := newWithSeconds() + cron.Start() + + select { + case <-time.After(OneSecond): + t.Fatal("expected cron will be stopped immediately") + case <-stop(cron): + } +} + +// Start, stop, then add an entry. Verify entry doesn't run. +func TestStopCausesJobsToNotRun(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.Start() + cron.Stop() + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + select { + case <-time.After(OneSecond): + // No job ran! + case <-wait(wg): + t.Fatal("expected stopped cron does not run any job") + } +} + +// Add a job, start cron, expect it runs. +func TestAddBeforeRunning(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + cron.Start() + defer cron.Stop() + + // Give cron 2 seconds to run our job (which is always activated). + select { + case <-time.After(OneSecond): + t.Fatal("expected job runs") + case <-wait(wg): + } +} + +// Start cron, add a job, expect it runs. +func TestAddWhileRunning(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.Start() + defer cron.Stop() + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + select { + case <-time.After(OneSecond): + t.Fatal("expected job runs") + case <-wait(wg): + } +} + +// Test for #34. Adding a job after calling start results in multiple job invocations +func TestAddWhileRunningWithDelay(t *testing.T) { + cron := newWithSeconds() + cron.Start() + defer cron.Stop() + time.Sleep(5 * time.Second) + var calls int64 + cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) }) + + <-time.After(OneSecond) + if atomic.LoadInt64(&calls) != 1 { + t.Errorf("called %d times, expected 1\n", calls) + } +} + +// Add a job, remove a job, start cron, expect nothing runs. 
+func TestRemoveBeforeRunning(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() }) + cron.Remove(id) + cron.Start() + defer cron.Stop() + + select { + case <-time.After(OneSecond): + // Success, shouldn't run + case <-wait(wg): + t.FailNow() + } +} + +// Start cron, add a job, remove it, expect it doesn't run. +func TestRemoveWhileRunning(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.Start() + defer cron.Stop() + id, _ := cron.AddFunc("* * * * * ?", func() { wg.Done() }) + cron.Remove(id) + + select { + case <-time.After(OneSecond): + case <-wait(wg): + t.FailNow() + } +} + +// Test timing with Entries. +func TestSnapshotEntries(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := New() + cron.AddFunc("@every 2s", func() { wg.Done() }) + cron.Start() + defer cron.Stop() + + // Cron should fire in 2 seconds. After 1 second, call Entries. + select { + case <-time.After(OneSecond): + cron.Entries() + } + + // Even though Entries was called, the cron should fire at the 2 second mark. + select { + case <-time.After(OneSecond): + t.Error("expected job runs at 2 second mark") + case <-wait(wg): + } +} + +// Test that the entries are correctly sorted. +// Add a bunch of long-in-the-future entries, and an immediate entry, and ensure +// that the immediate entry runs immediately. +// Also: Test that multiple jobs run in the same instant. +func TestMultipleEntries(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + + cron := newWithSeconds() + cron.AddFunc("0 0 0 1 1 ?", func() {}) + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + id1, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() }) + id2, _ := cron.AddFunc("* * * * * ?", func() { t.Fatal() }) + cron.AddFunc("0 0 0 31 12 ?", func() {}) + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + cron.Remove(id1) + cron.Start() + cron.Remove(id2) + defer cron.Stop() + + select { + case <-time.After(OneSecond): + t.Error("expected job run in proper order") + case <-wait(wg): + } +} + +// Test running the same job twice. +func TestRunningJobTwice(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + + cron := newWithSeconds() + cron.AddFunc("0 0 0 1 1 ?", func() {}) + cron.AddFunc("0 0 0 31 12 ?", func() {}) + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + cron.Start() + defer cron.Stop() + + select { + case <-time.After(2 * OneSecond): + t.Error("expected job fires 2 times") + case <-wait(wg): + } +} + +func TestRunningMultipleSchedules(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + + cron := newWithSeconds() + cron.AddFunc("0 0 0 1 1 ?", func() {}) + cron.AddFunc("0 0 0 31 12 ?", func() {}) + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + cron.Schedule(Every(time.Minute), FuncJob(func() {})) + cron.Schedule(Every(time.Second), FuncJob(func() { wg.Done() })) + cron.Schedule(Every(time.Hour), FuncJob(func() {})) + + cron.Start() + defer cron.Stop() + + select { + case <-time.After(2 * OneSecond): + t.Error("expected job fires 2 times") + case <-wait(wg): + } +} + +// Test that the cron is run in the local time zone (as opposed to UTC). +func TestLocalTimezone(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + + now := time.Now() + // FIX: Issue #205 + // This calculation doesn't work in seconds 58 or 59. + // Take the easy way out and sleep. 
+ if now.Second() >= 58 { + time.Sleep(2 * time.Second) + now = time.Now() + } + spec := fmt.Sprintf("%d,%d %d %d %d %d ?", + now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) + + cron := newWithSeconds() + cron.AddFunc(spec, func() { wg.Done() }) + cron.Start() + defer cron.Stop() + + select { + case <-time.After(OneSecond * 2): + t.Error("expected job fires 2 times") + case <-wait(wg): + } +} + +// Test that the cron is run in the given time zone (as opposed to local). +func TestNonLocalTimezone(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(2) + + loc, err := time.LoadLocation("Atlantic/Cape_Verde") + if err != nil { + fmt.Printf("Failed to load time zone Atlantic/Cape_Verde: %+v", err) + t.Fail() + } + + now := time.Now().In(loc) + // FIX: Issue #205 + // This calculation doesn't work in seconds 58 or 59. + // Take the easy way out and sleep. + if now.Second() >= 58 { + time.Sleep(2 * time.Second) + now = time.Now().In(loc) + } + spec := fmt.Sprintf("%d,%d %d %d %d %d ?", + now.Second()+1, now.Second()+2, now.Minute(), now.Hour(), now.Day(), now.Month()) + + cron := New(WithLocation(loc), WithParser(secondParser)) + cron.AddFunc(spec, func() { wg.Done() }) + cron.Start() + defer cron.Stop() + + select { + case <-time.After(OneSecond * 2): + t.Error("expected job fires 2 times") + case <-wait(wg): + } +} + +// Test that calling stop before start silently returns without +// blocking the stop channel. +func TestStopWithoutStart(t *testing.T) { + cron := New() + cron.Stop() +} + +type testJob struct { + wg *sync.WaitGroup + name string +} + +func (t testJob) Run() { + t.wg.Done() +} + +// Test that adding an invalid job spec returns an error +func TestInvalidJobSpec(t *testing.T) { + cron := New() + _, err := cron.AddJob("this will not parse", nil) + if err == nil { + t.Errorf("expected an error with invalid spec, got nil") + } +} + +// Test blocking run method behaves as Start() +func TestBlockingRun(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.AddFunc("* * * * * ?", func() { wg.Done() }) + + var unblockChan = make(chan struct{}) + + go func() { + cron.Run() + close(unblockChan) + }() + defer cron.Stop() + + select { + case <-time.After(OneSecond): + t.Error("expected job fires") + case <-unblockChan: + t.Error("expected that Run() blocks") + case <-wait(wg): + } +} + +// Test that double-running is a no-op +func TestStartNoop(t *testing.T) { + var tickChan = make(chan struct{}, 2) + + cron := newWithSeconds() + cron.AddFunc("* * * * * ?", func() { + tickChan <- struct{}{} + }) + + cron.Start() + defer cron.Stop() + + // Wait for the first firing to ensure the runner is going + <-tickChan + + cron.Start() + + <-tickChan + + // Fail if this job fires again in a short period, indicating a double-run + select { + case <-time.After(time.Millisecond): + case <-tickChan: + t.Error("expected job fires exactly twice") + } +} + +// Simple test using Runnables. +func TestJob(t *testing.T) { + wg := &sync.WaitGroup{} + wg.Add(1) + + cron := newWithSeconds() + cron.AddJob("0 0 0 30 Feb ?", testJob{wg, "job0"}) + cron.AddJob("0 0 0 1 1 ?", testJob{wg, "job1"}) + job2, _ := cron.AddJob("* * * * * ?", testJob{wg, "job2"}) + cron.AddJob("1 0 0 1 1 ?", testJob{wg, "job3"}) + cron.Schedule(Every(5*time.Second+5*time.Nanosecond), testJob{wg, "job4"}) + job5 := cron.Schedule(Every(5*time.Minute), testJob{wg, "job5"}) + + // Test getting an Entry pre-Start. 
+ if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" { + t.Error("wrong job retrieved:", actualName) + } + if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" { + t.Error("wrong job retrieved:", actualName) + } + + cron.Start() + defer cron.Stop() + + select { + case <-time.After(OneSecond): + t.FailNow() + case <-wait(wg): + } + + // Ensure the entries are in the right order. + expecteds := []string{"job2", "job4", "job5", "job1", "job3", "job0"} + + var actuals []string + for _, entry := range cron.Entries() { + actuals = append(actuals, entry.Job.(testJob).name) + } + + for i, expected := range expecteds { + if actuals[i] != expected { + t.Fatalf("Jobs not in the right order. (expected) %s != %s (actual)", expecteds, actuals) + } + } + + // Test getting Entries. + if actualName := cron.Entry(job2).Job.(testJob).name; actualName != "job2" { + t.Error("wrong job retrieved:", actualName) + } + if actualName := cron.Entry(job5).Job.(testJob).name; actualName != "job5" { + t.Error("wrong job retrieved:", actualName) + } +} + +// Issue #206 +// Ensure that the next run of a job after removing an entry is accurate. +func TestScheduleAfterRemoval(t *testing.T) { + var wg1 sync.WaitGroup + var wg2 sync.WaitGroup + wg1.Add(1) + wg2.Add(1) + + // The first time this job is run, set a timer and remove the other job + // 750ms later. Correct behavior would be to still run the job again in + // 250ms, but the bug would cause it to run instead 1s later. + + var calls int + var mu sync.Mutex + + cron := newWithSeconds() + hourJob := cron.Schedule(Every(time.Hour), FuncJob(func() {})) + cron.Schedule(Every(time.Second), FuncJob(func() { + mu.Lock() + defer mu.Unlock() + switch calls { + case 0: + wg1.Done() + calls++ + case 1: + time.Sleep(750 * time.Millisecond) + cron.Remove(hourJob) + calls++ + case 2: + calls++ + wg2.Done() + case 3: + panic("unexpected 3rd call") + } + })) + + cron.Start() + defer cron.Stop() + + // the first run might be any length of time 0 - 1s, since the schedule + // rounds to the second. wait for the first run to true up. 
+ wg1.Wait() + + select { + case <-time.After(2 * OneSecond): + t.Error("expected job fires 2 times") + case <-wait(&wg2): + } +} + +type ZeroSchedule struct{} + +func (*ZeroSchedule) Next(time.Time) time.Time { + return time.Time{} +} + +// Tests that job without time does not run +func TestJobWithZeroTimeDoesNotRun(t *testing.T) { + cron := newWithSeconds() + var calls int64 + cron.AddFunc("* * * * * *", func() { atomic.AddInt64(&calls, 1) }) + cron.Schedule(new(ZeroSchedule), FuncJob(func() { t.Error("expected zero task will not run") })) + cron.Start() + defer cron.Stop() + <-time.After(OneSecond) + if atomic.LoadInt64(&calls) != 1 { + t.Errorf("called %d times, expected 1\n", calls) + } +} + +func TestStopAndWait(t *testing.T) { + t.Run("nothing running, returns immediately", func(t *testing.T) { + cron := newWithSeconds() + cron.Start() + ctx := cron.Stop() + select { + case <-ctx.Done(): + case <-time.After(time.Millisecond): + t.Error("context was not done immediately") + } + }) + + t.Run("repeated calls to Stop", func(t *testing.T) { + cron := newWithSeconds() + cron.Start() + _ = cron.Stop() + time.Sleep(time.Millisecond) + ctx := cron.Stop() + select { + case <-ctx.Done(): + case <-time.After(time.Millisecond): + t.Error("context was not done immediately") + } + }) + + t.Run("a couple fast jobs added, still returns immediately", func(t *testing.T) { + cron := newWithSeconds() + cron.AddFunc("* * * * * *", func() {}) + cron.Start() + cron.AddFunc("* * * * * *", func() {}) + cron.AddFunc("* * * * * *", func() {}) + cron.AddFunc("* * * * * *", func() {}) + time.Sleep(time.Second) + ctx := cron.Stop() + select { + case <-ctx.Done(): + case <-time.After(time.Millisecond): + t.Error("context was not done immediately") + } + }) + + t.Run("a couple fast jobs and a slow job added, waits for slow job", func(t *testing.T) { + cron := newWithSeconds() + cron.AddFunc("* * * * * *", func() {}) + cron.Start() + cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) }) + cron.AddFunc("* * * * * *", func() {}) + time.Sleep(time.Second) + + ctx := cron.Stop() + + // Verify that it is not done for at least 750ms + select { + case <-ctx.Done(): + t.Error("context was done too quickly immediately") + case <-time.After(750 * time.Millisecond): + // expected, because the job sleeping for 1 second is still running + } + + // Verify that it IS done in the next 500ms (giving 250ms buffer) + select { + case <-ctx.Done(): + // expected + case <-time.After(1500 * time.Millisecond): + t.Error("context not done after job should have completed") + } + }) + + t.Run("repeated calls to stop, waiting for completion and after", func(t *testing.T) { + cron := newWithSeconds() + cron.AddFunc("* * * * * *", func() {}) + cron.AddFunc("* * * * * *", func() { time.Sleep(2 * time.Second) }) + cron.Start() + cron.AddFunc("* * * * * *", func() {}) + time.Sleep(time.Second) + ctx := cron.Stop() + ctx2 := cron.Stop() + + // Verify that it is not done for at least 1500ms + select { + case <-ctx.Done(): + t.Error("context was done too quickly immediately") + case <-ctx2.Done(): + t.Error("context2 was done too quickly immediately") + case <-time.After(1500 * time.Millisecond): + // expected, because the job sleeping for 2 seconds is still running + } + + // Verify that it IS done in the next 1s (giving 500ms buffer) + select { + case <-ctx.Done(): + // expected + case <-time.After(time.Second): + t.Error("context not done after job should have completed") + } + + // Verify that ctx2 is also done. 
+ select { + case <-ctx2.Done(): + // expected + case <-time.After(time.Millisecond): + t.Error("context2 not done even though context1 is") + } + + // Verify that a new context retrieved from stop is immediately done. + ctx3 := cron.Stop() + select { + case <-ctx3.Done(): + // expected + case <-time.After(time.Millisecond): + t.Error("context not done even when cron Stop is completed") + } + + }) +} + +func TestMultiThreadedStartAndStop(t *testing.T) { + cron := New() + go cron.Run() + time.Sleep(2 * time.Millisecond) + cron.Stop() +} + +func wait(wg *sync.WaitGroup) chan bool { + ch := make(chan bool) + go func() { + wg.Wait() + ch <- true + }() + return ch +} + +func stop(cron *Cron) chan bool { + ch := make(chan bool) + go func() { + cron.Stop() + ch <- true + }() + return ch +} + +// newWithSeconds returns a Cron with the seconds field enabled. +func newWithSeconds() *Cron { + return New(WithParser(secondParser), WithChain()) +} diff --git a/backend/lib/cron/doc.go b/backend/lib/cron/doc.go new file mode 100644 index 00000000..fbee72c1 --- /dev/null +++ b/backend/lib/cron/doc.go @@ -0,0 +1,212 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) + c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 5 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +"sun" are equally accepted. + +The specific interpretation of the format is based on the Cron Wikipedia page: +https://en.wikipedia.org/wiki/Cron + +Alternative Formats + +Alternative Cron expression formats support other fields like seconds. You can +implement that by creating a custom Parser as follows. + + cron.New( + cron.WithParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)) + +The most popular alternative Cron expression format is Quartz: +http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. 
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+    Entry                  | Description                                | Equivalent To
+    -----                  | -----------                                | -------------
+    @yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 1 1 *
+    @monthly               | Run once a month, midnight, first of month | 0 0 1 * *
+    @weekly                | Run once a week, midnight between Sat/Sun  | 0 0 * * 0
+    @daily (or @midnight)  | Run once a day, midnight                   | 0 0 * * *
+    @hourly                | Run once an hour, beginning of hour        | 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+By default, all interpretation and scheduling is done in the machine's local
+time zone (time.Local). You can specify a different time zone on construction:
+
+    cron.New(
+        cron.WithLocation(time.UTC))
+
+Individual cron schedules may also override the time zone they are to be
+interpreted in by providing an additional space-separated field at the beginning
+of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
+
+For example:
+
+    # Runs at 6am in time.Local
+    cron.New().AddFunc("0 6 * * ?", ...)
+
+    # Runs at 6am in America/New_York
+    nyc, _ := time.LoadLocation("America/New_York")
+    c := cron.New(cron.WithLocation(nyc))
+    c.AddFunc("0 6 * * ?", ...)
+
+    # Runs at 6am in Asia/Tokyo
+    cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
+
+    # Runs at 6am in Asia/Tokyo
+    c := cron.New(cron.WithLocation(nyc))
+    c.SetLocation("America/New_York")
+    c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
+
+The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Job Wrappers / Chain
+
+A Cron runner may be configured with a chain of job wrappers to add
+cross-cutting functionality to all submitted jobs.
For example, they may be used +to achieve the following effects: + + - Recover any panics from jobs (activated by default) + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + +Install wrappers for all jobs added to a cron using the `cron.WithChain` option: + + cron.New(cron.WithChain( + cron.SkipIfStillRunning(logger), + )) + +Install wrappers for individual jobs by explicitly wrapping them: + + job = cron.NewChain( + cron.SkipIfStillRunning(logger), + ).Then(job) + +Thread safety + +Since the Cron service runs concurrently with the calling code, some amount of +care must be taken to ensure proper synchronization. + +All cron methods are designed to be correctly synchronized as long as the caller +ensures that invocations have a clear happens-before ordering between them. + +Logging + +Cron defines a Logger interface that is a subset of the one defined in +github.com/go-logr/logr. It has two logging levels (Info and Error), and +parameters are key/value pairs. This makes it possible for cron logging to plug +into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided +to wrap the standard library *log.Logger. + +For additional insight into Cron operations, verbose logging may be activated +which will record job runs, scheduling decisions, and added or removed jobs. +Activate it with a one-off logger as follows: + + cron.New( + cron.WithLogger( + cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + + +Implementation + +Cron entries are stored in an array, sorted by their next activation time. Cron +sleeps until the next job is due to be run. + +Upon waking: + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. +*/ +package cron diff --git a/backend/lib/cron/logger.go b/backend/lib/cron/logger.go new file mode 100644 index 00000000..b4efcc05 --- /dev/null +++ b/backend/lib/cron/logger.go @@ -0,0 +1,86 @@ +package cron + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +// DefaultLogger is used by Cron if none is specified. +var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) + +// DiscardLogger can be used by callers to discard all log messages. +var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) + +// Logger is the interface used in this package for logging, so that any backend +// can be plugged in. It is a subset of the github.com/go-logr/logr interface. +type Logger interface { + // Info logs routine messages about cron's operation. + Info(msg string, keysAndValues ...interface{}) + // Error logs an error condition. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// PrintfLogger wraps a Printf-based logger (such as the standard library "log") +// into an implementation of the Logger interface which logs errors only. +func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, false} +} + +// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library +// "log") into an implementation of the Logger interface which logs everything. 
+func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, true} +} + +type printfLogger struct { + logger interface{ Printf(string, ...interface{}) } + logInfo bool +} + +func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { + if pl.logInfo { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)), + append([]interface{}{msg}, keysAndValues...)...) + } +} + +func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)+2), + append([]interface{}{msg, "error", err}, keysAndValues...)...) +} + +// formatString returns a logfmt-like format string for the number of +// key/values. +func formatString(numKeysAndValues int) string { + var sb strings.Builder + sb.WriteString("%s") + if numKeysAndValues > 0 { + sb.WriteString(", ") + } + for i := 0; i < numKeysAndValues/2; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString("%v=%v") + } + return sb.String() +} + +// formatTimes formats any time.Time values as RFC3339. +func formatTimes(keysAndValues []interface{}) []interface{} { + var formattedArgs []interface{} + for _, arg := range keysAndValues { + if t, ok := arg.(time.Time); ok { + arg = t.Format(time.RFC3339) + } + formattedArgs = append(formattedArgs, arg) + } + return formattedArgs +} diff --git a/backend/lib/cron/option.go b/backend/lib/cron/option.go new file mode 100644 index 00000000..07638201 --- /dev/null +++ b/backend/lib/cron/option.go @@ -0,0 +1,45 @@ +package cron + +import ( + "time" +) + +// Option represents a modification to the default behavior of a Cron. +type Option func(*Cron) + +// WithLocation overrides the timezone of the cron instance. +func WithLocation(loc *time.Location) Option { + return func(c *Cron) { + c.location = loc + } +} + +// WithSeconds overrides the parser used for interpreting job schedules to +// include a seconds field as the first one. +func WithSeconds() Option { + return WithParser(NewParser( + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + )) +} + +// WithParser overrides the parser used for interpreting job schedules. +func WithParser(p Parser) Option { + return func(c *Cron) { + c.parser = p + } +} + +// WithChain specifies Job wrappers to apply to all jobs added to this cron. +// Refer to the Chain* functions in this package for provided wrappers. +func WithChain(wrappers ...JobWrapper) Option { + return func(c *Cron) { + c.chain = NewChain(wrappers...) + } +} + +// WithLogger uses the provided logger. 
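+// For example, cron.New(cron.WithLogger(cron.DiscardLogger)) silences all cron
+// output, while cron.WithLogger(cron.VerbosePrintfLogger(...)) logs every action.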
+func WithLogger(logger Logger) Option { + return func(c *Cron) { + c.logger = logger + } +} diff --git a/backend/lib/cron/option_test.go b/backend/lib/cron/option_test.go new file mode 100644 index 00000000..8aef1682 --- /dev/null +++ b/backend/lib/cron/option_test.go @@ -0,0 +1,42 @@ +package cron + +import ( + "log" + "strings" + "testing" + "time" +) + +func TestWithLocation(t *testing.T) { + c := New(WithLocation(time.UTC)) + if c.location != time.UTC { + t.Errorf("expected UTC, got %v", c.location) + } +} + +func TestWithParser(t *testing.T) { + var parser = NewParser(Dow) + c := New(WithParser(parser)) + if c.parser != parser { + t.Error("expected provided parser") + } +} + +func TestWithVerboseLogger(t *testing.T) { + var buf syncWriter + var logger = log.New(&buf, "", log.LstdFlags) + c := New(WithLogger(VerbosePrintfLogger(logger))) + if c.logger.(printfLogger).logger != logger { + t.Error("expected provided logger") + } + + c.AddFunc("@every 1s", func() {}) + c.Start() + time.Sleep(OneSecond) + c.Stop() + out := buf.String() + if !strings.Contains(out, "schedule,") || + !strings.Contains(out, "run,") { + t.Error("expected to see some actions, got:", out) + } +} diff --git a/backend/lib/cron/parser.go b/backend/lib/cron/parser.go new file mode 100644 index 00000000..3cf8879f --- /dev/null +++ b/backend/lib/cron/parser.go @@ -0,0 +1,434 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. Most options specify which +// fields should be included, while others enable features. If a field is not +// included the parser will assume a default value. These options do not change +// the order fields are parse in. +type ParseOption int + +const ( + Second ParseOption = 1 << iota // Seconds field, default 0 + SecondOptional // Optional seconds field, default 0 + Minute // Minutes field, default 0 + Hour // Hours field, default 0 + Dom // Day of month field, default * + Month // Month field, default * + Dow // Day of week field, default * + DowOptional // Optional day of week field, default * + Descriptor // Allow descriptors such as @monthly, @weekly, etc. +) + +var places = []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, +} + +var defaults = []string{ + "0", + "0", + "0", + "*", + "*", + "*", +} + +// A custom Parser that can be configured. +type Parser struct { + options ParseOption +} + +// NewParser creates a Parser with custom options. +// +// It panics if more than one Optional is given, since it would be impossible to +// correctly infer which optional is provided or missing in general. +// +// Examples +// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + optionals++ + } + if options&SecondOptional > 0 { + optionals++ + } + if optionals > 1 { + panic("multiple optionals may not be configured") + } + return Parser{options} +} + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. 
+// It accepts crontab specs and features configured by NewParser. +func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("empty spec string") + } + + // Extract timezone if present + var loc = time.Local + if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") { + var err error + i := strings.Index(spec, " ") + eq := strings.Index(spec, "=") + if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil { + return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err) + } + spec = strings.TrimSpace(spec[i:]) + } + + // Handle named schedules (descriptors), if configured + if strings.HasPrefix(spec, "@") { + if p.options&Descriptor == 0 { + return nil, fmt.Errorf("parser does not accept descriptors: %v", spec) + } + return parseDescriptor(spec, loc) + } + + // Split on whitespace. + fields := strings.Fields(spec) + + // Validate & fill in any omitted or optional fields + var err error + fields, err = normalizeFields(fields, p.options) + if err != nil { + return nil, err + } + + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + Location: loc, + }, nil +} + +// normalizeFields takes a subset set of the time fields and returns the full set +// with defaults (zeroes) populated for unset fields. +// +// As part of performing this function, it also validates that the provided +// fields are compatible with the configured options. +func normalizeFields(fields []string, options ParseOption) ([]string, error) { + // Validate optionals & add their field to options + optionals := 0 + if options&SecondOptional > 0 { + options |= Second + optionals++ + } + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + if optionals > 1 { + return nil, fmt.Errorf("multiple optionals may not be configured") + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if options&place > 0 { + max++ + } + } + min := max - optionals + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields) + } + return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields) + } + + // Populate the optional field if not provided + if min < max && len(fields) == min { + switch { + case options&DowOptional > 0: + fields = append(fields, defaults[5]) // TODO: improve access to default + case options&SecondOptional > 0: + fields = append([]string{defaults[0]}, fields...) 
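+			// (the seconds default is prepended here because, when seconds is the
+			// optional field, it is the first field of the expanded list)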
+ default: + return nil, fmt.Errorf("unknown optional field") + } + } + + // Populate all fields not part of options with their defaults + n := 0 + expandedFields := make([]string, len(places)) + copy(expandedFields, defaults) + for i, place := range places { + if options&place > 0 { + expandedFields[i] = fields[n] + n++ + } + } + return expandedFields, nil +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given +// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries +// representing: minute, hour, day of month, month and day of week, in that +// order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + if step > 1 { + extra = 0 + } + default: + return 0, fmt.Errorf("too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. 
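+// Negative values are rejected.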
+func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. +func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + Location: loc, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) +} diff --git a/backend/lib/cron/parser_test.go b/backend/lib/cron/parser_test.go new file mode 100644 index 00000000..41c8c520 --- /dev/null +++ b/backend/lib/cron/parser_test.go @@ -0,0 +1,383 @@ +package cron + +import ( + "reflect" + "strings" + "testing" + "time" +) + +var secondParser = NewParser(Second | Minute | Hour | Dom | Month | DowOptional | Descriptor) + +func TestRange(t *testing.T) { + zero := uint64(0) + ranges := []struct { + expr string + min, max uint + expected uint64 + err string + }{ + {"5", 0, 7, 1 << 5, ""}, + {"0", 0, 7, 1 << 0, ""}, + {"7", 0, 7, 1 << 7, ""}, + + {"5-5", 0, 7, 1 << 5, ""}, + {"5-6", 0, 7, 1<<5 | 1<<6, ""}, + {"5-7", 0, 7, 1<<5 | 1<<6 | 1<<7, ""}, + + {"5-6/2", 0, 7, 1 << 5, ""}, + {"5-7/2", 0, 7, 1<<5 | 1<<7, ""}, + {"5-7/1", 0, 7, 1<<5 | 1<<6 | 1<<7, ""}, + + {"*", 1, 3, 1<<1 | 1<<2 | 1<<3 | starBit, ""}, + {"*/2", 1, 3, 1<<1 | 1<<3, ""}, + + {"5--5", 0, 0, zero, "too many hyphens"}, + {"jan-x", 0, 0, zero, "failed to parse int from"}, + {"2-x", 1, 5, zero, "failed to parse int from"}, + {"*/-12", 0, 0, zero, "negative number"}, + {"*//2", 0, 0, zero, "too many 
slashes"}, + {"1", 3, 5, zero, "below minimum"}, + {"6", 3, 5, zero, "above maximum"}, + {"5-3", 3, 5, zero, "beyond end of range"}, + {"*/0", 0, 0, zero, "should be a positive number"}, + } + + for _, c := range ranges { + actual, err := getRange(c.expr, bounds{c.min, c.max, nil}) + if len(c.err) != 0 && (err == nil || !strings.Contains(err.Error(), c.err)) { + t.Errorf("%s => expected %v, got %v", c.expr, c.err, err) + } + if len(c.err) == 0 && err != nil { + t.Errorf("%s => unexpected error %v", c.expr, err) + } + if actual != c.expected { + t.Errorf("%s => expected %d, got %d", c.expr, c.expected, actual) + } + } +} + +func TestField(t *testing.T) { + fields := []struct { + expr string + min, max uint + expected uint64 + }{ + {"5", 1, 7, 1 << 5}, + {"5,6", 1, 7, 1<<5 | 1<<6}, + {"5,6,7", 1, 7, 1<<5 | 1<<6 | 1<<7}, + {"1,5-7/2,3", 1, 7, 1<<1 | 1<<5 | 1<<7 | 1<<3}, + } + + for _, c := range fields { + actual, _ := getField(c.expr, bounds{c.min, c.max, nil}) + if actual != c.expected { + t.Errorf("%s => expected %d, got %d", c.expr, c.expected, actual) + } + } +} + +func TestAll(t *testing.T) { + allBits := []struct { + r bounds + expected uint64 + }{ + {minutes, 0xfffffffffffffff}, // 0-59: 60 ones + {hours, 0xffffff}, // 0-23: 24 ones + {dom, 0xfffffffe}, // 1-31: 31 ones, 1 zero + {months, 0x1ffe}, // 1-12: 12 ones, 1 zero + {dow, 0x7f}, // 0-6: 7 ones + } + + for _, c := range allBits { + actual := all(c.r) // all() adds the starBit, so compensate for that.. + if c.expected|starBit != actual { + t.Errorf("%d-%d/%d => expected %b, got %b", + c.r.min, c.r.max, 1, c.expected|starBit, actual) + } + } +} + +func TestBits(t *testing.T) { + bits := []struct { + min, max, step uint + expected uint64 + }{ + {0, 0, 1, 0x1}, + {1, 1, 1, 0x2}, + {1, 5, 2, 0x2a}, // 101010 + {1, 4, 2, 0xa}, // 1010 + } + + for _, c := range bits { + actual := getBits(c.min, c.max, c.step) + if c.expected != actual { + t.Errorf("%d-%d/%d => expected %b, got %b", + c.min, c.max, c.step, c.expected, actual) + } + } +} + +func TestParseScheduleErrors(t *testing.T) { + var tests = []struct{ expr, err string }{ + {"* 5 j * * *", "failed to parse int from"}, + {"@every Xm", "failed to parse duration"}, + {"@unrecognized", "unrecognized descriptor"}, + {"* * * *", "expected 5 to 6 fields"}, + {"", "empty spec string"}, + } + for _, c := range tests { + actual, err := secondParser.Parse(c.expr) + if err == nil || !strings.Contains(err.Error(), c.err) { + t.Errorf("%s => expected %v, got %v", c.expr, c.err, err) + } + if actual != nil { + t.Errorf("expected nil schedule on error, got %v", actual) + } + } +} + +func TestParseSchedule(t *testing.T) { + tokyo, _ := time.LoadLocation("Asia/Tokyo") + entries := []struct { + parser Parser + expr string + expected Schedule + }{ + {secondParser, "0 5 * * * *", every5min(time.Local)}, + {standardParser, "5 * * * *", every5min(time.Local)}, + {secondParser, "CRON_TZ=UTC 0 5 * * * *", every5min(time.UTC)}, + {standardParser, "CRON_TZ=UTC 5 * * * *", every5min(time.UTC)}, + {secondParser, "CRON_TZ=Asia/Tokyo 0 5 * * * *", every5min(tokyo)}, + {secondParser, "@every 5m", ConstantDelaySchedule{5 * time.Minute}}, + {secondParser, "@midnight", midnight(time.Local)}, + {secondParser, "TZ=UTC @midnight", midnight(time.UTC)}, + {secondParser, "TZ=Asia/Tokyo @midnight", midnight(tokyo)}, + {secondParser, "@yearly", annual(time.Local)}, + {secondParser, "@annually", annual(time.Local)}, + { + parser: secondParser, + expr: "* 5 * * * *", + expected: &SpecSchedule{ + Second: all(seconds), + 
Minute: 1 << 5, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: time.Local, + }, + }, + } + + for _, c := range entries { + actual, err := c.parser.Parse(c.expr) + if err != nil { + t.Errorf("%s => unexpected error %v", c.expr, err) + } + if !reflect.DeepEqual(actual, c.expected) { + t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + } + } +} + +func TestOptionalSecondSchedule(t *testing.T) { + parser := NewParser(SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor) + entries := []struct { + expr string + expected Schedule + }{ + {"0 5 * * * *", every5min(time.Local)}, + {"5 5 * * * *", every5min5s(time.Local)}, + {"5 * * * *", every5min(time.Local)}, + } + + for _, c := range entries { + actual, err := parser.Parse(c.expr) + if err != nil { + t.Errorf("%s => unexpected error %v", c.expr, err) + } + if !reflect.DeepEqual(actual, c.expected) { + t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + } + } +} + +func TestNormalizeFields(t *testing.T) { + tests := []struct { + name string + input []string + options ParseOption + expected []string + }{ + { + "AllFields_NoOptional", + []string{"0", "5", "*", "*", "*", "*"}, + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + []string{"0", "5", "*", "*", "*", "*"}, + }, + { + "AllFields_SecondOptional_Provided", + []string{"0", "5", "*", "*", "*", "*"}, + SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor, + []string{"0", "5", "*", "*", "*", "*"}, + }, + { + "AllFields_SecondOptional_NotProvided", + []string{"5", "*", "*", "*", "*"}, + SecondOptional | Minute | Hour | Dom | Month | Dow | Descriptor, + []string{"0", "5", "*", "*", "*", "*"}, + }, + { + "SubsetFields_NoOptional", + []string{"5", "15", "*"}, + Hour | Dom | Month, + []string{"0", "0", "5", "15", "*", "*"}, + }, + { + "SubsetFields_DowOptional_Provided", + []string{"5", "15", "*", "4"}, + Hour | Dom | Month | DowOptional, + []string{"0", "0", "5", "15", "*", "4"}, + }, + { + "SubsetFields_DowOptional_NotProvided", + []string{"5", "15", "*"}, + Hour | Dom | Month | DowOptional, + []string{"0", "0", "5", "15", "*", "*"}, + }, + { + "SubsetFields_SecondOptional_NotProvided", + []string{"5", "15", "*"}, + SecondOptional | Hour | Dom | Month, + []string{"0", "0", "5", "15", "*", "*"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := normalizeFields(test.input, test.options) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(actual, test.expected) { + t.Errorf("expected %v, got %v", test.expected, actual) + } + }) + } +} + +func TestNormalizeFields_Errors(t *testing.T) { + tests := []struct { + name string + input []string + options ParseOption + err string + }{ + { + "TwoOptionals", + []string{"0", "5", "*", "*", "*", "*"}, + SecondOptional | Minute | Hour | Dom | Month | DowOptional, + "", + }, + { + "TooManyFields", + []string{"0", "5", "*", "*"}, + SecondOptional | Minute | Hour, + "", + }, + { + "NoFields", + []string{}, + SecondOptional | Minute | Hour, + "", + }, + { + "TooFewFields", + []string{"*"}, + SecondOptional | Minute | Hour, + "", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := normalizeFields(test.input, test.options) + if err == nil { + t.Errorf("expected an error, got none. 
results: %v", actual) + } + if !strings.Contains(err.Error(), test.err) { + t.Errorf("expected error %q, got %q", test.err, err.Error()) + } + }) + } +} + +func TestStandardSpecSchedule(t *testing.T) { + entries := []struct { + expr string + expected Schedule + err string + }{ + { + expr: "5 * * * *", + expected: &SpecSchedule{1 << seconds.min, 1 << 5, all(hours), all(dom), all(months), all(dow), time.Local}, + }, + { + expr: "@every 5m", + expected: ConstantDelaySchedule{time.Duration(5) * time.Minute}, + }, + { + expr: "5 j * * *", + err: "failed to parse int from", + }, + { + expr: "* * * *", + err: "expected exactly 5 fields", + }, + } + + for _, c := range entries { + actual, err := ParseStandard(c.expr) + if len(c.err) != 0 && (err == nil || !strings.Contains(err.Error(), c.err)) { + t.Errorf("%s => expected %v, got %v", c.expr, c.err, err) + } + if len(c.err) == 0 && err != nil { + t.Errorf("%s => unexpected error %v", c.expr, err) + } + if !reflect.DeepEqual(actual, c.expected) { + t.Errorf("%s => expected %b, got %b", c.expr, c.expected, actual) + } + } +} + +func TestNoDescriptorParser(t *testing.T) { + parser := NewParser(Minute | Hour) + _, err := parser.Parse("@every 1m") + if err == nil { + t.Error("expected an error, got none") + } +} + +func every5min(loc *time.Location) *SpecSchedule { + return &SpecSchedule{1 << 0, 1 << 5, all(hours), all(dom), all(months), all(dow), loc} +} + +func every5min5s(loc *time.Location) *SpecSchedule { + return &SpecSchedule{1 << 5, 1 << 5, all(hours), all(dom), all(months), all(dow), loc} +} + +func midnight(loc *time.Location) *SpecSchedule { + return &SpecSchedule{1, 1, 1, all(dom), all(months), all(dow), loc} +} + +func annual(loc *time.Location) *SpecSchedule { + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + } +} diff --git a/backend/lib/cron/spec.go b/backend/lib/cron/spec.go new file mode 100644 index 00000000..fa1e241e --- /dev/null +++ b/backend/lib/cron/spec.go @@ -0,0 +1,188 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 + + // Override location for this schedule. + Location *time.Location +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach + // + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. 
+	// If the field doesn't match the schedule, then increment the field until it matches.
+	// While incrementing the field, a wrap-around brings it back to the beginning
+	// of the field list (since it is necessary to re-verify previous field
+	// values)
+
+	// Convert the given time into the schedule's timezone, if one is specified.
+	// Save the original timezone so we can convert back after we find a time.
+	// Note that schedules without a time zone specified (time.Local) are treated
+	// as local to the time provided.
+	origLocation := t.Location()
+	loc := s.Location
+	if loc == time.Local {
+		loc = t.Location()
+	}
+	if s.Location != time.Local {
+		t = t.In(s.Location)
+	}
+
+	// Start at the earliest possible time (the upcoming second).
+	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+	// This flag indicates whether a field has been incremented.
+	added := false
+
+	// If no time is found within five years, return zero.
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	//
+	// NOTE: This causes issues for daylight savings regimes where midnight does
+	// not exist. For example: Sao Paulo has DST that transforms midnight on
+	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 0, 1)
+		// Notice if the hour is no longer midnight due to DST.
+		// Add an hour if it's 23, subtract an hour if it's 1.
+		if t.Hour() != 0 {
+			if t.Hour() > 12 {
+				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
+			} else {
+				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
+			}
+		}
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t.In(origLocation)
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/backend/lib/cron/spec_test.go b/backend/lib/cron/spec_test.go
new file mode 100644
index 00000000..1b8a503e
--- /dev/null
+++ b/backend/lib/cron/spec_test.go
@@ -0,0 +1,300 @@
+package cron
+
+import (
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestActivation(t *testing.T) {
+	tests := []struct {
+		time, spec string
+		expected   bool
+	}{
+		// Every fifteen minutes.
+		{"Mon Jul 9 15:00 2012", "0/15 * * * *", true},
+		{"Mon Jul 9 15:45 2012", "0/15 * * * *", true},
+		{"Mon Jul 9 15:40 2012", "0/15 * * * *", false},
+
+		// Every fifteen minutes, starting at 5 minutes.
+		{"Mon Jul 9 15:05 2012", "5/15 * * * *", true},
+		{"Mon Jul 9 15:20 2012", "5/15 * * * *", true},
+		{"Mon Jul 9 15:50 2012", "5/15 * * * *", true},
+
+		// Named months
+		{"Sun Jul 15 15:00 2012", "0/15 * * Jul *", true},
+		{"Sun Jul 15 15:00 2012", "0/15 * * Jun *", false},
+
+		// Everything set.
+		{"Sun Jul 15 08:30 2012", "30 08 ? Jul Sun", true},
+		{"Sun Jul 15 08:30 2012", "30 08 15 Jul ?", true},
+		{"Mon Jul 16 08:30 2012", "30 08 ? Jul Sun", false},
+		{"Mon Jul 16 08:30 2012", "30 08 15 Jul ?", false},
+
+		// Predefined schedules
+		{"Mon Jul 9 15:00 2012", "@hourly", true},
+		{"Mon Jul 9 15:04 2012", "@hourly", false},
+		{"Mon Jul 9 15:00 2012", "@daily", false},
+		{"Mon Jul 9 00:00 2012", "@daily", true},
+		{"Mon Jul 9 00:00 2012", "@weekly", false},
+		{"Sun Jul 8 00:00 2012", "@weekly", true},
+		{"Sun Jul 8 01:00 2012", "@weekly", false},
+		{"Sun Jul 8 00:00 2012", "@monthly", false},
+		{"Sun Jul 1 00:00 2012", "@monthly", true},
+
+		// Test interaction of DOW and DOM.
+		// If both are restricted, then only one needs to match.
+		{"Sun Jul 15 00:00 2012", "* * 1,15 * Sun", true},
+		{"Fri Jun 15 00:00 2012", "* * 1,15 * Sun", true},
+		{"Wed Aug 1 00:00 2012", "* * 1,15 * Sun", true},
+		{"Sun Jul 15 00:00 2012", "* * */10 * Sun", true}, // verifies #70
+
+		// However, if one has a star, then both need to match.
+ {"Sun Jul 15 00:00 2012", "* * * * Mon", false}, + {"Mon Jul 9 00:00 2012", "* * 1,15 * *", false}, + {"Sun Jul 15 00:00 2012", "* * 1,15 * *", true}, + {"Sun Jul 15 00:00 2012", "* * */2 * Sun", true}, + } + + for _, test := range tests { + sched, err := ParseStandard(test.spec) + if err != nil { + t.Error(err) + continue + } + actual := sched.Next(getTime(test.time).Add(-1 * time.Second)) + expected := getTime(test.time) + if test.expected && expected != actual || !test.expected && expected == actual { + t.Errorf("Fail evaluating %s on %s: (expected) %s != %s (actual)", + test.spec, test.time, expected, actual) + } + } +} + +func TestNext(t *testing.T) { + runs := []struct { + time, spec string + expected string + }{ + // Simple cases + {"Mon Jul 9 14:45 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, + {"Mon Jul 9 14:59:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012"}, + + // Wrap around hours + {"Mon Jul 9 15:45 2012", "0 20-35/15 * * * *", "Mon Jul 9 16:20 2012"}, + + // Wrap around days + {"Mon Jul 9 23:46 2012", "0 */15 * * * *", "Tue Jul 10 00:00 2012"}, + {"Mon Jul 9 23:45 2012", "0 20-35/15 * * * *", "Tue Jul 10 00:20 2012"}, + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * * * *", "Tue Jul 10 00:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 * * *", "Tue Jul 10 01:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 10-12 * * *", "Tue Jul 10 10:20:15 2012"}, + + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 1/2 */2 * *", "Thu Jul 11 01:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 * *", "Wed Jul 10 00:20:15 2012"}, + {"Mon Jul 9 23:35:51 2012", "15/35 20-35/15 * 9-20 Jul *", "Wed Jul 10 00:20:15 2012"}, + + // Wrap around months + {"Mon Jul 9 23:35 2012", "0 0 0 9 Apr-Oct ?", "Thu Aug 9 00:00 2012"}, + {"Mon Jul 9 23:35 2012", "0 0 0 */5 Apr,Aug,Oct Mon", "Tue Aug 1 00:00 2012"}, + {"Mon Jul 9 23:35 2012", "0 0 0 */5 Oct Mon", "Mon Oct 1 00:00 2012"}, + + // Wrap around years + {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon", "Mon Feb 4 00:00 2013"}, + {"Mon Jul 9 23:35 2012", "0 0 0 * Feb Mon/2", "Fri Feb 1 00:00 2013"}, + + // Wrap around minute, hour, day, month, and year + {"Mon Dec 31 23:59:45 2012", "0 * * * * *", "Tue Jan 1 00:00:00 2013"}, + + // Leap year + {"Mon Jul 9 23:35 2012", "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016"}, + + // Daylight savings time 2am EST (-5) -> 3am EDT (-4) + {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 30 2 11 Mar ?", "2013-03-11T02:30:00-0400"}, + + // hourly job + {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"}, + {"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"}, + {"2012-03-11T03:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"}, + {"2012-03-11T04:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"}, + + // hourly job using CRON_TZ + {"2012-03-11T00:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T01:00:00-0500"}, + {"2012-03-11T01:00:00-0500", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T03:00:00-0400"}, + {"2012-03-11T03:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T04:00:00-0400"}, + {"2012-03-11T04:00:00-0400", "CRON_TZ=America/New_York 0 0 * * * ?", "2012-03-11T05:00:00-0400"}, + + // 1am nightly job + {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-03-11T01:00:00-0500"}, + {"2012-03-11T01:00:00-0500", 
"TZ=America/New_York 0 0 1 * * ?", "2012-03-12T01:00:00-0400"}, + + // 2am nightly job (skipped) + {"2012-03-11T00:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-03-12T02:00:00-0400"}, + + // Daylight savings time 2am EDT (-4) => 1am EST (-5) + {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 30 2 04 Nov ?", "2012-11-04T02:30:00-0500"}, + {"2012-11-04T01:45:00-0400", "TZ=America/New_York 0 30 1 04 Nov ?", "2012-11-04T01:30:00-0500"}, + + // hourly job + {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0400"}, + {"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T01:00:00-0500"}, + {"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-11-04T02:00:00-0500"}, + + // 1am nightly job (runs twice) + {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, + {"2012-11-04T01:00:00-0400", "TZ=America/New_York 0 0 1 * * ?", "2012-11-04T01:00:00-0500"}, + {"2012-11-04T01:00:00-0500", "TZ=America/New_York 0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, + + // 2am nightly job + {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 2 * * ?", "2012-11-04T02:00:00-0500"}, + {"2012-11-04T02:00:00-0500", "TZ=America/New_York 0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, + + // 3am nightly job + {"2012-11-04T00:00:00-0400", "TZ=America/New_York 0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, + {"2012-11-04T03:00:00-0500", "TZ=America/New_York 0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, + + // hourly job + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0400"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 * * * ?", "2012-11-04T01:00:00-0500"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 * * * ?", "2012-11-04T02:00:00-0500"}, + + // 1am nightly job (runs twice) + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0400"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0400", "0 0 1 * * ?", "2012-11-04T01:00:00-0500"}, + {"TZ=America/New_York 2012-11-04T01:00:00-0500", "0 0 1 * * ?", "2012-11-05T01:00:00-0500"}, + + // 2am nightly job + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 2 * * ?", "2012-11-04T02:00:00-0500"}, + {"TZ=America/New_York 2012-11-04T02:00:00-0500", "0 0 2 * * ?", "2012-11-05T02:00:00-0500"}, + + // 3am nightly job + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 * * ?", "2012-11-04T03:00:00-0500"}, + {"TZ=America/New_York 2012-11-04T03:00:00-0500", "0 0 3 * * ?", "2012-11-05T03:00:00-0500"}, + + // Unsatisfiable + {"Mon Jul 9 23:35 2012", "0 0 0 30 Feb ?", ""}, + {"Mon Jul 9 23:35 2012", "0 0 0 31 Apr ?", ""}, + + // Monthly job + {"TZ=America/New_York 2012-11-04T00:00:00-0400", "0 0 3 3 * ?", "2012-12-03T03:00:00-0500"}, + + // Test the scenario of DST resulting in midnight not being a valid time. 
+		// https://github.com/robfig/cron/issues/157
+		{"2018-10-17T05:00:00-0400", "TZ=America/Sao_Paulo 0 0 9 10 * ?", "2018-11-10T06:00:00-0500"},
+		{"2018-02-14T05:00:00-0500", "TZ=America/Sao_Paulo 0 0 9 22 * ?", "2018-02-22T07:00:00-0500"},
+	}
+
+	for _, c := range runs {
+		sched, err := secondParser.Parse(c.spec)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		actual := sched.Next(getTime(c.time))
+		expected := getTime(c.expected)
+		if !actual.Equal(expected) {
+			t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual)
+		}
+	}
+}
+
+func TestErrors(t *testing.T) {
+	invalidSpecs := []string{
+		"xyz",
+		"60 0 * * *",
+		"0 60 * * *",
+		"0 0 * * XYZ",
+	}
+	for _, spec := range invalidSpecs {
+		_, err := ParseStandard(spec)
+		if err == nil {
+			t.Error("expected an error parsing: ", spec)
+		}
+	}
+}
+
+func getTime(value string) time.Time {
+	if value == "" {
+		return time.Time{}
+	}
+
+	var location = time.Local
+	if strings.HasPrefix(value, "TZ=") {
+		parts := strings.Fields(value)
+		loc, err := time.LoadLocation(parts[0][len("TZ="):])
+		if err != nil {
+			panic("could not parse location:" + err.Error())
+		}
+		location = loc
+		value = parts[1]
+	}
+
+	var layouts = []string{
+		"Mon Jan 2 15:04 2006",
+		"Mon Jan 2 15:04:05 2006",
+	}
+	for _, layout := range layouts {
+		if t, err := time.ParseInLocation(layout, value, location); err == nil {
+			return t
+		}
+	}
+	if t, err := time.ParseInLocation("2006-01-02T15:04:05-0700", value, location); err == nil {
+		return t
+	}
+	panic("could not parse time value " + value)
+}
+
+func TestNextWithTz(t *testing.T) {
+	runs := []struct {
+		time, spec string
+		expected   string
+	}{
+		// Failing tests
+		{"2016-01-03T13:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"},
+		{"2016-01-03T04:09:03+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"},
+
+		// Passing tests
+		{"2016-01-03T14:09:03+0530", "14 14 * * *", "2016-01-03T14:14:00+0530"},
+		{"2016-01-03T14:00:00+0530", "14 14 * * ?", "2016-01-03T14:14:00+0530"},
+	}
+	for _, c := range runs {
+		sched, err := ParseStandard(c.spec)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		actual := sched.Next(getTimeTZ(c.time))
+		expected := getTimeTZ(c.expected)
+		if !actual.Equal(expected) {
+			t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual)
+		}
+	}
+}
+
+func getTimeTZ(value string) time.Time {
+	if value == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse("Mon Jan 2 15:04 2006", value)
+	if err != nil {
+		t, err = time.Parse("Mon Jan 2 15:04:05 2006", value)
+		if err != nil {
+			t, err = time.Parse("2006-01-02T15:04:05-0700", value)
+			if err != nil {
+				panic(err)
+			}
+		}
+	}
+
+	return t
+}
+
+// https://github.com/robfig/cron/issues/144
+func TestSlash0NoHang(t *testing.T) {
+	schedule := "TZ=America/New_York 15/0 * * * *"
+	_, err := ParseStandard(schedule)
+	if err == nil {
+		t.Error("expected an error on 0 increment")
+	}
+}
diff --git a/backend/main.go b/backend/main.go
new file mode 100644
index 00000000..1b4a29c1
--- /dev/null
+++ b/backend/main.go
@@ -0,0 +1,121 @@
+package main
+
+import (
+	"crawlab/config"
+	"crawlab/database"
+	"crawlab/middlewares"
+	"crawlab/routes"
+	"crawlab/services"
+	"github.com/apex/log"
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/viper"
+	"runtime/debug"
+)
+
+func main() {
+	app := gin.Default()
+
+	// Initialize configuration
+	if err := config.InitConfig(""); err != nil {
+		panic(err)
+	}
+	log.Info("初始化配置成功")
+
+	// Initialize logging settings
+	logLevel := viper.GetString("log.level")
+	if logLevel != "" {
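+		// A non-empty value overrides the default level; it is handed to
+		// apex/log, e.g. "debug", "info", "warn" or "error".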
+		log.SetLevelFromString(logLevel)
+	}
+	log.Info("初始化日志设置成功")
+
+	// Initialize the MongoDB database
+	if err := database.InitMongo(); err != nil {
+		debug.PrintStack()
+		panic(err)
+	}
+	log.Info("初始化Mongodb数据库成功")
+
+	// Initialize the Redis database
+	if err := database.InitRedis(); err != nil {
+		debug.PrintStack()
+		panic(err)
+	}
+	log.Info("初始化Redis数据库成功")
+
+	if services.IsMaster() {
+		// Initialize the scheduler
+		if err := services.InitScheduler(); err != nil {
+			debug.PrintStack()
+			panic(err)
+		}
+		log.Info("初始化定时任务成功")
+	}
+
+	// Initialize the task executor
+	if err := services.InitTaskExecutor(); err != nil {
+		debug.PrintStack()
+		panic(err)
+	}
+	log.Info("初始化任务执行器成功")
+
+	// Initialize the node service
+	if err := services.InitNodeService(); err != nil {
+		debug.PrintStack()
+		panic(err)
+	}
+	log.Info("初始化节点配置成功")
+
+	// Initialize the spider service
+	if err := services.InitSpiderService(); err != nil {
+		debug.PrintStack()
+		panic(err)
+	}
+	log.Info("初始化爬虫服务成功")
+
+	// The following services run on the master node only
+	if services.IsMaster() {
+		// Middleware
+		app.Use(middlewares.CORSMiddleware())
+
+		// Routes
+		// Nodes
+		app.GET("/nodes", routes.GetNodeList)               // node list
+		app.GET("/nodes/:id", routes.GetNode)               // node detail
+		app.POST("/nodes/:id", routes.PostNode)             // update node
+		app.GET("/nodes/:id/tasks", routes.GetNodeTaskList) // node task list
+		// Spiders
+		app.GET("/spiders", routes.GetSpiderList)              // spider list
+		app.GET("/spiders/:id", routes.GetSpider)              // spider detail
+		app.PUT("/spiders", routes.PutSpider)                  // upload spider
+		app.POST("/spiders", routes.PublishAllSpiders)         // publish all spiders
+		app.POST("/spiders/:id", routes.PostSpider)            // update spider
+		app.POST("/spiders/:id/publish", routes.PublishSpider) // publish spider
+		app.DELETE("/spiders/:id", routes.DeleteSpider)        // delete spider
+		app.GET("/spiders/:id/tasks", routes.GetSpiderTasks)   // spider task list
+		// Tasks
+		app.GET("/tasks", routes.GetTaskList)                                 // task list
+		app.GET("/tasks/:id", routes.GetTask)                                 // task detail
+		app.PUT("/tasks", routes.PutTask)                                     // dispatch task
+		app.DELETE("/tasks/:id", routes.DeleteTask)                           // delete task
+		app.POST("/tasks/:id/cancel", routes.CancelTask)                      // cancel task
+		app.GET("/tasks/:id/log", routes.GetTaskLog)                          // task log
+		app.GET("/tasks/:id/results", routes.GetTaskResults)                  // task results
+		app.GET("/tasks/:id/results/download", routes.DownloadTaskResultsCsv) // download task results
+		// Schedules
+		app.GET("/schedules", routes.GetScheduleList)       // schedule list
+		app.GET("/schedules/:id", routes.GetSchedule)       // schedule detail
+		app.PUT("/schedules", routes.PutSchedule)           // create schedule
+		app.POST("/schedules/:id", routes.PostSchedule)     // update schedule
+		app.DELETE("/schedules/:id", routes.DeleteSchedule) // delete schedule
+	}
+
+	// Ping route
+	app.GET("/ping", routes.Ping)
+
+	// Run the server
+	host := viper.GetString("server.host")
+	port := viper.GetString("server.port")
+	if err := app.Run(host + ":" + port); err != nil {
+		panic(err)
+	}
+}
diff --git a/backend/middlewares/cors.go b/backend/middlewares/cors.go
new file mode 100644
index 00000000..5397251d
--- /dev/null
+++ b/backend/middlewares/cors.go
@@ -0,0 +1,19 @@
+package middlewares
+
+import "github.com/gin-gonic/gin"
+
+func CORSMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
+		c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
+		c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
+		c.Writer.Header().Set("Access-Control-Allow-Methods", "DELETE, POST, OPTIONS, GET, PUT")
+
+		if c.Request.Method == "OPTIONS" {
+			c.AbortWithStatus(204)
+			return
+		}
+
+		c.Next()
+	}
+}
diff --git a/backend/model/base.go b/backend/model/base.go
new file mode 100644
index
00000000..70d031f3 --- /dev/null +++ b/backend/model/base.go @@ -0,0 +1,12 @@ +package model + +type Base struct { +} + +func (b *Base) Save() error { + return nil +} + +func (b *Base) Delete() error { + return nil +} diff --git a/backend/model/node.go b/backend/model/node.go new file mode 100644 index 00000000..8a49f121 --- /dev/null +++ b/backend/model/node.go @@ -0,0 +1,117 @@ +package model + +import ( + "crawlab/database" + "github.com/apex/log" + "github.com/globalsign/mgo" + "github.com/globalsign/mgo/bson" + "runtime/debug" + "time" +) + +type Node struct { + Id bson.ObjectId `json:"_id" bson:"_id"` + Name string `json:"name" bson:"name"` + Status string `json:"status" bson:"status"` + Ip string `json:"ip" bson:"ip"` + Port string `json:"port" bson:"port"` + Mac string `json:"mac" bson:"mac"` + Description string `json:"description" bson:"description"` + + UpdateTs time.Time `json:"update_ts" bson:"update_ts"` + CreateTs time.Time `json:"create_ts" bson:"create_ts"` +} + +func (n *Node) Save() error { + s, c := database.GetCol("nodes") + defer s.Close() + n.UpdateTs = time.Now() + if err := c.UpdateId(n.Id, n); err != nil { + return err + } + return nil +} + +func (n *Node) Add() error { + s, c := database.GetCol("nodes") + defer s.Close() + n.Id = bson.NewObjectId() + n.UpdateTs = time.Now() + n.CreateTs = time.Now() + if err := c.Insert(&n); err != nil { + return err + } + return nil +} + +func (n *Node) Delete() error { + s, c := database.GetCol("nodes") + defer s.Close() + if err := c.RemoveId(n.Id); err != nil { + return err + } + return nil +} + +func (n *Node) GetTasks() ([]Task, error) { + tasks, err := GetTaskList(bson.M{"node_id": n.Id}, 0, 10, "-create_ts") + //tasks, err := GetTaskList(nil, 0, 10, "-create_ts") + if err != nil { + return []Task{}, err + } + + return tasks, nil +} + +func GetNodeList(filter interface{}) ([]Node, error) { + s, c := database.GetCol("nodes") + defer s.Close() + + var results []Node + if err := c.Find(filter).All(&results); err != nil { + return results, err + } + return results, nil +} + +func GetNode(id bson.ObjectId) (Node, error) { + s, c := database.GetCol("nodes") + defer s.Close() + + var result Node + if err := c.FindId(id).One(&result); err != nil { + if err != mgo.ErrNotFound { + log.Errorf(err.Error()) + debug.PrintStack() + } + return result, err + } + return result, nil +} + +func UpdateNode(id bson.ObjectId, item Node) error { + s, c := database.GetCol("nodes") + defer s.Close() + + var node Node + if err := c.FindId(id).One(&node); err != nil { + return err + } + + if err := item.Save(); err != nil { + return err + } + return nil +} + +func GetNodeTaskList(id bson.ObjectId) ([]Task, error) { + node, err := GetNode(id) + if err != nil { + return []Task{}, err + } + tasks, err := node.GetTasks() + if err != nil { + return []Task{}, err + } + return tasks, nil +} diff --git a/backend/model/schedule.go b/backend/model/schedule.go new file mode 100644 index 00000000..eee32dcf --- /dev/null +++ b/backend/model/schedule.go @@ -0,0 +1,131 @@ +package model + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/lib/cron" + "github.com/apex/log" + "github.com/globalsign/mgo/bson" + "runtime/debug" + "time" +) + +type Schedule struct { + Id bson.ObjectId `json:"_id" bson:"_id"` + Name string `json:"name" bson:"name"` + Description string `json:"description" bson:"description"` + SpiderId bson.ObjectId `json:"spider_id" bson:"spider_id"` + NodeId bson.ObjectId `json:"node_id" bson:"node_id"` + Cron string `json:"cron" 
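// A minimal, self-contained sketch of the session-per-call pattern behind database.GetCol
// as used by the models here. The helper shape (a copied session plus a collection, closed
// by the caller) is inferred from the call sites; the connection URL and database name are
// placeholders, not the project's actual configuration.
package main

import (
	"fmt"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

var baseSession *mgo.Session

// getCol copies the base session so each caller gets its own socket; the caller closes
// it afterwards, which is why the models use the `s, c := ...; defer s.Close()` idiom.
func getCol(name string) (*mgo.Session, *mgo.Collection) {
	s := baseSession.Copy()
	return s, s.DB("crawlab").C(name)
}

func main() {
	var err error
	if baseSession, err = mgo.Dial("mongodb://localhost:27017"); err != nil {
		panic(err)
	}
	defer baseSession.Close()

	s, c := getCol("nodes")
	defer s.Close()

	n, err := c.Find(bson.M{"status": "online"}).Count()
	fmt.Println(n, err)
}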
bson:"cron"` + EntryId cron.EntryID `json:"entry_id" bson:"entry_id"` + + // 前端展示 + SpiderName string `json:"spider_name" bson:"spider_name"` + NodeName string `json:"node_name" bson:"node_name"` + + CreateTs time.Time `json:"create_ts" bson:"create_ts"` + UpdateTs time.Time `json:"update_ts" bson:"update_ts"` +} + +func (sch *Schedule) Save() error { + s, c := database.GetCol("schedules") + defer s.Close() + sch.UpdateTs = time.Now() + if err := c.UpdateId(sch.Id, sch); err != nil { + return err + } + return nil +} + +func GetScheduleList(filter interface{}) ([]Schedule, error) { + s, c := database.GetCol("schedules") + defer s.Close() + + var schedules []Schedule + if err := c.Find(filter).All(&schedules); err != nil { + return schedules, err + } + + for i, schedule := range schedules { + // 获取节点名称 + if schedule.NodeId == bson.ObjectIdHex(constants.ObjectIdNull) { + // 选择所有节点 + schedules[i].NodeName = "All Nodes" + } else { + // 选择单一节点 + node, err := GetNode(schedule.NodeId) + if err != nil { + log.Errorf(err.Error()) + continue + } + schedules[i].NodeName = node.Name + } + + // 获取爬虫名称 + spider, err := GetSpider(schedule.SpiderId) + if err != nil { + log.Errorf(err.Error()) + continue + } + schedules[i].SpiderName = spider.Name + } + return schedules, nil +} + +func GetSchedule(id bson.ObjectId) (Schedule, error) { + s, c := database.GetCol("schedules") + defer s.Close() + + var result Schedule + if err := c.FindId(id).One(&result); err != nil { + return result, err + } + return result, nil +} + +func UpdateSchedule(id bson.ObjectId, item Schedule) error { + s, c := database.GetCol("schedules") + defer s.Close() + + var result Schedule + if err := c.FindId(id).One(&result); err != nil { + return err + } + + if err := item.Save(); err != nil { + return err + } + return nil +} + +func AddSchedule(item Schedule) error { + s, c := database.GetCol("schedules") + defer s.Close() + + item.Id = bson.NewObjectId() + item.CreateTs = time.Now() + item.UpdateTs = time.Now() + + if err := c.Insert(&item); err != nil { + debug.PrintStack() + log.Errorf(err.Error()) + return err + } + return nil +} + +func RemoveSchedule(id bson.ObjectId) error { + s, c := database.GetCol("schedules") + defer s.Close() + + var result Schedule + if err := c.FindId(id).One(&result); err != nil { + return err + } + + if err := c.RemoveId(id); err != nil { + return err + } + + return nil +} diff --git a/backend/model/spider.go b/backend/model/spider.go new file mode 100644 index 00000000..037e6b06 --- /dev/null +++ b/backend/model/spider.go @@ -0,0 +1,167 @@ +package model + +import ( + "crawlab/database" + "github.com/apex/log" + "github.com/globalsign/mgo" + "github.com/globalsign/mgo/bson" + "runtime/debug" + "time" +) + +type Env struct { + Name string `json:"name" bson:"name"` + Value string `json:"value" bson:"value"` +} + +type Spider struct { + Id bson.ObjectId `json:"_id" bson:"_id"` // 爬虫ID + Name string `json:"name" bson:"name"` // 爬虫名称(唯一) + DisplayName string `json:"display_name" bson:"display_name"` // 爬虫显示名称 + Type string `json:"type"` // 爬虫类别 + FileId bson.ObjectId `json:"file_id" bson:"file_id"` // GridFS文件ID + Col string `json:"col"` // 结果储存位置 + Site string `json:"site"` // 爬虫网站 + Envs []Env `json:"envs" bson:"envs"` // 环境变量 + + // 自定义爬虫 + Src string `json:"src" bson:"src"` // 源码位置 + Cmd string `json:"cmd" bson:"cmd"` // 执行命令 + + // 前端展示 + LastRunTs time.Time `json:"last_run_ts"` // 最后一次执行时间 + + // TODO: 可配置爬虫 + //Fields []interface{} `json:"fields"` + //DetailFields []interface{} `json:"detail_fields"` + 
//CrawlType string `json:"crawl_type"` + //StartUrl string `json:"start_url"` + //UrlPattern string `json:"url_pattern"` + //ItemSelector string `json:"item_selector"` + //ItemSelectorType string `json:"item_selector_type"` + //PaginationSelector string `json:"pagination_selector"` + //PaginationSelectorType string `json:"pagination_selector_type"` + + CreateTs time.Time `json:"create_ts" bson:"create_ts"` + UpdateTs time.Time `json:"update_ts" bson:"update_ts"` +} + +func (spider *Spider) Save() error { + s, c := database.GetCol("spiders") + defer s.Close() + + spider.UpdateTs = time.Now() + + if err := c.UpdateId(spider.Id, spider); err != nil { + debug.PrintStack() + return err + } + return nil +} + +func (spider *Spider) Add() error { + s, c := database.GetCol("spiders") + defer s.Close() + + spider.Id = bson.NewObjectId() + spider.CreateTs = time.Now() + spider.UpdateTs = time.Now() + + if err := c.Insert(&spider); err != nil { + return err + } + return nil +} + +func (spider *Spider) GetTasks() ([]Task, error) { + tasks, err := GetTaskList(bson.M{"spider_id": spider.Id}, 0, 10, "-create_ts") + if err != nil { + return tasks, err + } + return tasks, nil +} + +func (spider *Spider) GetLastTask() (Task, error) { + tasks, err := GetTaskList(bson.M{"spider_id": spider.Id}, 0, 1, "-create_ts") + if err != nil { + return Task{}, err + } + if tasks == nil { + return Task{}, nil + } + return tasks[0], nil +} + +func GetSpiderList(filter interface{}, skip int, limit int) ([]Spider, error) { + s, c := database.GetCol("spiders") + defer s.Close() + + // 获取爬虫列表 + var spiders []Spider + if err := c.Find(filter).Skip(skip).Limit(limit).All(&spiders); err != nil { + debug.PrintStack() + return spiders, err + } + + // 遍历爬虫列表 + for i, spider := range spiders { + // 获取最后一次任务 + task, err := spider.GetLastTask() + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + continue + } + + // 赋值 + spiders[i].LastRunTs = task.CreateTs + } + + return spiders, nil +} + +func GetSpider(id bson.ObjectId) (Spider, error) { + s, c := database.GetCol("spiders") + defer s.Close() + + var result Spider + if err := c.FindId(id).One(&result); err != nil { + if err != mgo.ErrNotFound { + debug.PrintStack() + } + return result, err + } + return result, nil +} + +func UpdateSpider(id bson.ObjectId, item Spider) error { + s, c := database.GetCol("spiders") + defer s.Close() + + var result Spider + if err := c.FindId(id).One(&result); err != nil { + debug.PrintStack() + return err + } + + if err := item.Save(); err != nil { + return err + } + return nil +} + +func RemoveSpider(id bson.ObjectId) error { + s, c := database.GetCol("spiders") + defer s.Close() + + var result Spider + if err := c.FindId(id).One(&result); err != nil { + return err + } + + if err := c.RemoveId(id); err != nil { + return err + } + + return nil +} diff --git a/backend/model/task.go b/backend/model/task.go new file mode 100644 index 00000000..e2384ed5 --- /dev/null +++ b/backend/model/task.go @@ -0,0 +1,192 @@ +package model + +import ( + "crawlab/database" + "github.com/apex/log" + "github.com/globalsign/mgo" + "github.com/globalsign/mgo/bson" + "runtime/debug" + "time" +) + +type Task struct { + Id string `json:"_id" bson:"_id"` + SpiderId bson.ObjectId `json:"spider_id" bson:"spider_id"` + StartTs time.Time `json:"start_ts" bson:"start_ts"` + FinishTs time.Time `json:"finish_ts" bson:"finish_ts"` + Status string `json:"status" bson:"status"` + NodeId bson.ObjectId `json:"node_id" bson:"node_id"` + LogPath string `json:"log_path" 
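// A minimal, self-contained sketch of the 1-based page_num/page_size convention used by
// the list queries in these models, extracted as a small helper. The collection, filter
// and sort key are illustrative only.
package main

import (
	"fmt"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

// pageToSkip converts 1-based paging parameters into the Skip/Limit arguments expected
// by mgo, falling back to sensible defaults when the client sends zeros.
func pageToSkip(pageNum, pageSize int) (skip, limit int) {
	if pageNum <= 0 {
		pageNum = 1
	}
	if pageSize <= 0 {
		pageSize = 10
	}
	return (pageNum - 1) * pageSize, pageSize
}

func listTasks(c *mgo.Collection, pageNum, pageSize int) ([]bson.M, error) {
	skip, limit := pageToSkip(pageNum, pageSize)
	var tasks []bson.M
	err := c.Find(nil).Skip(skip).Limit(limit).Sort("-create_ts").All(&tasks)
	return tasks, err
}

func main() {
	fmt.Println(pageToSkip(3, 0)) // 20 10
}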
bson:"log_path"` + Cmd string `json:"cmd" bson:"cmd"` + Error string `json:"error" bson:"error"` + + // 前端数据 + SpiderName string `json:"spider_name"` + NodeName string `json:"node_name"` + NumResults int `json:"num_results"` + + CreateTs time.Time `json:"create_ts" bson:"create_ts"` + UpdateTs time.Time `json:"update_ts" bson:"update_ts"` +} + +func (t *Task) GetSpider() (Spider, error) { + spider, err := GetSpider(t.SpiderId) + if err != nil { + return spider, err + } + return spider, nil +} + +func (t *Task) GetNode() (Node, error) { + node, err := GetNode(t.NodeId) + if err != nil { + return node, err + } + return node, nil +} + +func (t *Task) Save() error { + s, c := database.GetCol("tasks") + defer s.Close() + t.UpdateTs = time.Now() + if err := c.UpdateId(t.Id, t); err != nil { + debug.PrintStack() + return err + } + return nil +} + +func (t *Task) Delete() error { + s, c := database.GetCol("tasks") + defer s.Close() + if err := c.RemoveId(t.Id); err != nil { + return err + } + return nil +} + +func (t *Task) GetResults(pageNum int, pageSize int) (results []interface{}, total int, err error) { + spider, err := t.GetSpider() + if err != nil { + log.Errorf(err.Error()) + return + } + + if spider.Col == "" { + return + } + + s, c := database.GetCol(spider.Col) + defer s.Close() + + query := bson.M{ + "task_id": t.Id, + } + if err = c.Find(query).Skip((pageNum - 1) * pageSize).Limit(pageSize).Sort("-create_ts").All(&results); err != nil { + return + } + + if total, err = c.Find(query).Count(); err != nil { + return + } + + return +} + +func GetTaskList(filter interface{}, skip int, limit int, sortKey string) ([]Task, error) { + s, c := database.GetCol("tasks") + defer s.Close() + + var tasks []Task + if err := c.Find(filter).Skip(skip).Limit(limit).Sort(sortKey).All(&tasks); err != nil { + debug.PrintStack() + return tasks, err + } + + for i, task := range tasks { + // 获取爬虫名称 + spider, err := task.GetSpider() + if err == mgo.ErrNotFound { + // do nothing + } else if err != nil { + return tasks, err + } else { + tasks[i].SpiderName = spider.DisplayName + } + + // 获取节点名称 + node, err := task.GetNode() + if err == mgo.ErrNotFound { + // do nothing + } else if err != nil { + return tasks, err + } else { + tasks[i].NodeName = node.Name + } + + // 获取结果数 + if spider.Col == "" { + continue + } + s, c := database.GetCol(spider.Col) + tasks[i].NumResults, err = c.Find(bson.M{"task_id": task.Id}).Count() + if err != nil { + continue + } + s.Close() + } + return tasks, nil +} + +func GetTaskListTotal(filter interface{}) (int, error) { + s, c := database.GetCol("tasks") + defer s.Close() + + var result int + result, err := c.Find(filter).Count() + if err != nil { + return result, err + } + return result, nil +} + +func GetTask(id string) (Task, error) { + s, c := database.GetCol("tasks") + defer s.Close() + + var task Task + if err := c.FindId(id).One(&task); err != nil { + debug.PrintStack() + return task, err + } + return task, nil +} + +func AddTask(item Task) error { + s, c := database.GetCol("tasks") + defer s.Close() + + item.CreateTs = time.Now() + item.UpdateTs = time.Now() + + if err := c.Insert(&item); err != nil { + return err + } + return nil +} + +func RemoveTask(id string) error { + s, c := database.GetCol("tasks") + defer s.Close() + + var result Task + if err := c.FindId(id).One(&result); err != nil { + return err + } + + if err := c.RemoveId(id); err != nil { + return err + } + + return nil +} diff --git a/backend/routes/base.go b/backend/routes/base.go new file mode 100644 index 
00000000..b338a833 --- /dev/null +++ b/backend/routes/base.go @@ -0,0 +1,16 @@ +package routes + +type Response struct { + Status string `json:"status"` + Message string `json:"message"` + Data interface{} `json:"data"` + Error string `json:"error"` +} + +type ListResponse struct { + Status string `json:"status"` + Message string `json:"message"` + Total int `json:"total"` + Data interface{} `json:"data"` + Error string `json:"error"` +} diff --git a/backend/routes/node.go b/backend/routes/node.go new file mode 100644 index 00000000..c2d7d66c --- /dev/null +++ b/backend/routes/node.go @@ -0,0 +1,93 @@ +package routes + +import ( + "crawlab/model" + "crawlab/services" + "github.com/gin-gonic/gin" + "github.com/globalsign/mgo/bson" + "net/http" +) + +func GetNodeList(c *gin.Context) { + results, err := model.GetNodeList(nil) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: results, + }) +} + +func GetNode(c *gin.Context) { + id := c.Param("id") + + result, err := model.GetNode(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: result, + }) +} + +func Ping(c *gin.Context) { + data, err := services.GetNodeData() + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: data, + }) +} + +func PostNode(c *gin.Context) { + id := c.Param("id") + + item, err := model.GetNode(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + var newItem model.Node + if err := c.ShouldBindJSON(&newItem); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + newItem.Id = item.Id + + if err := model.UpdateNode(bson.ObjectIdHex(id), newItem); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func GetNodeTaskList(c *gin.Context) { + id := c.Param("id") + + tasks, err := model.GetNodeTaskList(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: tasks, + }) +} diff --git a/backend/routes/schedule.go b/backend/routes/schedule.go new file mode 100644 index 00000000..b447abb5 --- /dev/null +++ b/backend/routes/schedule.go @@ -0,0 +1,125 @@ +package routes + +import ( + "crawlab/constants" + "crawlab/model" + "crawlab/services" + "github.com/gin-gonic/gin" + "github.com/globalsign/mgo/bson" + "net/http" +) + +func GetScheduleList(c *gin.Context) { + results, err := model.GetScheduleList(nil) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: results, + }) +} + +func GetSchedule(c *gin.Context) { + id := c.Param("id") + + result, err := model.GetSchedule(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: result, + }) +} + +func PostSchedule(c *gin.Context) { + id := c.Param("id") + + // 绑定数据模型 + var newItem model.Schedule + if err := c.ShouldBindJSON(&newItem); err != nil { + 
HandleError(http.StatusBadRequest, c, err) + return + } + newItem.Id = bson.ObjectIdHex(id) + + // 如果node_id为空,则置为空ObjectId + if newItem.NodeId == "" { + newItem.NodeId = bson.ObjectIdHex(constants.ObjectIdNull) + } + + // 更新数据库 + if err := model.UpdateSchedule(bson.ObjectIdHex(id), newItem); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 更新定时任务 + if err := services.Sched.Update(); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func PutSchedule(c *gin.Context) { + var item model.Schedule + + // 绑定数据模型 + if err := c.ShouldBindJSON(&item); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 如果node_id为空,则置为空ObjectId + if item.NodeId == "" { + item.NodeId = bson.ObjectIdHex(constants.ObjectIdNull) + } + + // 更新数据库 + if err := model.AddSchedule(item); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 更新定时任务 + if err := services.Sched.Update(); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func DeleteSchedule(c *gin.Context) { + id := c.Param("id") + + // 删除定时任务 + if err := model.RemoveSchedule(bson.ObjectIdHex(id)); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 更新定时任务 + if err := services.Sched.Update(); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} diff --git a/backend/routes/spider.go b/backend/routes/spider.go new file mode 100644 index 00000000..9af2b8ce --- /dev/null +++ b/backend/routes/spider.go @@ -0,0 +1,239 @@ +package routes + +import ( + "crawlab/model" + "crawlab/services" + "crawlab/utils" + "github.com/gin-gonic/gin" + "github.com/globalsign/mgo/bson" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" + "github.com/spf13/viper" + "net/http" + "os" + "path/filepath" + "runtime/debug" + "strings" +) + +func GetSpiderList(c *gin.Context) { + results, err := model.GetSpiderList(nil, 0, 0) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: results, + }) +} + +func GetSpider(c *gin.Context) { + id := c.Param("id") + + if !bson.IsObjectIdHex(id) { + HandleErrorF(http.StatusBadRequest, c, "invalid id") + } + + result, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: result, + }) +} + +func PostSpider(c *gin.Context) { + id := c.Param("id") + + if !bson.IsObjectIdHex(id) { + HandleErrorF(http.StatusBadRequest, c, "invalid id") + } + + var item model.Spider + if err := c.ShouldBindJSON(&item); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + if err := model.UpdateSpider(bson.ObjectIdHex(id), item); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func PublishAllSpiders(c *gin.Context) { + if err := services.PublishAllSpiders(); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) 
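// A minimal, self-contained sketch of the IsObjectIdHex guard used in the spider routes,
// pulled out as a tiny helper. bson.ObjectIdHex panics on malformed input, so validating
// the path parameter first keeps a bad id at a 400 response. The route, handler and port
// below are illustrative only.
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/globalsign/mgo/bson"
)

// parseObjectId validates the hex string before converting it, turning a bad id into an
// error instead of a panic inside the handler.
func parseObjectId(id string) (bson.ObjectId, bool) {
	if !bson.IsObjectIdHex(id) {
		return "", false
	}
	return bson.ObjectIdHex(id), true
}

func main() {
	app := gin.Default()
	app.GET("/spiders/:id", func(c *gin.Context) {
		oid, ok := parseObjectId(c.Param("id"))
		if !ok {
			c.JSON(http.StatusBadRequest, gin.H{"status": "ok", "error": "invalid id"})
			return
		}
		c.JSON(http.StatusOK, gin.H{"status": "ok", "data": oid.Hex()})
	})
	_ = app.Run(":8000")
}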
+} + +func PublishSpider(c *gin.Context) { + id := c.Param("id") + + if !bson.IsObjectIdHex(id) { + HandleErrorF(http.StatusBadRequest, c, "invalid id") + } + + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + if err := services.PublishSpider(spider); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func PutSpider(c *gin.Context) { + // 从body中获取文件 + file, err := c.FormFile("file") + if err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 如果不为zip文件,返回错误 + if !strings.HasSuffix(file.Filename, ".zip") { + debug.PrintStack() + HandleError(http.StatusBadRequest, c, errors.New("Not a valid zip file")) + return + } + + // 保存到本地临时文件 + randomId := uuid.NewV4() + tmpFilePath := filepath.Join(viper.GetString("other.tmppath"), randomId.String()+".zip") + if err := c.SaveUploadedFile(file, tmpFilePath); err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 读取临时文件 + tmpFile, err := os.OpenFile(tmpFilePath, os.O_RDONLY, 0777) + if err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + if err = tmpFile.Close(); err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 目标目录 + dstPath := filepath.Join( + viper.GetString("spider.path"), + strings.Replace(file.Filename, ".zip", "", 1), + ) + + // 如果目标目录已存在,删除目标目录 + if utils.Exists(dstPath) { + if err := os.RemoveAll(dstPath); err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + } + } + + // 将临时文件解压到爬虫目录 + if err := utils.DeCompress(tmpFile, dstPath); err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 删除临时文件 + if err = os.Remove(tmpFilePath); err != nil { + debug.PrintStack() + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 更新爬虫 + services.UpdateSpiders() + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func DeleteSpider(c *gin.Context) { + id := c.Param("id") + + if !bson.IsObjectIdHex(id) { + HandleErrorF(http.StatusBadRequest, c, "invalid id") + return + } + + // 获取该爬虫 + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 删除爬虫文件目录 + if err := os.RemoveAll(spider.Src); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 从数据库中删除该爬虫 + if err := model.RemoveSpider(bson.ObjectIdHex(id)); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func GetSpiderTasks(c *gin.Context) { + id := c.Param("id") + + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + tasks, err := spider.GetTasks() + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: tasks, + }) +} diff --git a/backend/routes/task.go b/backend/routes/task.go new file mode 100644 index 00000000..e5efa425 --- /dev/null +++ b/backend/routes/task.go @@ -0,0 +1,267 @@ +package routes + +import ( + "bytes" + 
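// A minimal, self-contained sketch of the upload flow used by PUT /spiders: receive a .zip
// via multipart form, park it in a temporary location, then extract it into the spider
// directory using the DeCompressByPath helper added in backend/utils. The /tmp and
// ./spiders paths are placeholders for the configured tmp and spider paths.
package main

import (
	"net/http"
	"path/filepath"
	"strings"

	"crawlab/utils"

	"github.com/gin-gonic/gin"
)

func main() {
	app := gin.Default()
	app.PUT("/spiders", func(c *gin.Context) {
		file, err := c.FormFile("file")
		if err != nil || !strings.HasSuffix(file.Filename, ".zip") {
			c.JSON(http.StatusBadRequest, gin.H{"error": "expected a zip upload"})
			return
		}

		// Save the upload to a temp file, then unzip it next to the other spiders.
		tmpPath := filepath.Join("/tmp", file.Filename)
		if err := c.SaveUploadedFile(file, tmpPath); err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		dstPath := filepath.Join("./spiders", strings.TrimSuffix(file.Filename, ".zip"))
		if err := utils.DeCompressByPath(tmpPath, dstPath); err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"status": "ok"})
	})
	_ = app.Run(":8000")
}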
"crawlab/constants" + "crawlab/model" + "crawlab/services" + "crawlab/utils" + "encoding/csv" + "github.com/gin-gonic/gin" + "github.com/globalsign/mgo/bson" + uuid "github.com/satori/go.uuid" + "net/http" +) + +type TaskListRequestData struct { + PageNum int `form:"page_num"` + PageSize int `form:"page_size"` + NodeId string `form:"node_id"` + SpiderId string `form:"spider_id"` +} + +type TaskResultsRequestData struct { + PageNum int `form:"page_num"` + PageSize int `form:"page_size"` +} + +func GetTaskList(c *gin.Context) { + // 绑定数据 + data := TaskListRequestData{} + if err := c.ShouldBindQuery(&data); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + if data.PageNum == 0 { + data.PageNum = 1 + } + if data.PageSize == 0 { + data.PageNum = 10 + } + + // 过滤条件 + query := bson.M{} + if data.NodeId != "" { + query["node_id"] = bson.ObjectIdHex(data.NodeId) + } + if data.SpiderId != "" { + query["spider_id"] = bson.ObjectIdHex(data.SpiderId) + } + + // 获取任务列表 + tasks, err := model.GetTaskList(query, (data.PageNum-1)*data.PageSize, data.PageSize, "-create_ts") + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 获取总任务数 + total, err := model.GetTaskListTotal(query) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, ListResponse{ + Status: "ok", + Message: "success", + Total: total, + Data: tasks, + }) +} + +func GetTask(c *gin.Context) { + id := c.Param("id") + + result, err := model.GetTask(id) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: result, + }) +} + +func PutTask(c *gin.Context) { + // 生成任务ID + id := uuid.NewV4() + + // 绑定数据 + var t model.Task + if err := c.ShouldBindJSON(&t); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + t.Id = id.String() + t.Status = constants.StatusPending + + // 如果没有传入node_id,则置为null + if t.NodeId.Hex() == "" { + t.NodeId = bson.ObjectIdHex(constants.ObjectIdNull) + } + + // 将任务存入数据库 + if err := model.AddTask(t); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 加入任务队列 + if err := services.AssignTask(t); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func DeleteTask(c *gin.Context) { + id := c.Param("id") + + if err := model.RemoveTask(id); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func GetTaskLog(c *gin.Context) { + id := c.Param("id") + + logStr, err := services.GetTaskLog(id) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: logStr, + }) +} + +func GetTaskResults(c *gin.Context) { + id := c.Param("id") + + // 绑定数据 + data := TaskResultsRequestData{} + if err := c.ShouldBindQuery(&data); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 获取任务 + task, err := model.GetTask(id) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 获取结果 + results, total, err := task.GetResults(data.PageNum, data.PageSize) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, ListResponse{ + Status: "ok", + Message: 
"success", + Data: results, + Total: total, + }) +} + +func DownloadTaskResultsCsv(c *gin.Context) { + id := c.Param("id") + + // 获取任务 + task, err := model.GetTask(id) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 获取结果 + results, _, err := task.GetResults(1, constants.Infinite) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 字段列表 + var columns []string + if len(results) == 0 { + columns = []string{} + } else { + item := results[0].(bson.M) + for key := range item { + columns = append(columns, key) + } + } + + // 缓冲 + bytesBuffer := &bytes.Buffer{} + + // 写入UTF-8 BOM,避免使用Microsoft Excel打开乱码 + bytesBuffer.Write([]byte("\xEF\xBB\xBF")) + + writer := csv.NewWriter(bytesBuffer) + + // 写入表头 + if err := writer.Write(columns); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 写入内容 + for _, result := range results { + // 将result转换为[]string + item := result.(bson.M) + var values []string + for _, col := range columns { + value := utils.InterfaceToString(item[col]) + values = append(values, value) + } + + // 写入数据 + if err := writer.Write(values); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + } + + // 此时才会将缓冲区数据写入 + writer.Flush() + + // 设置下载的文件名 + c.Writer.Header().Set("Content-Disposition", "attachment;filename=data.csv") + + // 设置文件类型以及输出数据 + c.Data(http.StatusOK, "text/csv", bytesBuffer.Bytes()) +} + +func CancelTask(c *gin.Context) { + id := c.Param("id") + + if err := services.CancelTask(id); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} diff --git a/backend/routes/utils.go b/backend/routes/utils.go new file mode 100644 index 00000000..14c5853e --- /dev/null +++ b/backend/routes/utils.go @@ -0,0 +1,24 @@ +package routes + +import ( + "github.com/gin-gonic/gin" + "runtime/debug" +) + +func HandleError(statusCode int, c *gin.Context, err error) { + debug.PrintStack() + c.JSON(statusCode, Response{ + Status: "ok", + Message: "error", + Error: err.Error(), + }) +} + +func HandleErrorF(statusCode int, c *gin.Context, err string) { + debug.PrintStack() + c.JSON(statusCode, Response{ + Status: "ok", + Message: "error", + Error: err, + }) +} diff --git a/backend/services/log.go b/backend/services/log.go new file mode 100644 index 00000000..d59e463e --- /dev/null +++ b/backend/services/log.go @@ -0,0 +1,57 @@ +package services + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/model" + "crawlab/utils" + "encoding/json" + "github.com/apex/log" + "io/ioutil" + "runtime/debug" +) + +// 任务日志频道映射 +var TaskLogChanMap = utils.NewChanMap() + +// 获取本地日志 +func GetLocalLog(logPath string) (fileBytes []byte, err error) { + fileBytes, err = ioutil.ReadFile(logPath) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return fileBytes, err + } + return fileBytes, nil +} + +// 获取远端日志 +func GetRemoteLog(task model.Task) (logStr string, err error) { + // 序列化消息 + msg := NodeMessage{ + Type: constants.MsgTypeGetLog, + LogPath: task.LogPath, + TaskId: task.Id, + } + msgBytes, err := json.Marshal(&msg) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return "", err + } + + // 发布获取日志消息 + channel := "nodes:" + task.NodeId.Hex() + if err := database.Publish(channel, string(msgBytes)); err != nil { + log.Errorf(err.Error()) + return "", err + } + + // 生成频道,等待获取log + ch := TaskLogChanMap.ChanBlocked(task.Id) + 
+ // 此处阻塞,等待结果 + logStr = <-ch + + return logStr, nil +} diff --git a/backend/services/node.go b/backend/services/node.go new file mode 100644 index 00000000..d0d50c7d --- /dev/null +++ b/backend/services/node.go @@ -0,0 +1,348 @@ +package services + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/lib/cron" + "crawlab/model" + "encoding/json" + "fmt" + "github.com/apex/log" + "github.com/globalsign/mgo/bson" + "github.com/spf13/viper" + "net" + "runtime/debug" + "time" +) + +type Data struct { + Mac string `json:"mac"` + Ip string `json:"ip"` + Master bool `json:"master"` + UpdateTs time.Time `json:"update_ts"` +} + +const ( + Yes = "Y" + No = "N" +) + +// 获取本机的IP地址 +// TODO: 考虑多个IP地址的情况 +func GetIp() (string, error) { + addrList, err := net.InterfaceAddrs() + if err != nil { + return "", err + } + for _, value := range addrList { + if ipNet, ok := value.(*net.IPNet); ok && !ipNet.IP.IsLoopback() { + if ipNet.IP.To4() != nil { + return ipNet.IP.String(), nil + } + } + } + return "", nil +} + +// 获取本机的MAC地址 +func GetMac() (string, error) { + interfaces, err := net.Interfaces() + if err != nil { + debug.PrintStack() + return "", err + } + for _, inter := range interfaces { + if inter.HardwareAddr != nil { + mac := inter.HardwareAddr.String() + return mac, nil + } + } + return "", nil +} + +// 获取本机节点 +func GetCurrentNode() (model.Node, error) { + // 获取本机MAC地址 + mac, err := GetMac() + if err != nil { + return model.Node{}, err + } + + s, c := database.GetCol("nodes") + defer s.Close() + + // 从数据库中获取当前节点 + var n model.Node + if err := c.Find(bson.M{"mac": mac}).One(&n); err != nil { + return n, err + } + return n, nil +} + +// 是否为主节点 +func IsMaster() bool { + return viper.GetString("server.master") == Yes +} + +// 获取节点数据 +func GetNodeData() (Data, error) { + mac, err := GetMac() + if err != nil { + return Data{}, err + } + + value, err := database.RedisClient.HGet("nodes", mac) + data := Data{} + if err := json.Unmarshal([]byte(value), &data); err != nil { + return data, err + } + return data, err +} + +// 更新所有节点状态 +func UpdateNodeStatus() { + // 从Redis获取节点keys + list, err := database.RedisClient.HKeys("nodes") + if err != nil { + log.Errorf(err.Error()) + return + } + + // 遍历节点keys + for _, mac := range list { + // 获取节点数据 + value, err := database.RedisClient.HGet("nodes", mac) + if err != nil { + log.Errorf(err.Error()) + return + } + + // 解析节点列表数据 + var data Data + if err := json.Unmarshal([]byte(value), &data); err != nil { + log.Errorf(err.Error()) + return + } + + // 如果记录的更新时间超过60秒,该节点被认为离线 + if time.Now().Sub(data.UpdateTs) > 60*time.Second { + // 在Redis中删除该节点 + if err := database.RedisClient.HDel("nodes", data.Mac); err != nil { + log.Errorf(err.Error()) + return + } + + // 在MongoDB中该节点设置状态为离线 + s, c := database.GetCol("nodes") + defer s.Close() + var node model.Node + if err := c.Find(bson.M{"mac": mac}).One(&node); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + node.Status = constants.StatusOffline + if err := node.Save(); err != nil { + log.Errorf(err.Error()) + return + } + continue + } + + // 更新节点信息到数据库 + s, c := database.GetCol("nodes") + defer s.Close() + var node model.Node + if err := c.Find(bson.M{"mac": mac}).One(&node); err != nil { + // 数据库不存在该节点 + node = model.Node{ + Name: data.Mac, + Ip: data.Ip, + Port: "8000", + Mac: data.Mac, + Status: constants.StatusOnline, + } + if err := node.Add(); err != nil { + log.Errorf(err.Error()) + return + } + } else { + // 数据库存在该节点 + node.Status = constants.StatusOnline + if err := node.Save(); 
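// A minimal, self-contained sketch of the heartbeat freshness rule applied by the node
// monitor: a registration in the Redis hash that has not been refreshed within 60 seconds
// marks the node offline. The JSON payload mirrors the Data struct above; the Redis
// round-trip is elided and the sample values are placeholders.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type heartbeat struct {
	Mac      string    `json:"mac"`
	Ip       string    `json:"ip"`
	Master   bool      `json:"master"`
	UpdateTs time.Time `json:"update_ts"`
}

// isOffline reports whether a stored heartbeat payload is stale.
func isOffline(raw string, now time.Time) (bool, error) {
	var hb heartbeat
	if err := json.Unmarshal([]byte(raw), &hb); err != nil {
		return false, err
	}
	return now.Sub(hb.UpdateTs) > 60*time.Second, nil
}

func main() {
	hb := heartbeat{Mac: "aa:bb:cc:dd:ee:ff", Ip: "10.0.0.2", UpdateTs: time.Now().Add(-2 * time.Minute)}
	raw, _ := json.Marshal(hb)
	offline, _ := isOffline(string(raw), time.Now())
	fmt.Println(offline) // true: the last heartbeat was two minutes ago
}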
err != nil { + log.Errorf(err.Error()) + return + } + } + } +} + +// 更新节点数据 +func UpdateNodeData() { + // 获取MAC地址 + mac, err := GetMac() + if err != nil { + log.Errorf(err.Error()) + return + } + + // 获取IP地址 + ip, err := GetIp() + if err != nil { + log.Errorf(err.Error()) + return + } + + // 构造节点数据 + data := Data{ + Mac: mac, + Ip: ip, + Master: IsMaster(), + UpdateTs: time.Now(), + } + + // 注册节点到Redis + dataBytes, err := json.Marshal(&data) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + if err := database.RedisClient.HSet("nodes", mac, string(dataBytes)); err != nil { + log.Errorf(err.Error()) + return + } +} + +// TODO: 查看节点PATH +func GetNodePath() { + //os.Environ() +} + +type NodeMessage struct { + // 通信类别 + Type string `json:"type"` + + // 任务相关 + TaskId string `json:"task_id"` // 任务ID + + // 日志相关 + LogPath string `json:"log_path"` // 日志路径 + Log string `json:"log"` // 日志 + + // 环境变量 + Env string `json:"env"` + + // 错误相关 + Error string `json:"error"` +} + +func MasterNodeCallback(channel string, msgStr string) { + // 反序列化 + var msg NodeMessage + if err := json.Unmarshal([]byte(msgStr), &msg); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + if msg.Type == constants.MsgTypeGetLog { + fmt.Println(msg) + time.Sleep(10 * time.Millisecond) + ch := TaskLogChanMap.ChanBlocked(msg.TaskId) + ch <- msg.Log + } +} + +func WorkerNodeCallback(channel string, msgStr string) { + // 反序列化 + msg := NodeMessage{} + fmt.Println(msgStr) + if err := json.Unmarshal([]byte(msgStr), &msg); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + if msg.Type == constants.MsgTypeGetLog { + // 消息类型为获取日志 + + msgSd := NodeMessage{ + Type: constants.MsgTypeGetLog, + TaskId: msg.TaskId, + } + + // 获取本地日志 + logStr, err := GetLocalLog(msg.LogPath) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + msgSd.Error = err.Error() + } + msgSd.Log = string(logStr) + + // 序列化 + msgSdBytes, err := json.Marshal(&msgSd) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 发布消息给主节点 + fmt.Println(msgSd) + if err := database.Publish("nodes:master", string(msgSdBytes)); err != nil { + log.Errorf(err.Error()) + return + } + } else if msg.Type == constants.MsgTypeCancelTask { + ch := TaskExecChanMap.ChanBlocked(msg.TaskId) + ch <- constants.TaskCancel + } +} + +// 初始化节点服务 +func InitNodeService() error { + // 构造定时任务 + c := cron.New(cron.WithSeconds()) + + // 每5秒更新一次本节点信息 + spec := "0/5 * * * * *" + if _, err := c.AddFunc(spec, UpdateNodeData); err != nil { + debug.PrintStack() + return err + } + + // 消息订阅 + var sub database.Subscriber + sub.Connect() + + // 获取当前节点 + node, err := GetCurrentNode() + if err != nil { + log.Errorf(err.Error()) + return err + } + + if IsMaster() { + // 如果为主节点,订阅主节点通信频道 + channel := "nodes:master" + sub.Subscribe(channel, MasterNodeCallback) + } else { + // 若为工作节点,订阅单独指定通信频道 + channel := "nodes:" + node.Id.Hex() + sub.Subscribe(channel, WorkerNodeCallback) + } + + // 如果为主节点,每30秒刷新所有节点信息 + if IsMaster() { + spec := "*/10 * * * * *" + if _, err := c.AddFunc(spec, UpdateNodeStatus); err != nil { + debug.PrintStack() + return err + } + } + + c.Start() + return nil +} diff --git a/backend/services/schedule.go b/backend/services/schedule.go new file mode 100644 index 00000000..916e42d0 --- /dev/null +++ b/backend/services/schedule.go @@ -0,0 +1,133 @@ +package services + +import ( + "crawlab/constants" + "crawlab/lib/cron" + "crawlab/model" + "github.com/apex/log" + uuid 
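// A minimal, self-contained sketch of the request/reply pattern used between GetRemoteLog
// and MasterNodeCallback: the caller parks on an unbuffered channel keyed by task id, and
// the pub/sub callback that receives the worker's reply pushes the payload into that same
// channel. The reply is simulated in-process here; in the real flow it arrives via Redis.
package main

import (
	"fmt"
	"time"
)

type chanMap struct{ m map[string]chan string }

func (cm *chanMap) chanBlocked(key string) chan string {
	if ch, ok := cm.m[key]; ok {
		return ch
	}
	ch := make(chan string) // unbuffered: the reader blocks until a reply arrives
	cm.m[key] = ch
	return ch
}

func main() {
	cm := &chanMap{m: make(map[string]chan string)}
	ch := cm.chanBlocked("task-1") // create the entry up front, like TaskLogChanMap

	// Stand-in for the pub/sub callback delivering the remote log.
	go func() {
		time.Sleep(50 * time.Millisecond)
		cm.chanBlocked("task-1") <- "log line from worker"
	}()

	// Stand-in for GetRemoteLog: publish the request, then block for the reply.
	fmt.Println(<-ch)
}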
"github.com/satori/go.uuid" + "runtime/debug" +) + +var Sched *Scheduler + +type Scheduler struct { + cron *cron.Cron +} + +func AddTask(s model.Schedule) func() { + return func() { + nodeId := s.NodeId + + // 生成任务ID + id := uuid.NewV4() + + // 生成任务模型 + t := model.Task{ + Id: id.String(), + SpiderId: s.SpiderId, + NodeId: nodeId, + Status: constants.StatusPending, + } + + // 将任务存入数据库 + if err := model.AddTask(t); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 加入任务队列 + if err := AssignTask(t); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + } +} + +func UpdateSchedules() { + if err := Sched.Update(); err != nil { + log.Errorf(err.Error()) + return + } +} + +func (s *Scheduler) Start() error { + exec := cron.New(cron.WithSeconds()) + + // 启动cron服务 + s.cron.Start() + + // 更新任务列表 + if err := s.Update(); err != nil { + return err + } + + // 每30秒更新一次任务列表 + spec := "*/30 * * * * *" + if _, err := exec.AddFunc(spec, UpdateSchedules); err != nil { + return err + } + + return nil +} + +func (s *Scheduler) AddJob(job model.Schedule) error { + spec := job.Cron + + // 添加任务 + eid, err := s.cron.AddFunc(spec, AddTask(job)) + if err != nil { + return err + } + + // 更新EntryID + job.EntryId = eid + if err := job.Save(); err != nil { + return err + } + + return nil +} + +func (s *Scheduler) RemoveAll() { + entries := s.cron.Entries() + for i := 0; i < len(entries); i++ { + s.cron.Remove(entries[i].ID) + } +} + +func (s *Scheduler) Update() error { + // 删除所有定时任务 + s.RemoveAll() + + // 获取所有定时任务 + sList, err := model.GetScheduleList(nil) + if err != nil { + return err + } + + // 遍历任务列表 + for i := 0; i < len(sList); i++ { + // 单个任务 + job := sList[i] + + // 添加到定时任务 + if err := s.AddJob(job); err != nil { + return err + } + } + + return nil +} + +func InitScheduler() error { + Sched = &Scheduler{ + cron: cron.New(cron.WithSeconds()), + } + if err := Sched.Start(); err != nil { + return err + } + return nil +} diff --git a/backend/services/spider.go b/backend/services/spider.go new file mode 100644 index 00000000..bf144959 --- /dev/null +++ b/backend/services/spider.go @@ -0,0 +1,387 @@ +package services + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/lib/cron" + "crawlab/model" + "crawlab/utils" + "encoding/json" + "github.com/apex/log" + "github.com/globalsign/mgo/bson" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" + "github.com/spf13/viper" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime/debug" + "strings" +) + +type SpiderFileData struct { + FileName string + File []byte +} + +type SpiderUploadMessage struct { + FileId string + FileName string +} + +// 从项目目录中获取爬虫列表 +func GetSpidersFromDir() ([]model.Spider, error) { + // 爬虫项目目录路径 + srcPath := viper.GetString("spider.path") + + // 如果爬虫项目目录不存在,则创建一个 + if !utils.Exists(srcPath) { + if err := os.MkdirAll(srcPath, 0666); err != nil { + debug.PrintStack() + return []model.Spider{}, err + } + } + + // 获取爬虫项目目录下的所有子项 + items, err := ioutil.ReadDir(srcPath) + if err != nil { + debug.PrintStack() + return []model.Spider{}, err + } + + // 定义爬虫列表 + spiders := make([]model.Spider, 0) + + // 遍历所有子项 + for _, item := range items { + // 忽略不为目录的子项 + if !item.IsDir() { + continue + } + + // 忽略隐藏目录 + if strings.HasPrefix(item.Name(), ".") { + continue + } + + // 构造爬虫 + spider := model.Spider{ + Name: item.Name(), + DisplayName: item.Name(), + Type: constants.Customized, + Src: filepath.Join(srcPath, item.Name()), + FileId: bson.ObjectIdHex(constants.ObjectIdNull), + } + + // 
将爬虫加入列表 + spiders = append(spiders, spider) + } + + return spiders, nil +} + +// 将爬虫保存到数据库 +func SaveSpiders(spiders []model.Spider) error { + // 遍历爬虫列表 + for _, spider := range spiders { + // 忽略非自定义爬虫 + if spider.Type != constants.Customized { + continue + } + + // 如果该爬虫不存在于数据库,则保存爬虫到数据库 + s, c := database.GetCol("spiders") + defer s.Close() + var spider_ *model.Spider + if err := c.Find(bson.M{"src": spider.Src}).One(&spider_); err != nil { + // 不存在 + if err := spider.Add(); err != nil { + debug.PrintStack() + return err + } + } else { + // 存在 + } + } + + return nil +} + +// 更新爬虫 +func UpdateSpiders() { + // 从项目目录获取爬虫列表 + spiders, err := GetSpidersFromDir() + if err != nil { + log.Errorf(err.Error()) + return + } + + // 储存爬虫 + if err := SaveSpiders(spiders); err != nil { + log.Errorf(err.Error()) + return + } +} + +// 打包爬虫目录为zip文件 +func ZipSpider(spider model.Spider) (filePath string, err error) { + // 如果源文件夹不存在,抛错 + if !utils.Exists(spider.Src) { + debug.PrintStack() + return "", errors.New("source path does not exist") + } + + // 临时文件路径 + randomId := uuid.NewV4() + if err != nil { + debug.PrintStack() + return "", err + } + filePath = filepath.Join( + viper.GetString("other.tmppath"), + randomId.String()+".zip", + ) + + // 将源文件夹打包为zip文件 + d, err := os.Open(spider.Src) + if err != nil { + debug.PrintStack() + return filePath, err + } + var files []*os.File + files = append(files, d) + if err := utils.Compress(files, filePath); err != nil { + return filePath, err + } + + return filePath, nil +} + +// 上传zip文件到GridFS +func UploadToGridFs(spider model.Spider, fileName string, filePath string) (fid bson.ObjectId, err error) { + fid = "" + + // 读取zip文件 + file, err := os.OpenFile(filePath, os.O_RDONLY, 0777) + fileBytes, err := ioutil.ReadAll(file) + if err != nil { + debug.PrintStack() + return + } + if err = file.Close(); err != nil { + debug.PrintStack() + return + } + + // 删除zip文件 + if err = os.Remove(filePath); err != nil { + debug.PrintStack() + return + } + + // 获取MongoDB GridFS连接 + s, gf := database.GetGridFs("files") + defer s.Close() + + // 如果存在FileId删除GridFS上的老文件 + if !utils.IsObjectIdNull(spider.FileId) { + if err = gf.RemoveId(spider.FileId); err != nil { + debug.PrintStack() + } + } + + // 创建一个新GridFS文件 + f, err := gf.Create(fileName) + if err != nil { + debug.PrintStack() + return + } + + // 将文件写入到GridFS + if _, err = f.Write(fileBytes); err != nil { + debug.PrintStack() + return + } + + // 关闭文件,提交写入 + if err = f.Close(); err != nil { + return "", err + } + + // 文件ID + fid = f.Id().(bson.ObjectId) + + return fid, nil +} + +// 发布所有爬虫 +func PublishAllSpiders() error { + // 获取爬虫列表 + spiders, err := model.GetSpiderList(nil, 0, constants.Infinite) + if err != nil { + log.Errorf(err.Error()) + return err + } + + // 遍历爬虫列表 + for _, spider := range spiders { + // 发布爬虫 + if err := PublishSpider(spider); err != nil { + log.Errorf(err.Error()) + return err + } + } + + return nil +} + +func PublishAllSpidersJob() { + if err := PublishAllSpiders(); err != nil { + log.Errorf(err.Error()) + } +} + +// 发布爬虫 +// 1. 将源文件夹打包为zip文件 +// 2. 上传zip文件到GridFS +// 3. 
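// A minimal, self-contained sketch of storing a spider package in GridFS with
// globalsign/mgo, using the same Create/Write/Close sequence as UploadToGridFs.
// The connection URL, database name, file name and contents are placeholders.
package main

import (
	"fmt"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

func main() {
	session, err := mgo.Dial("mongodb://localhost:27017")
	if err != nil {
		panic(err)
	}
	defer session.Close()

	gf := session.DB("crawlab").GridFS("files")

	// Create a GridFS file, write the zip bytes, and close to commit the chunks.
	f, err := gf.Create("spider.zip")
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("zip bytes go here")); err != nil {
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}

	// The generated id is what gets stored on the Spider document as FileId.
	fid := f.Id().(bson.ObjectId)
	fmt.Println(fid.Hex())
}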
发布消息给工作节点 +func PublishSpider(spider model.Spider) (err error) { + // 将源文件夹打包为zip文件 + filePath, err := ZipSpider(spider) + if err != nil { + return err + } + + // 上传zip文件到GridFS + fileName := filepath.Base(spider.Src) + ".zip" + fid, err := UploadToGridFs(spider, fileName, filePath) + if err != nil { + return err + } + + // 保存FileId + spider.FileId = fid + if err := spider.Save(); err != nil { + return err + } + + // 发布消息给工作节点 + msg := SpiderUploadMessage{ + FileId: fid.Hex(), + FileName: fileName, + } + msgStr, err := json.Marshal(msg) + if err != nil { + return + } + channel := "files:upload" + if err = database.Publish(channel, string(msgStr)); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + return +} + +// 上传爬虫回调 +func OnFileUpload(channel string, msgStr string) { + s, gf := database.GetGridFs("files") + defer s.Close() + + // 反序列化消息 + var msg SpiderUploadMessage + if err := json.Unmarshal([]byte(msgStr), &msg); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 从GridFS获取该文件 + f, err := gf.OpenId(bson.ObjectIdHex(msg.FileId)) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + defer f.Close() + + // 生成唯一ID + randomId := uuid.NewV4() + + // 创建临时文件 + tmpFilePath := filepath.Join(viper.GetString("other.tmppath"), randomId.String()+".zip") + tmpFile, err := os.OpenFile(tmpFilePath, os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + defer tmpFile.Close() + + // 将该文件写入临时文件 + if _, err := io.Copy(tmpFile, f); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 解压缩临时文件到目标文件夹 + dstPath := filepath.Join( + viper.GetString("spider.path"), + //strings.Replace(msg.FileName, ".zip", "", -1), + ) + if err := utils.DeCompress(tmpFile, dstPath); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 关闭临时文件 + if err := tmpFile.Close(); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + + // 删除临时文件 + if err := os.Remove(tmpFilePath); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } +} + +// 启动爬虫服务 +func InitSpiderService() error { + // 构造定时任务执行器 + c := cron.New(cron.WithSeconds()) + + if IsMaster() { + // 主节点 + + // 每5秒更新一次爬虫信息 + if _, err := c.AddFunc("*/5 * * * * *", UpdateSpiders); err != nil { + return err + } + + // 每60秒同步爬虫给工作节点 + if _, err := c.AddFunc("0 * * * * *", PublishAllSpidersJob); err != nil { + return err + } + } else { + // 非主节点 + + // 订阅文件上传 + channel := "files:upload" + var sub database.Subscriber + sub.Connect() + sub.Subscribe(channel, OnFileUpload) + } + + // 启动定时任务 + c.Start() + + return nil +} diff --git a/backend/services/task.go b/backend/services/task.go new file mode 100644 index 00000000..306faf19 --- /dev/null +++ b/backend/services/task.go @@ -0,0 +1,444 @@ +package services + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/lib/cron" + "crawlab/model" + "crawlab/utils" + "encoding/json" + "errors" + "github.com/apex/log" + "github.com/spf13/viper" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/debug" + "strconv" + "time" +) + +var Exec *Executor + +// 任务执行锁 +var LockList []bool + +// 任务消息 +type TaskMessage struct { + Id string + Cmd string +} + +// 序列化任务消息 +func (m *TaskMessage) ToString() (string, error) { + data, err := json.Marshal(&m) + if err != nil { + return "", err + } + return string(data), err +} + +// 任务执行器 +type Executor struct { + Cron *cron.Cron +} + +// 启动任务执行器 +func (ex 
*Executor) Start() error { + // 启动cron服务 + ex.Cron.Start() + + // 加入执行器到定时任务 + spec := "0/1 * * * * *" // 每秒执行一次 + for i := 0; i < viper.GetInt("task.workers"); i++ { + // WorkerID + id := i + + // 初始化任务锁 + LockList = append(LockList, false) + + // 加入定时任务 + _, err := ex.Cron.AddFunc(spec, GetExecuteTaskFunc(id)) + if err != nil { + return err + } + } + + return nil +} + +var TaskExecChanMap = utils.NewChanMap() + +// 派发任务 +func AssignTask(task model.Task) error { + // 生成任务信息 + msg := TaskMessage{ + Id: task.Id, + } + + // 序列化 + msgStr, err := msg.ToString() + if err != nil { + return err + } + + // 队列名称 + var queue string + if utils.IsObjectIdNull(task.NodeId) { + queue = "tasks:public" + } else { + queue = "tasks:node:" + task.NodeId.Hex() + } + + // 任务入队 + if err := database.RedisClient.RPush(queue, msgStr); err != nil { + return err + } + return nil +} + +// 执行shell命令 +func ExecuteShellCmd(cmdStr string, cwd string, t model.Task, s model.Spider) (err error) { + log.Infof("cwd: " + cwd) + log.Infof("cmd: " + cmdStr) + + // 生成执行命令 + var cmd *exec.Cmd + if runtime.GOOS == constants.Windows { + cmd = exec.Command("cmd", "/C", cmdStr) + } else { + cmd = exec.Command("sh", "-c", cmdStr) + } + + // 工作目录 + cmd.Dir = cwd + + // 指定stdout, stderr日志位置 + fLog, err := os.Create(t.LogPath) + if err != nil { + HandleTaskError(t, err) + return err + } + defer fLog.Close() + cmd.Stdout = fLog + cmd.Stderr = fLog + + // 添加环境变量 + cmd.Env = append(cmd.Env, "CRAWLAB_TASK_ID="+t.Id) + cmd.Env = append(cmd.Env, "CRAWLAB_COLLECTION="+s.Col) + + // 起一个goroutine来监控进程 + ch := TaskExecChanMap.ChanBlocked(t.Id) + go func() { + // 传入信号,此处阻塞 + signal := <-ch + + if signal == constants.TaskCancel { + // 取消进程 + if err := cmd.Process.Kill(); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + t.Status = constants.StatusCancelled + } else if signal == constants.TaskFinish { + // 完成进程 + t.Status = constants.StatusFinished + } + + // 保存任务 + t.FinishTs = time.Now() + if err := t.Save(); err != nil { + log.Infof(err.Error()) + debug.PrintStack() + return + } + }() + + // 开始执行 + if err := cmd.Run(); err != nil { + HandleTaskError(t, err) + return err + } + ch <- constants.TaskFinish + + return nil +} + +// 生成日志目录 +func MakeLogDir(t model.Task) (fileDir string, err error) { + // 日志目录 + fileDir = filepath.Join(viper.GetString("log.path"), t.SpiderId.Hex()) + + // 如果日志目录不存在,生成该目录 + if !utils.Exists(fileDir) { + if err := os.MkdirAll(fileDir, 0777); err != nil { + debug.PrintStack() + return "", err + } + } + + return fileDir, nil +} + +// 获取日志文件路径 +func GetLogFilePaths(fileDir string) (filePath string) { + // 时间戳 + ts := time.Now() + tsStr := ts.Format("20060102150405") + + // stdout日志文件 + filePath = filepath.Join(fileDir, tsStr+".log") + + return filePath +} + +// 生成执行任务方法 +func GetExecuteTaskFunc(id int) func() { + return func() { + ExecuteTask(id) + } +} + +func GetWorkerPrefix(id int) string { + return "[Worker " + strconv.Itoa(id) + "] " +} + +// 执行任务 +func ExecuteTask(id int) { + if LockList[id] { + log.Debugf(GetWorkerPrefix(id) + "正在执行任务...") + return + } + + // 上锁 + LockList[id] = true + + // 解锁(延迟执行) + defer func() { + LockList[id] = false + }() + + // 开始计时 + tic := time.Now() + + // 获取当前节点 + node, err := GetCurrentNode() + if err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + + // 公共队列 + queuePub := "tasks:public" + + // 节点队列 + queueCur := "tasks:node:" + node.Id.Hex() + + // 节点队列任务 + var msg string + msg, err = database.RedisClient.LPop(queueCur) + if err != nil { + if 
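// A minimal, self-contained sketch of the shell-execution shape used by ExecuteShellCmd:
// run the crawler via `sh -c`, point stdout/stderr at a log file, inject task environment
// variables, and kill the process if a cancel signal arrives on a channel. The command,
// paths and environment values are placeholders.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"time"
)

func main() {
	logFile, err := os.Create("/tmp/task.log")
	if err != nil {
		panic(err)
	}
	defer logFile.Close()

	cmd := exec.Command("sh", "-c", "echo start && sleep 5 && echo done")
	cmd.Dir = "/tmp"
	cmd.Stdout = logFile
	cmd.Stderr = logFile
	cmd.Env = append(os.Environ(), "CRAWLAB_TASK_ID=example-task")

	cancel := make(chan struct{})
	go func() {
		<-cancel
		_ = cmd.Process.Kill() // cancellation path: stop the running crawler
	}()

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Simulate a cancel request arriving while the task is still running.
	time.AfterFunc(time.Second, func() { close(cancel) })
	fmt.Println("wait result:", cmd.Wait())
}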
msg == "" { + // 节点队列没有任务,获取公共队列任务 + msg, err = database.RedisClient.LPop(queuePub) + if err != nil { + if msg == "" { + // 公共队列没有任务 + log.Debugf(GetWorkerPrefix(id) + "没有任务...") + return + } else { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + debug.PrintStack() + return + } + } + } else { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + debug.PrintStack() + return + } + } + + // 反序列化 + tMsg := TaskMessage{} + if err := json.Unmarshal([]byte(msg), &tMsg); err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + debug.PrintStack() + return + } + + // 获取任务 + t, err := model.GetTask(tMsg.Id) + if err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + + // 获取爬虫 + spider, err := t.GetSpider() + if err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + + // 创建日志目录 + fileDir, err := MakeLogDir(t) + if err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + + // 获取日志文件路径 + t.LogPath = GetLogFilePaths(fileDir) + + // 创建日志目录文件夹 + fileStdoutDir := filepath.Dir(t.LogPath) + if !utils.Exists(fileStdoutDir) { + if err := os.MkdirAll(fileStdoutDir, os.ModePerm); err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + } + + // 工作目录 + cwd := filepath.Join( + viper.GetString("spider.path"), + spider.Name, + ) + + // 执行命令 + cmd := spider.Cmd + if t.Cmd != "" { + cmd = t.Cmd + } + + // 任务赋值 + t.NodeId = node.Id // 任务节点信息 + t.StartTs = time.Now() // 任务开始时间 + t.Status = constants.StatusRunning // 任务状态 + + // 开始执行任务 + log.Infof(GetWorkerPrefix(id) + "开始执行任务(ID:" + t.Id + ")") + + // 储存任务 + if err := t.Save(); err != nil { + log.Errorf(err.Error()) + HandleTaskError(t, err) + return + } + + // 执行Shell命令 + if err := ExecuteShellCmd(cmd, cwd, t, spider); err != nil { + log.Errorf(GetWorkerPrefix(id) + err.Error()) + return + } + + // 结束计时 + toc := time.Now() + + // 统计时长 + duration := toc.Sub(tic).Seconds() + durationStr := strconv.FormatFloat(duration, 'f', 6, 64) + log.Infof(GetWorkerPrefix(id) + "任务(ID:" + t.Id + ")" + "执行完毕. 
消耗时间:" + durationStr + "秒") +} + +func GetTaskLog(id string) (logStr string, err error) { + task, err := model.GetTask(id) + if err != nil { + return "", err + } + + logStr = "" + if IsMaster() { + // 若为主节点,获取本机日志 + logBytes, err := GetLocalLog(task.LogPath) + logStr = string(logBytes) + if err != nil { + log.Errorf(err.Error()) + return "", err + } + logStr = string(logBytes) + } else { + // 若不为主节点,获取远端日志 + logStr, err = GetRemoteLog(task) + if err != nil { + log.Errorf(err.Error()) + return "", err + } + } + + return logStr, nil +} + +func CancelTask(id string) (err error) { + // 获取任务 + task, err := model.GetTask(id) + if err != nil { + return err + } + + // 如果任务状态不为pending或running,返回错误 + if task.Status != constants.StatusPending && task.Status != constants.StatusRunning { + return errors.New("task is not cancellable") + } + + // 获取当前节点(默认当前节点为主节点) + node, err := GetCurrentNode() + if err != nil { + return err + } + + if node.Id == task.NodeId { + // 任务节点为主节点 + + // 获取任务执行频道 + ch := TaskExecChanMap.ChanBlocked(id) + + // 发出取消进程信号 + ch <- constants.TaskCancel + } else { + // 任务节点为工作节点 + + // 序列化消息 + msg := NodeMessage{ + Type: constants.MsgTypeCancelTask, + TaskId: id, + } + msgBytes, err := json.Marshal(&msg) + if err != nil { + return err + } + + // 发布消息 + if err := database.Publish("nodes:"+task.NodeId.Hex(), string(msgBytes)); err != nil { + return err + } + } + + return nil +} + +func HandleTaskError(t model.Task, err error) { + t.Status = constants.StatusError + t.Error = err.Error() + t.FinishTs = time.Now() + if err := t.Save(); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + debug.PrintStack() +} + +func InitTaskExecutor() error { + c := cron.New(cron.WithSeconds()) + Exec = &Executor{ + Cron: c, + } + if err := Exec.Start(); err != nil { + return err + } + return nil +} diff --git a/backend/test/test.http b/backend/test/test.http new file mode 100644 index 00000000..bde72001 --- /dev/null +++ b/backend/test/test.http @@ -0,0 +1,49 @@ +# For a quick start check out our HTTP Requests collection (Tools|HTTP Client|Open HTTP Requests Collection). 
+#
+# Following HTTP Request Live Templates are available:
+# * 'gtrp' and 'gtr' create a GET request with or without query parameters;
+# * 'ptr' and 'ptrp' create a POST request with a simple or parameter-like body;
+# * 'mptr' and 'fptr' create a POST request to submit a form with a text or file field (multipart/form-data);
+PUT http://localhost:8000/schedules
+Content-Type: application/json
+#Content-Type: application/x-www-form-urlencoded
+
+{
+  "cron": "*/10 * * * * *",
+  "spider_id": "5d2ead494bdee04810bb7654"
+}
+
+### cron=*/10 * * * * *&spider_id=5d2ead494bdee04810bb7654
+
+DELETE http://localhost:8000/schedules/5d31b5334bdee082f6e69e7a
+
+###
+
+DELETE http://localhost:8000/spiders/5d31a6ed4bdee07b25d70c7b
+
+###
+
+
+###
+
+PUT http://localhost:8000/tasks
+Content-Type: application/json
+
+{
+  "spider_id": "5d32f20f4bdee0f4aae526de",
+  "node_id": "5d3343cc4bdee01cb772883e"
+}
+
+###
+
+POST http://localhost:8000/spiders/5d32ad224bdee0a60ee5639c/publish
+
+###
+
+POST http://localhost:8000/spiders
+
+###
+
+GET http://localhost:8000/tasks/bb79ec82-1e8f-41c4-858a-5cb682396409/log
+
+###
diff --git a/backend/utils/chan.go b/backend/utils/chan.go
new file mode 100644
index 00000000..3e9fde61
--- /dev/null
+++ b/backend/utils/chan.go
@@ -0,0 +1,27 @@
+package utils
+
+type ChanMap struct {
+    m map[string]chan string
+}
+
+func NewChanMap() *ChanMap {
+    return &ChanMap{m: make(map[string]chan string)}
+}
+
+func (cm *ChanMap) Chan(key string) chan string {
+    if ch, ok := cm.m[key]; ok {
+        return ch
+    }
+    ch := make(chan string, 10)
+    cm.m[key] = ch
+    return ch
+}
+
+func (cm *ChanMap) ChanBlocked(key string) chan string {
+    if ch, ok := cm.m[key]; ok {
+        return ch
+    }
+    ch := make(chan string)
+    cm.m[key] = ch
+    return ch
+}
diff --git a/backend/utils/file.go b/backend/utils/file.go
new file mode 100644
index 00000000..77480728
--- /dev/null
+++ b/backend/utils/file.go
@@ -0,0 +1,200 @@
+package utils
+
+import (
+    "archive/zip"
+    "github.com/apex/log"
+    "io"
+    "os"
+    "path/filepath"
+    "runtime/debug"
+)
+
+// Exists checks whether the file or directory at the given path exists
+func Exists(path string) bool {
+    _, err := os.Stat(path) // os.Stat returns the file info
+    if err != nil {
+        if os.IsExist(err) {
+            return true
+        }
+        return false
+    }
+    return true
+}
+
+// IsDir checks whether the given path is a directory
+func IsDir(path string) bool {
+    s, err := os.Stat(path)
+    if err != nil {
+        return false
+    }
+    return s.IsDir()
+}
+
+// IsFile checks whether the given path is a file
+func IsFile(path string) bool {
+    return !IsDir(path)
+}
+
+/**
+@tarFile: path of the zip archive to extract
+@dest: directory to extract into
+*/
+func DeCompressByPath(tarFile, dest string) error {
+    srcFile, err := os.Open(tarFile)
+    if err != nil {
+        return err
+    }
+    defer srcFile.Close()
+    return DeCompress(srcFile, dest)
+}
+
+/**
+@srcFile: zip archive to extract
+@dstPath: directory the extracted files are written to
+*/
+func DeCompress(srcFile *os.File, dstPath string) error {
+    // Create the destination directory if it does not exist
+    if !Exists(dstPath) {
+        if err := os.MkdirAll(dstPath, os.ModePerm); err != nil {
+            debug.PrintStack()
+            return err
+        }
+    }
+
+    // Open the zip archive
+    zipFile, err := zip.OpenReader(srcFile.Name())
+    if err != nil {
+        log.Errorf("Unzip File Error:" + err.Error())
+        debug.PrintStack()
+        return err
+    }
+    defer zipFile.Close()
+
+    // Iterate over all files and directories inside the archive
+    for _, innerFile := range zipFile.File {
+        // Get the file info
+        info := innerFile.FileInfo()
+
+        // If it is a directory, create it
+        if info.IsDir() {
+            err = os.MkdirAll(filepath.Join(dstPath, innerFile.Name), os.ModePerm)
+            if err != nil {
+                log.Errorf("Unzip File Error : " + err.Error())
+                debug.PrintStack()
+                return err
+            }
+            continue
+        }
+
+        // Create the parent directory if it does not exist
+        dirPath := filepath.Dir(innerFile.Name)
+        if !Exists(dirPath) {
+            err = os.MkdirAll(filepath.Join(dstPath, dirPath), os.ModePerm)
+            if err != nil {
+                log.Errorf("Unzip File Error : " + err.Error())
+                debug.PrintStack()
+                return err
+            }
+        }
+
+        // Open the file inside the archive
+        srcFile, err := innerFile.Open()
+        if err != nil {
+            log.Errorf("Unzip File Error : " + err.Error())
+            debug.PrintStack()
+            continue
+        }
+
+        // Create the target file
+        newFile, err := os.Create(filepath.Join(dstPath, innerFile.Name))
+        if err != nil {
+            log.Errorf("Unzip File Error : " + err.Error())
+            debug.PrintStack()
+            continue
+        }
+        defer newFile.Close()
+
+        // Copy the archived file into the target file
+        if _, err := io.Copy(newFile, srcFile); err != nil {
+            debug.PrintStack()
+            return err
+        }
+
+        // Close the source file
+        if err := srcFile.Close(); err != nil {
+            debug.PrintStack()
+            return err
+        }
+
+        // Close the target file
+        if err := newFile.Close(); err != nil {
+            debug.PrintStack()
+            return err
+        }
+    }
+    return nil
+}
+
+// Compress zips the given files (which may be files or directories from
+// different parent directories) into an archive at dest
+func Compress(files []*os.File, dest string) error {
+    d, err := os.Create(dest)
+    if err != nil {
+        debug.PrintStack()
+        return err
+    }
+    defer d.Close()
+    w := zip.NewWriter(d)
+    defer w.Close()
+    for _, file := range files {
+        err := _Compress(file, "", w)
+        if err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func _Compress(file *os.File, prefix string, zw *zip.Writer) error {
+    info, err := file.Stat()
+    if err != nil {
+        debug.PrintStack()
+        return err
+    }
+    if info.IsDir() {
+        prefix = prefix + "/" + info.Name()
+        fileInfos, err := file.Readdir(-1)
+        if err != nil {
+            debug.PrintStack()
+            return err
+        }
+        for _, fi := range fileInfos {
+            f, err := os.Open(file.Name() + "/" + fi.Name())
+            if err != nil {
+                debug.PrintStack()
+                return err
+            }
+            err = _Compress(f, prefix, zw)
+            if err != nil {
+                debug.PrintStack()
+                return err
+            }
+        }
+    } else {
+        header, err := zip.FileInfoHeader(info)
+        if err != nil {
+            debug.PrintStack()
+            return err
+        }
+        header.Name = prefix + "/" + header.Name
+        writer, err := zw.CreateHeader(header)
+        if err != nil {
+            debug.PrintStack()
+            return err
+        }
+        _, err = io.Copy(writer, file)
+        file.Close()
+        if err != nil {
+            debug.PrintStack()
+            return err
+        }
+    }
+    return nil
+}
diff --git a/backend/utils/model.go b/backend/utils/model.go
new file mode 100644
index 00000000..867ae620
--- /dev/null
+++ b/backend/utils/model.go
@@ -0,0 +1,26 @@
+package utils
+
+import (
+    "crawlab/constants"
+    "github.com/globalsign/mgo/bson"
+    "strconv"
+    "time"
+)
+
+func IsObjectIdNull(id bson.ObjectId) bool {
+    return id.Hex() == constants.ObjectIdNull
+}
+
+func InterfaceToString(value interface{}) string {
+    switch value.(type) {
+    case bson.ObjectId:
+        return value.(bson.ObjectId).Hex()
+    case string:
+        return value.(string)
+    case int:
+        return strconv.Itoa(value.(int))
+    case time.Time:
+        return value.(time.Time).String()
+    }
+    return ""
+}
diff --git a/backend/vendor/github.com/apex/log/.gitignore b/backend/vendor/github.com/apex/log/.gitignore
new file mode 100644
index 00000000..7a6353d6
--- /dev/null
+++ b/backend/vendor/github.com/apex/log/.gitignore
@@ -0,0 +1 @@
+.envrc
diff --git a/backend/vendor/github.com/apex/log/History.md b/backend/vendor/github.com/apex/log/History.md
new file mode 100644
index 00000000..c88be39e
--- /dev/null
+++ b/backend/vendor/github.com/apex/log/History.md
@@ -0,0 +1,12 @@
+
+v1.1.1 / 2019-06-24
+===================
+
+ * add go.mod
+ * add rough pass at apexlogs handler
+
+v1.1.0 / 2018-10-11
+===================
+
+ * fix: cli handler to show non-string fields appropriately
+ * fix: cli using fatih/color to better support windows
diff --git a/backend/vendor/github.com/apex/log/LICENSE
b/backend/vendor/github.com/apex/log/LICENSE new file mode 100644 index 00000000..af718006 --- /dev/null +++ b/backend/vendor/github.com/apex/log/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 TJ Holowaychuk tj@tjholowaychuk.com + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/backend/vendor/github.com/apex/log/Makefile b/backend/vendor/github.com/apex/log/Makefile new file mode 100644 index 00000000..f948e88e --- /dev/null +++ b/backend/vendor/github.com/apex/log/Makefile @@ -0,0 +1,2 @@ + +include github.com/tj/make/golang diff --git a/backend/vendor/github.com/apex/log/Readme.md b/backend/vendor/github.com/apex/log/Readme.md new file mode 100644 index 00000000..4a48b671 --- /dev/null +++ b/backend/vendor/github.com/apex/log/Readme.md @@ -0,0 +1,29 @@ + +![Structured logging for golang](assets/title.png) + +Package log implements a simple structured logging API inspired by Logrus, designed with centralization in mind. Read more on [Medium](https://medium.com/@tjholowaychuk/apex-log-e8d9627f4a9a#.rav8yhkud). + +## Handlers + +- __cli__ – human-friendly CLI output +- __discard__ – discards all logs +- __es__ – Elasticsearch handler +- __graylog__ – Graylog handler +- __json__ – JSON output handler +- __kinesis__ – AWS Kinesis handler +- __level__ – level filter handler +- __logfmt__ – logfmt plain-text formatter +- __memory__ – in-memory handler for tests +- __multi__ – fan-out to multiple handlers +- __papertrail__ – Papertrail handler +- __text__ – human-friendly colored output +- __delta__ – outputs the delta between log calls and spinner + +--- + +[![Build Status](https://semaphoreci.com/api/v1/projects/d8a8b1c0-45b0-4b89-b066-99d788d0b94c/642077/badge.svg)](https://semaphoreci.com/tj/log) +[![GoDoc](https://godoc.org/github.com/apex/log?status.svg)](https://godoc.org/github.com/apex/log) +![](https://img.shields.io/badge/license-MIT-blue.svg) +![](https://img.shields.io/badge/status-stable-green.svg) + + diff --git a/backend/vendor/github.com/apex/log/default.go b/backend/vendor/github.com/apex/log/default.go new file mode 100644 index 00000000..22134862 --- /dev/null +++ b/backend/vendor/github.com/apex/log/default.go @@ -0,0 +1,45 @@ +package log + +import ( + "bytes" + "fmt" + "log" + "sort" +) + +// field used for sorting. +type field struct { + Name string + Value interface{} +} + +// by sorts fields by name. 
+type byName []field + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +// handleStdLog outpouts to the stlib log. +func handleStdLog(e *Entry) error { + level := levelNames[e.Level] + + var fields []field + + for k, v := range e.Fields { + fields = append(fields, field{k, v}) + } + + sort.Sort(byName(fields)) + + var b bytes.Buffer + fmt.Fprintf(&b, "%5s %-25s", level, e.Message) + + for _, f := range fields { + fmt.Fprintf(&b, " %s=%v", f.Name, f.Value) + } + + log.Println(b.String()) + + return nil +} diff --git a/backend/vendor/github.com/apex/log/doc.go b/backend/vendor/github.com/apex/log/doc.go new file mode 100644 index 00000000..0331e8e1 --- /dev/null +++ b/backend/vendor/github.com/apex/log/doc.go @@ -0,0 +1,10 @@ +/* +Package log implements a simple structured logging API designed with few assumptions. Designed for +centralized logging solutions such as Kinesis which require encoding and decoding before fanning-out +to handlers. + +You may use this package with inline handlers, much like Logrus, however a centralized solution +is recommended so that apps do not need to be re-deployed to add or remove logging service +providers. +*/ +package log diff --git a/backend/vendor/github.com/apex/log/entry.go b/backend/vendor/github.com/apex/log/entry.go new file mode 100644 index 00000000..9f0a5e1f --- /dev/null +++ b/backend/vendor/github.com/apex/log/entry.go @@ -0,0 +1,172 @@ +package log + +import ( + "fmt" + "os" + "strings" + "time" +) + +// assert interface compliance. +var _ Interface = (*Entry)(nil) + +// Now returns the current time. +var Now = time.Now + +// Entry represents a single log entry. +type Entry struct { + Logger *Logger `json:"-"` + Fields Fields `json:"fields"` + Level Level `json:"level"` + Timestamp time.Time `json:"timestamp"` + Message string `json:"message"` + start time.Time + fields []Fields +} + +// NewEntry returns a new entry for `log`. +func NewEntry(log *Logger) *Entry { + return &Entry{ + Logger: log, + } +} + +// WithFields returns a new entry with `fields` set. +func (e *Entry) WithFields(fields Fielder) *Entry { + f := []Fields{} + f = append(f, e.fields...) + f = append(f, fields.Fields()) + return &Entry{ + Logger: e.Logger, + fields: f, + } +} + +// WithField returns a new entry with the `key` and `value` set. +func (e *Entry) WithField(key string, value interface{}) *Entry { + return e.WithFields(Fields{key: value}) +} + +// WithError returns a new entry with the "error" set to `err`. +// +// The given error may implement .Fielder, if it does the method +// will add all its `.Fields()` into the returned entry. +func (e *Entry) WithError(err error) *Entry { + ctx := e.WithField("error", err.Error()) + + if s, ok := err.(stackTracer); ok { + frame := s.StackTrace()[0] + + name := fmt.Sprintf("%n", frame) + file := fmt.Sprintf("%+s", frame) + line := fmt.Sprintf("%d", frame) + + parts := strings.Split(file, "\n\t") + if len(parts) > 1 { + file = parts[1] + } + + ctx = ctx.WithField("source", fmt.Sprintf("%s: %s:%s", name, file, line)) + } + + if f, ok := err.(Fielder); ok { + ctx = ctx.WithFields(f.Fields()) + } + + return ctx +} + +// Debug level message. +func (e *Entry) Debug(msg string) { + e.Logger.log(DebugLevel, e, msg) +} + +// Info level message. +func (e *Entry) Info(msg string) { + e.Logger.log(InfoLevel, e, msg) +} + +// Warn level message. 
+func (e *Entry) Warn(msg string) { + e.Logger.log(WarnLevel, e, msg) +} + +// Error level message. +func (e *Entry) Error(msg string) { + e.Logger.log(ErrorLevel, e, msg) +} + +// Fatal level message, followed by an exit. +func (e *Entry) Fatal(msg string) { + e.Logger.log(FatalLevel, e, msg) + os.Exit(1) +} + +// Debugf level formatted message. +func (e *Entry) Debugf(msg string, v ...interface{}) { + e.Debug(fmt.Sprintf(msg, v...)) +} + +// Infof level formatted message. +func (e *Entry) Infof(msg string, v ...interface{}) { + e.Info(fmt.Sprintf(msg, v...)) +} + +// Warnf level formatted message. +func (e *Entry) Warnf(msg string, v ...interface{}) { + e.Warn(fmt.Sprintf(msg, v...)) +} + +// Errorf level formatted message. +func (e *Entry) Errorf(msg string, v ...interface{}) { + e.Error(fmt.Sprintf(msg, v...)) +} + +// Fatalf level formatted message, followed by an exit. +func (e *Entry) Fatalf(msg string, v ...interface{}) { + e.Fatal(fmt.Sprintf(msg, v...)) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func (e *Entry) Trace(msg string) *Entry { + e.Info(msg) + v := e.WithFields(e.Fields) + v.Message = msg + v.start = time.Now() + return v +} + +// Stop should be used with Trace, to fire off the completion message. When +// an `err` is passed the "error" field is set, and the log level is error. +func (e *Entry) Stop(err *error) { + if err == nil || *err == nil { + e.WithField("duration", time.Since(e.start)).Info(e.Message) + } else { + e.WithField("duration", time.Since(e.start)).WithError(*err).Error(e.Message) + } +} + +// mergedFields returns the fields list collapsed into a single map. +func (e *Entry) mergedFields() Fields { + f := Fields{} + + for _, fields := range e.fields { + for k, v := range fields { + f[k] = v + } + } + + return f +} + +// finalize returns a copy of the Entry with Fields merged. 
+func (e *Entry) finalize(level Level, msg string) *Entry { + return &Entry{ + Logger: e.Logger, + Fields: e.mergedFields(), + Level: level, + Message: msg, + Timestamp: Now(), + } +} diff --git a/backend/vendor/github.com/apex/log/go.mod b/backend/vendor/github.com/apex/log/go.mod new file mode 100644 index 00000000..efc674b8 --- /dev/null +++ b/backend/vendor/github.com/apex/log/go.mod @@ -0,0 +1,25 @@ +module github.com/apex/log + +go 1.12 + +require ( + github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a + github.com/aphistic/sweet v0.2.0 // indirect + github.com/aws/aws-sdk-go v1.20.6 + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 + github.com/fatih/color v1.7.0 + github.com/go-logfmt/logfmt v0.4.0 + github.com/google/uuid v1.1.1 // indirect + github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 // indirect + github.com/mattn/go-colorable v0.1.2 + github.com/pkg/errors v0.8.1 + github.com/rogpeppe/fastuuid v1.1.0 + github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect + github.com/smartystreets/gunit v1.0.0 // indirect + github.com/stretchr/testify v1.3.0 + github.com/tj/assert v0.0.0-20171129193455-018094318fb0 + github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 + github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b + github.com/tj/go-spin v1.1.0 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect +) diff --git a/backend/vendor/github.com/apex/log/go.sum b/backend/vendor/github.com/apex/log/go.sum new file mode 100644 index 00000000..972a66db --- /dev/null +++ b/backend/vendor/github.com/apex/log/go.sum @@ -0,0 +1,90 @@ +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0 h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/aws/aws-sdk-go v1.20.6 h1:kmy4Gvdlyez1fV4kw5RYxZzWKVyuHZHgPWeU/YvRsV4= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 
h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/fastuuid v1.1.0 h1:INyGLmTCMGFr6OVIb977ghJvABML2CMVjPoRfNDdYDo= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/gunit v1.0.0 h1:RyPDUFcJbvtXlhJPk7v+wnxZRY2EUokhEYl2EJOPToI= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0 
h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/backend/vendor/github.com/apex/log/interface.go b/backend/vendor/github.com/apex/log/interface.go new file mode 100644 index 00000000..c92ebea7 --- /dev/null +++ b/backend/vendor/github.com/apex/log/interface.go @@ -0,0 +1,19 @@ +package log + +// Interface represents the API of 
both Logger and Entry. +type Interface interface { + WithFields(fields Fielder) *Entry + WithField(key string, value interface{}) *Entry + WithError(err error) *Entry + Debug(msg string) + Info(msg string) + Warn(msg string) + Error(msg string) + Fatal(msg string) + Debugf(msg string, v ...interface{}) + Infof(msg string, v ...interface{}) + Warnf(msg string, v ...interface{}) + Errorf(msg string, v ...interface{}) + Fatalf(msg string, v ...interface{}) + Trace(msg string) *Entry +} diff --git a/backend/vendor/github.com/apex/log/levels.go b/backend/vendor/github.com/apex/log/levels.go new file mode 100644 index 00000000..7d43a436 --- /dev/null +++ b/backend/vendor/github.com/apex/log/levels.go @@ -0,0 +1,81 @@ +package log + +import ( + "bytes" + "errors" + "strings" +) + +// ErrInvalidLevel is returned if the severity level is invalid. +var ErrInvalidLevel = errors.New("invalid level") + +// Level of severity. +type Level int + +// Log levels. +const ( + InvalidLevel Level = iota - 1 + DebugLevel + InfoLevel + WarnLevel + ErrorLevel + FatalLevel +) + +var levelNames = [...]string{ + DebugLevel: "debug", + InfoLevel: "info", + WarnLevel: "warn", + ErrorLevel: "error", + FatalLevel: "fatal", +} + +var levelStrings = map[string]Level{ + "debug": DebugLevel, + "info": InfoLevel, + "warn": WarnLevel, + "warning": WarnLevel, + "error": ErrorLevel, + "fatal": FatalLevel, +} + +// String implementation. +func (l Level) String() string { + return levelNames[l] +} + +// MarshalJSON implementation. +func (l Level) MarshalJSON() ([]byte, error) { + return []byte(`"` + l.String() + `"`), nil +} + +// UnmarshalJSON implementation. +func (l *Level) UnmarshalJSON(b []byte) error { + v, err := ParseLevel(string(bytes.Trim(b, `"`))) + if err != nil { + return err + } + + *l = v + return nil +} + +// ParseLevel parses level string. +func ParseLevel(s string) (Level, error) { + l, ok := levelStrings[strings.ToLower(s)] + if !ok { + return InvalidLevel, ErrInvalidLevel + } + + return l, nil +} + +// MustParseLevel parses level string or panics. +func MustParseLevel(s string) Level { + l, err := ParseLevel(s) + if err != nil { + panic("invalid log level") + } + + return l +} diff --git a/backend/vendor/github.com/apex/log/logger.go b/backend/vendor/github.com/apex/log/logger.go new file mode 100644 index 00000000..1755747c --- /dev/null +++ b/backend/vendor/github.com/apex/log/logger.go @@ -0,0 +1,149 @@ +package log + +import ( + stdlog "log" + "sort" +) + +// assert interface compliance. +var _ Interface = (*Logger)(nil) + +// Fielder is an interface for providing fields to custom types. +type Fielder interface { + Fields() Fields +} + +// Fields represents a map of entry level data used for structured logging. +type Fields map[string]interface{} + +// Fields implements Fielder. +func (f Fields) Fields() Fields { + return f +} + +// Get field value by name. +func (f Fields) Get(name string) interface{} { + return f[name] +} + +// Names returns field names sorted. +func (f Fields) Names() (v []string) { + for k := range f { + v = append(v, k) + } + + sort.Strings(v) + return +} + +// The HandlerFunc type is an adapter to allow the use of ordinary functions as +// log handlers. If f is a function with the appropriate signature, +// HandlerFunc(f) is a Handler object that calls f. +type HandlerFunc func(*Entry) error + +// HandleLog calls f(e). 
+func (f HandlerFunc) HandleLog(e *Entry) error { + return f(e) +} + +// Handler is used to handle log events, outputting them to +// stdio or sending them to remote services. See the "handlers" +// directory for implementations. +// +// It is left up to Handlers to implement thread-safety. +type Handler interface { + HandleLog(*Entry) error +} + +// Logger represents a logger with configurable Level and Handler. +type Logger struct { + Handler Handler + Level Level +} + +// WithFields returns a new entry with `fields` set. +func (l *Logger) WithFields(fields Fielder) *Entry { + return NewEntry(l).WithFields(fields.Fields()) +} + +// WithField returns a new entry with the `key` and `value` set. +// +// Note that the `key` should not have spaces in it - use camel +// case or underscores +func (l *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(l).WithField(key, value) +} + +// WithError returns a new entry with the "error" set to `err`. +func (l *Logger) WithError(err error) *Entry { + return NewEntry(l).WithError(err) +} + +// Debug level message. +func (l *Logger) Debug(msg string) { + NewEntry(l).Debug(msg) +} + +// Info level message. +func (l *Logger) Info(msg string) { + NewEntry(l).Info(msg) +} + +// Warn level message. +func (l *Logger) Warn(msg string) { + NewEntry(l).Warn(msg) +} + +// Error level message. +func (l *Logger) Error(msg string) { + NewEntry(l).Error(msg) +} + +// Fatal level message, followed by an exit. +func (l *Logger) Fatal(msg string) { + NewEntry(l).Fatal(msg) +} + +// Debugf level formatted message. +func (l *Logger) Debugf(msg string, v ...interface{}) { + NewEntry(l).Debugf(msg, v...) +} + +// Infof level formatted message. +func (l *Logger) Infof(msg string, v ...interface{}) { + NewEntry(l).Infof(msg, v...) +} + +// Warnf level formatted message. +func (l *Logger) Warnf(msg string, v ...interface{}) { + NewEntry(l).Warnf(msg, v...) +} + +// Errorf level formatted message. +func (l *Logger) Errorf(msg string, v ...interface{}) { + NewEntry(l).Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func (l *Logger) Fatalf(msg string, v ...interface{}) { + NewEntry(l).Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func (l *Logger) Trace(msg string) *Entry { + return NewEntry(l).Trace(msg) +} + +// log the message, invoking the handler. We clone the entry here +// to bypass the overhead in Entry methods when the level is not +// met. +func (l *Logger) log(level Level, e *Entry, msg string) { + if level < l.Level { + return + } + + if err := l.Handler.HandleLog(e.finalize(level, msg)); err != nil { + stdlog.Printf("error logging: %s", err) + } +} diff --git a/backend/vendor/github.com/apex/log/pkg.go b/backend/vendor/github.com/apex/log/pkg.go new file mode 100644 index 00000000..9bf51dc8 --- /dev/null +++ b/backend/vendor/github.com/apex/log/pkg.go @@ -0,0 +1,100 @@ +package log + +// singletons ftw? +var Log Interface = &Logger{ + Handler: HandlerFunc(handleStdLog), + Level: InfoLevel, +} + +// SetHandler sets the handler. This is not thread-safe. +// The default handler outputs to the stdlib log. +func SetHandler(h Handler) { + if logger, ok := Log.(*Logger); ok { + logger.Handler = h + } +} + +// SetLevel sets the log level. This is not thread-safe. 
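The package-level helpers defined in pkg.go here (SetHandler, SetLevel, WithFields, WithError, Trace and the leveled print functions) are the surface the backend code calls through github.com/apex/log. A brief usage sketch, restricted to the API visible in this vendored copy; the inline HandlerFunc stands in for the handler subpackages, which are not included in this vendor tree:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/apex/log"
)

func main() {
	// Route entries through a custom handler; if SetHandler is never called,
	// the bundled default handler writes to the standard library logger at InfoLevel.
	log.SetHandler(log.HandlerFunc(func(e *log.Entry) error {
		fmt.Printf("[%s] %s %v\n", e.Level, e.Message, e.Fields)
		return nil
	}))
	log.SetLevel(log.DebugLevel)

	log.WithFields(log.Fields{"spider": "example", "node": "master"}).Info("task started")
	log.WithError(errors.New("boom")).Error("task failed")

	// Trace/Stop pair: logs "sync" on entry and again with a duration on exit.
	err := func() (err error) {
		defer log.Trace("sync").Stop(&err)
		return nil
	}()
	_ = err
}
```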
+func SetLevel(l Level) { + if logger, ok := Log.(*Logger); ok { + logger.Level = l + } +} + +// SetLevelFromString sets the log level from a string, panicing when invalid. This is not thread-safe. +func SetLevelFromString(s string) { + if logger, ok := Log.(*Logger); ok { + logger.Level = MustParseLevel(s) + } +} + +// WithFields returns a new entry with `fields` set. +func WithFields(fields Fielder) *Entry { + return Log.WithFields(fields) +} + +// WithField returns a new entry with the `key` and `value` set. +func WithField(key string, value interface{}) *Entry { + return Log.WithField(key, value) +} + +// WithError returns a new entry with the "error" set to `err`. +func WithError(err error) *Entry { + return Log.WithError(err) +} + +// Debug level message. +func Debug(msg string) { + Log.Debug(msg) +} + +// Info level message. +func Info(msg string) { + Log.Info(msg) +} + +// Warn level message. +func Warn(msg string) { + Log.Warn(msg) +} + +// Error level message. +func Error(msg string) { + Log.Error(msg) +} + +// Fatal level message, followed by an exit. +func Fatal(msg string) { + Log.Fatal(msg) +} + +// Debugf level formatted message. +func Debugf(msg string, v ...interface{}) { + Log.Debugf(msg, v...) +} + +// Infof level formatted message. +func Infof(msg string, v ...interface{}) { + Log.Infof(msg, v...) +} + +// Warnf level formatted message. +func Warnf(msg string, v ...interface{}) { + Log.Warnf(msg, v...) +} + +// Errorf level formatted message. +func Errorf(msg string, v ...interface{}) { + Log.Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func Fatalf(msg string, v ...interface{}) { + Log.Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func Trace(msg string) *Entry { + return Log.Trace(msg) +} diff --git a/backend/vendor/github.com/apex/log/stack.go b/backend/vendor/github.com/apex/log/stack.go new file mode 100644 index 00000000..57efe326 --- /dev/null +++ b/backend/vendor/github.com/apex/log/stack.go @@ -0,0 +1,8 @@ +package log + +import "github.com/pkg/errors" + +// stackTracer interface. +type stackTracer interface { + StackTrace() errors.StackTrace +} diff --git a/backend/vendor/github.com/fsnotify/fsnotify/.editorconfig b/backend/vendor/github.com/fsnotify/fsnotify/.editorconfig new file mode 100644 index 00000000..ba49e3c2 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/.editorconfig @@ -0,0 +1,5 @@ +root = true + +[*] +indent_style = tab +indent_size = 4 diff --git a/backend/vendor/github.com/fsnotify/fsnotify/.gitignore b/backend/vendor/github.com/fsnotify/fsnotify/.gitignore new file mode 100644 index 00000000..4cd0cbaf --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -0,0 +1,6 @@ +# Setup a Global .gitignore for OS and editor generated files: +# https://help.github.com/articles/ignoring-files +# git config --global core.excludesfile ~/.gitignore_global + +.vagrant +*.sublime-project diff --git a/backend/vendor/github.com/fsnotify/fsnotify/.travis.yml b/backend/vendor/github.com/fsnotify/fsnotify/.travis.yml new file mode 100644 index 00000000..981d1bb8 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go + +go: + - 1.8.x + - 1.9.x + - tip + +matrix: + allow_failures: + - go: tip + fast_finish: true + +before_script: + - go get -u github.com/golang/lint/golint + +script: + - go test -v --race ./... 
+ +after_script: + - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" + - test -z "$(golint ./... | tee /dev/stderr)" + - go vet ./... + +os: + - linux + - osx + +notifications: + email: false diff --git a/backend/vendor/github.com/fsnotify/fsnotify/AUTHORS b/backend/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 00000000..5ab5d41c --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/backend/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/backend/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md new file mode 100644 index 00000000..be4d7ea2 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -0,0 +1,317 @@ +# Changelog + +## v1.4.7 / 2018-01-09 + +* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) +* Tests: Fix missing verb on format string (thanks @rchiossi) +* Linux: Fix deadlock in Remove (thanks @aarondl) +* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) +* Docs: Moved FAQ into the README (thanks @vahe) +* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) +* Docs: replace references to OS X with macOS + +## v1.4.2 / 2016-10-10 + +* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) + +## v1.4.1 / 2016-10-04 + +* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) + +## v1.4.0 / 2016-10-01 + +* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) + +## v1.3.1 / 2016-06-28 + +* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) + +## v1.3.0 / 2016-04-19 + +* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) + +## v1.2.10 / 2016-03-02 + +* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) + +## v1.2.9 / 2016-01-13 + +kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) + +## v1.2.8 / 2015-12-17 + +* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) +* inotify: fix race in test +* enable race 
detection for continuous integration (Linux, Mac, Windows) + +## v1.2.5 / 2015-10-17 + +* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) +* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) +* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) +* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) + +## v1.2.1 / 2015-10-14 + +* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) + +## v1.2.0 / 2015-02-08 + +* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) +* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) +* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) + +## v1.1.1 / 2015-02-05 + +* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) + +## v1.1.0 / 2014-12-12 + +* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) + * add low-level functions + * only need to store flags on directories + * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) + * done can be an unbuffered channel + * remove calls to os.NewSyscallError +* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) +* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v1.0.4 / 2014-09-07 + +* kqueue: add dragonfly to the build tags. +* Rename source code files, rearrange code so exported APIs are at the top. +* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) + +## v1.0.3 / 2014-08-19 + +* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) + +## v1.0.2 / 2014-08-17 + +* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) +* [Fix] Make ./path and path equivalent. (thanks @zhsso) + +## v1.0.0 / 2014-08-15 + +* [API] Remove AddWatch on Windows, use Add. +* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) +* Minor updates based on feedback from golint. + +## dev / 2014-07-09 + +* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). +* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) + +## dev / 2014-07-04 + +* kqueue: fix incorrect mutex used in Close() +* Update example to demonstrate usage of Op. + +## dev / 2014-06-28 + +* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) +* Fix for String() method on Event (thanks Alex Brainman) +* Don't build on Plan 9 or Solaris (thanks @4ad) + +## dev / 2014-06-21 + +* Events channel of type Event rather than *Event. +* [internal] use syscall constants directly for inotify and kqueue. +* [internal] kqueue: rename events to kevents and fileEvent to event. 
+ +## dev / 2014-06-19 + +* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). +* [internal] remove cookie from Event struct (unused). +* [internal] Event struct has the same definition across every OS. +* [internal] remove internal watch and removeWatch methods. + +## dev / 2014-06-12 + +* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). +* [API] Pluralized channel names: Events and Errors. +* [API] Renamed FileEvent struct to Event. +* [API] Op constants replace methods like IsCreate(). + +## dev / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## dev / 2014-05-23 + +* [API] Remove current implementation of WatchFlags. + * current implementation doesn't take advantage of OS for efficiency + * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes + * no tests for the current implementation + * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) + +## v0.9.3 / 2014-12-31 + +* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) + +## v0.9.2 / 2014-08-17 + +* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) + +## v0.9.1 / 2014-06-12 + +* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) + +## v0.9.0 / 2014-01-17 + +* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) +* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) +* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
+ +## v0.8.12 / 2013-11-13 + +* [API] Remove FD_SET and friends from Linux adapter + +## v0.8.11 / 2013-11-02 + +* [Doc] Add Changelog [#72][] (thanks @nathany) +* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) + +## v0.8.10 / 2013-10-19 + +* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) +* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) +* [Doc] specify OS-specific limits in README (thanks @debrando) + +## v0.8.9 / 2013-09-08 + +* [Doc] Contributing (thanks @nathany) +* [Doc] update package path in example code [#63][] (thanks @paulhammond) +* [Doc] GoCI badge in README (Linux only) [#60][] +* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) + +## v0.8.8 / 2013-06-17 + +* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) + +## v0.8.7 / 2013-06-03 + +* [API] Make syscall flags internal +* [Fix] inotify: ignore event changes +* [Fix] race in symlink test [#45][] (reported by @srid) +* [Fix] tests on Windows +* lower case error messages + +## v0.8.6 / 2013-05-23 + +* kqueue: Use EVT_ONLY flag on Darwin +* [Doc] Update README with full example + +## v0.8.5 / 2013-05-09 + +* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) + +## v0.8.4 / 2013-04-07 + +* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) + +## v0.8.3 / 2013-03-13 + +* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) +* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) + +## v0.8.2 / 2013-02-07 + +* [Doc] add Authors +* [Fix] fix data races for map access [#29][] (thanks @fsouza) + +## v0.8.1 / 2013-01-09 + +* [Fix] Windows path separators +* [Doc] BSD License + +## v0.8.0 / 2012-11-09 + +* kqueue: directory watching improvements (thanks @vmirage) +* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) +* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) + +## v0.7.4 / 2012-10-09 + +* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) +* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) +* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) +* [Fix] kqueue: modify after recreation of file + +## v0.7.3 / 2012-09-27 + +* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) +* [Fix] kqueue: no longer get duplicate CREATE events + +## v0.7.2 / 2012-09-01 + +* kqueue: events for created directories + +## v0.7.1 / 2012-07-14 + +* [Fix] for renaming files + +## v0.7.0 / 2012-07-02 + +* [Feature] FSNotify flags +* [Fix] inotify: Added file name back to event path + +## v0.6.0 / 2012-06-06 + +* kqueue: watch files after directory created (thanks @tmc) + +## v0.5.1 / 2012-05-22 + +* [Fix] inotify: remove all watches before Close() + +## v0.5.0 / 2012-05-03 + +* [API] kqueue: return errors during watch instead of sending over channel +* kqueue: match symlink behavior on Linux +* inotify: add `DELETE_SELF` (requested by @taralx) +* [Fix] kqueue: handle EINTR (reported by @robfig) +* [Doc] Godoc example [#1][] (thanks @davecheney) + +## v0.4.0 / 2012-03-30 + +* Go 1 released: build with go tool +* [Feature] Windows support using winfsnotify +* Windows does not have attribute change notifications +* Roll attribute notifications into IsModify + +## v0.3.0 / 2012-02-19 + +* kqueue: add files when watch directory + +## v0.2.0 / 
2011-12-30 + +* update to latest Go weekly code + +## v0.1.0 / 2011-10-19 + +* kqueue: add watch on file creation to match inotify +* kqueue: create file event +* inotify: ignore `IN_IGNORED` events +* event String() +* linux: common FileEvent functions +* initial commit + +[#79]: https://github.com/howeyc/fsnotify/pull/79 +[#77]: https://github.com/howeyc/fsnotify/pull/77 +[#72]: https://github.com/howeyc/fsnotify/issues/72 +[#71]: https://github.com/howeyc/fsnotify/issues/71 +[#70]: https://github.com/howeyc/fsnotify/issues/70 +[#63]: https://github.com/howeyc/fsnotify/issues/63 +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#60]: https://github.com/howeyc/fsnotify/issues/60 +[#59]: https://github.com/howeyc/fsnotify/issues/59 +[#49]: https://github.com/howeyc/fsnotify/issues/49 +[#45]: https://github.com/howeyc/fsnotify/issues/45 +[#40]: https://github.com/howeyc/fsnotify/issues/40 +[#36]: https://github.com/howeyc/fsnotify/issues/36 +[#33]: https://github.com/howeyc/fsnotify/issues/33 +[#29]: https://github.com/howeyc/fsnotify/issues/29 +[#25]: https://github.com/howeyc/fsnotify/issues/25 +[#24]: https://github.com/howeyc/fsnotify/issues/24 +[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/backend/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/backend/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md new file mode 100644 index 00000000..828a60b2 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing + +## Issues + +* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). +* Please indicate the platform you are using fsnotify on. +* A code example to reproduce the problem is appreciated. + +## Pull Requests + +### Contributor License Agreement + +fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). + +Please indicate that you have signed the CLA in your pull request. + +### How fsnotify is Developed + +* Development is done on feature branches. +* Tests are run on BSD, Linux, macOS and Windows. +* Pull requests are reviewed and [applied to master][am] using [hub][]. + * Maintainers may modify or squash commits rather than asking contributors to. +* To issue a new release, the maintainers will: + * Update the CHANGELOG + * Tag a version, which will become available through gopkg.in. + +### How to Fork + +For smooth sailing, always use the original import path. Installing with `go get` makes this easy. + +1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) +2. Create your feature branch (`git checkout -b my-new-feature`) +3. Ensure everything works and the tests pass (see below) +4. Commit your changes (`git commit -am 'Add some feature'`) + +Contribute upstream: + +1. Fork fsnotify on GitHub +2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) +3. Push to the branch (`git push fork my-new-feature`) +4. 
Create a new Pull Request on GitHub + +This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). + +### Testing + +fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. + +Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. + +To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. + +* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) +* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. +* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) +* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. +* When you're done, you will want to halt or destroy the Vagrant boxes. + +Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. + +Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). + +### Maintainers + +Help maintaining fsnotify is welcome. To be a maintainer: + +* Submit a pull request and sign the CLA as above. +* You must be able to run the test suite on Mac, Windows, Linux and BSD. + +To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. + +All code changes should be internal pull requests. + +Releases are tagged using [Semantic Versioning](http://semver.org/). + +[hub]: https://github.com/github/hub +[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/backend/vendor/github.com/fsnotify/fsnotify/LICENSE b/backend/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 00000000..f21e5408 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/vendor/github.com/fsnotify/fsnotify/README.md b/backend/vendor/github.com/fsnotify/fsnotify/README.md new file mode 100644 index 00000000..39932074 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/README.md @@ -0,0 +1,79 @@ +# File system notifications for Go + +[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) + +fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: + +```console +go get -u golang.org/x/sys/... +``` + +Cross platform: Windows, Linux, BSD and macOS. + +|Adapter |OS |Status | +|----------|----------|----------| +|inotify |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|kqueue |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)| +|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)| +|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)| +|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)| +|fanotify |Linux 2.6.37+ | | +|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)| +|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)| + +\* Android and iOS are untested. + +Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. + +## API stability + +fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). + +All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. + +Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. + +## Contributing + +Please refer to [CONTRIBUTING][] before opening an issue or pull request. + +## Example + +See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). 
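The example_test.go file referenced above is not vendored in this patch. A minimal sketch of the same watcher pattern, using only the API that is vendored here (NewWatcher, Add, Close and the Events/Errors channels):

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer watcher.Close()

	// Events and Errors are delivered on channels; read them in a goroutine.
	go func() {
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				if event.Op&fsnotify.Write == fsnotify.Write {
					log.Println("modified:", event.Name)
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				log.Println("error:", err)
			}
		}
	}()

	// Watch a single directory (non-recursive).
	if err := watcher.Add("/tmp"); err != nil {
		log.Fatal(err)
	}
	select {} // block forever in this sketch
}
```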
+ +## FAQ + +**When a file is moved to another directory is it still being watched?** + +No (it shouldn't be, unless you are watching where it was moved to). + +**When I watch a directory, are all subdirectories watched as well?** + +No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). + +**Do I have to watch the Error and Event channels in a separate goroutine?** + +As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) + +**Why am I receiving multiple events for the same file on OS X?** + +Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). + +**How many files can be watched at once?** + +There are OS-specific limits as to how many watches can be created: +* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. +* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. + +[#62]: https://github.com/howeyc/fsnotify/issues/62 +[#18]: https://github.com/fsnotify/fsnotify/issues/18 +[#11]: https://github.com/fsnotify/fsnotify/issues/11 +[#7]: https://github.com/howeyc/fsnotify/issues/7 + +[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md + +## Related Projects + +* [notify](https://github.com/rjeczalik/notify) +* [fsevents](https://github.com/fsnotify/fsevents) + diff --git a/backend/vendor/github.com/fsnotify/fsnotify/fen.go b/backend/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 00000000..ced39cb8 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/backend/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/backend/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 00000000..190bf0de --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. 
+type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/backend/vendor/github.com/fsnotify/fsnotify/inotify.go b/backend/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 00000000..d9fd1b88 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). 
+func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // We successfully removed the watch if InotifyRmWatch doesn't return an + // error, we need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify will already have been removed. + // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE + // so that EINVAL means that the wd is being rm_watch()ed or its file removed + // by another thread and we have not received IN_IGNORE event. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case. + // the only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. 
+ // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // If EOF is received. This should really never happen. + err = io.EOF + } else if n < 0 { + // If an error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill the + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically. + if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel. Such as events marked ignore by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns an platform-independent Event based on an inotify mask. 
+func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/backend/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/backend/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 00000000..cc7db4b2 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This creates an inotify handler, and an epoll handler. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end. + errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. 
+ return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. +func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/backend/vendor/github.com/fsnotify/fsnotify/kqueue.go b/backend/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 00000000..86e76a3d --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. 
+ fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. 
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. 
This can happen when + // we do a rm -fr on a recursively watched folders and we receive a + // modification event first but the folder has been deleted and later + // receive the delete event + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark is as delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns an platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. 
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEvent sends a create event if the file isn't already being tracked. +func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/backend/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/backend/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 00000000..7d8de145 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/backend/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/backend/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 00000000..9139e171 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/backend/vendor/github.com/fsnotify/fsnotify/windows.go b/backend/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 00000000..09436f31 --- /dev/null +++ b/backend/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the the named file or directory (non-recursively). 
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/backend/vendor/github.com/gin-contrib/sse/.travis.yml b/backend/vendor/github.com/gin-contrib/sse/.travis.yml new file mode 100644 index 00000000..d0e8fcf9 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/.travis.yml @@ -0,0 +1,26 @@ +language: go +sudo: false +go: + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +git: + depth: 10 + +matrix: + fast_finish: true + include: + - go: 1.11.x + env: GO111MODULE=on + - go: 1.12.x + env: GO111MODULE=on + +script: + - go test -v -covermode=count -coverprofile=coverage.out + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/backend/vendor/github.com/gin-contrib/sse/LICENSE b/backend/vendor/github.com/gin-contrib/sse/LICENSE new file mode 100644 index 00000000..1ff7f370 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
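A note on the fsnotify adapters above: inotify, kqueue, and ReadDirectoryChangesW each translate their native masks into the shared `Op` bit set through a platform-local `newEvent` helper, so consuming code never branches on the operating system. The following is an editorial sketch (not part of any vendored file) of how those bits are meant to be tested:

```go
package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

// isRemoveOrRename reports whether the event carries a Remove or Rename bit.
// Op is a bit set: several operations may be packed into one Event, so test
// with bitwise AND rather than equality.
func isRemoveOrRename(ev fsnotify.Event) bool {
	return ev.Op&(fsnotify.Remove|fsnotify.Rename) != 0
}

func main() {
	ev := fsnotify.Event{Name: "example.txt", Op: fsnotify.Write | fsnotify.Rename}
	fmt.Println(ev)                   // "example.txt": WRITE|RENAME
	fmt.Println(isRemoveOrRename(ev)) // true
}
```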
diff --git a/backend/vendor/github.com/gin-contrib/sse/README.md b/backend/vendor/github.com/gin-contrib/sse/README.md new file mode 100644 index 00000000..c9c49cf9 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/README.md @@ -0,0 +1,58 @@ +# Server-Sent Events + +[![GoDoc](https://godoc.org/github.com/gin-contrib/sse?status.svg)](https://godoc.org/github.com/gin-contrib/sse) +[![Build Status](https://travis-ci.org/gin-contrib/sse.svg)](https://travis-ci.org/gin-contrib/sse) +[![codecov](https://codecov.io/gh/gin-contrib/sse/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/sse) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/sse)](https://goreportcard.com/report/github.com/gin-contrib/sse) + +Server-sent events (SSE) is a technology where a browser receives automatic updates from a server via HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5[1] by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/). + +- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/) +- [Browser support](http://caniuse.com/#feat=eventsource) + +## Sample code + +```go +import "github.com/gin-contrib/sse" + +func httpHandler(w http.ResponseWriter, req *http.Request) { + // data can be a primitive like a string, an integer or a float + sse.Encode(w, sse.Event{ + Event: "message", + Data: "some data\nmore data", + }) + + // also a complex type, like a map, a struct or a slice + sse.Encode(w, sse.Event{ + Id: "124", + Event: "message", + Data: map[string]interface{}{ + "user": "manu", + "date": time.Now().Unix(), + "content": "hi!", + }, + }) +} +``` +``` +event: message +data: some data\\nmore data + +id: 124 +event: message +data: {"content":"hi!","date":1431540810,"user":"manu"} + +``` + +## Content-Type + +```go +fmt.Println(sse.ContentType) +``` +``` +text/event-stream +``` + +## Decoding support + +There is a client-side implementation of SSE coming soon. diff --git a/backend/vendor/github.com/gin-contrib/sse/sse-decoder.go b/backend/vendor/github.com/gin-contrib/sse/sse-decoder.go new file mode 100644 index 00000000..fd49b9c3 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/sse-decoder.go @@ -0,0 +1,116 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "bytes" + "io" + "io/ioutil" +) + +type decoder struct { + events []Event +} + +func Decode(r io.Reader) ([]Event, error) { + var dec decoder + return dec.decode(r) +} + +func (d *decoder) dispatchEvent(event Event, data string) { + dataLength := len(data) + if dataLength > 0 { + //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer. 
+ data = data[:dataLength-1] + dataLength-- + } + if dataLength == 0 && event.Event == "" { + return + } + if event.Event == "" { + event.Event = "message" + } + event.Data = data + d.events = append(d.events, event) +} + +func (d *decoder) decode(r io.Reader) ([]Event, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + var currentEvent Event + var dataBuffer *bytes.Buffer = new(bytes.Buffer) + // TODO (and unit tests) + // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair, + // a single U+000A LINE FEED (LF) character, + // or a single U+000D CARRIAGE RETURN (CR) character. + lines := bytes.Split(buf, []byte{'\n'}) + for _, line := range lines { + if len(line) == 0 { + // If the line is empty (a blank line). Dispatch the event. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + // reset current event and data buffer + currentEvent = Event{} + dataBuffer.Reset() + continue + } + if line[0] == byte(':') { + // If the line starts with a U+003A COLON character (:), ignore the line. + continue + } + + var field, value []byte + colonIndex := bytes.IndexRune(line, ':') + if colonIndex != -1 { + // If the line contains a U+003A COLON character character (:) + // Collect the characters on the line before the first U+003A COLON character (:), + // and let field be that string. + field = line[:colonIndex] + // Collect the characters on the line after the first U+003A COLON character (:), + // and let value be that string. + value = line[colonIndex+1:] + // If value starts with a single U+0020 SPACE character, remove it from value. + if len(value) > 0 && value[0] == ' ' { + value = value[1:] + } + } else { + // Otherwise, the string is not empty but does not contain a U+003A COLON character character (:) + // Use the whole line as the field name, and the empty string as the field value. + field = line + value = []byte{} + } + // The steps to process the field given a field name and a field value depend on the field name, + // as given in the following list. Field names must be compared literally, + // with no case folding performed. + switch string(field) { + case "event": + // Set the event name buffer to field value. + currentEvent.Event = string(value) + case "id": + // Set the event stream's last event ID to the field value. + currentEvent.Id = string(value) + case "retry": + // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9), + // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer. + // Otherwise, ignore the field. + currentEvent.Id = string(value) + case "data": + // Append the field value to the data buffer, + dataBuffer.Write(value) + // then append a single U+000A LINE FEED (LF) character to the data buffer. + dataBuffer.WriteString("\n") + default: + //Otherwise. The field is ignored. + continue + } + } + // Once the end of the file is reached, the user agent must dispatch the event one final time. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + return d.events, nil +} diff --git a/backend/vendor/github.com/gin-contrib/sse/sse-encoder.go b/backend/vendor/github.com/gin-contrib/sse/sse-encoder.go new file mode 100644 index 00000000..f9c80875 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/sse-encoder.go @@ -0,0 +1,110 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "strings" +) + +// Server-Sent Events +// W3C Working Draft 29 October 2009 +// http://www.w3.org/TR/2009/WD-eventsource-20091029/ + +const ContentType = "text/event-stream" + +var contentType = []string{ContentType} +var noCache = []string{"no-cache"} + +var fieldReplacer = strings.NewReplacer( + "\n", "\\n", + "\r", "\\r") + +var dataReplacer = strings.NewReplacer( + "\n", "\ndata:", + "\r", "\\r") + +type Event struct { + Event string + Id string + Retry uint + Data interface{} +} + +func Encode(writer io.Writer, event Event) error { + w := checkWriter(writer) + writeId(w, event.Id) + writeEvent(w, event.Event) + writeRetry(w, event.Retry) + return writeData(w, event.Data) +} + +func writeId(w stringWriter, id string) { + if len(id) > 0 { + w.WriteString("id:") + fieldReplacer.WriteString(w, id) + w.WriteString("\n") + } +} + +func writeEvent(w stringWriter, event string) { + if len(event) > 0 { + w.WriteString("event:") + fieldReplacer.WriteString(w, event) + w.WriteString("\n") + } +} + +func writeRetry(w stringWriter, retry uint) { + if retry > 0 { + w.WriteString("retry:") + w.WriteString(strconv.FormatUint(uint64(retry), 10)) + w.WriteString("\n") + } +} + +func writeData(w stringWriter, data interface{}) error { + w.WriteString("data:") + switch kindOfData(data) { + case reflect.Struct, reflect.Slice, reflect.Map: + err := json.NewEncoder(w).Encode(data) + if err != nil { + return err + } + w.WriteString("\n") + default: + dataReplacer.WriteString(w, fmt.Sprint(data)) + w.WriteString("\n\n") + } + return nil +} + +func (r Event) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return Encode(w, r) +} + +func (r Event) WriteContentType(w http.ResponseWriter) { + header := w.Header() + header["Content-Type"] = contentType + + if _, exist := header["Cache-Control"]; !exist { + header["Cache-Control"] = noCache + } +} + +func kindOfData(data interface{}) reflect.Kind { + value := reflect.ValueOf(data) + valueType := value.Kind() + if valueType == reflect.Ptr { + valueType = value.Elem().Kind() + } + return valueType +} diff --git a/backend/vendor/github.com/gin-contrib/sse/writer.go b/backend/vendor/github.com/gin-contrib/sse/writer.go new file mode 100644 index 00000000..6f9806c5 --- /dev/null +++ b/backend/vendor/github.com/gin-contrib/sse/writer.go @@ -0,0 +1,24 @@ +package sse + +import "io" + +type stringWriter interface { + io.Writer + WriteString(string) (int, error) +} + +type stringWrapper struct { + io.Writer +} + +func (w stringWrapper) WriteString(str string) (int, error) { + return w.Writer.Write([]byte(str)) +} + +func checkWriter(writer io.Writer) stringWriter { + if w, ok := writer.(stringWriter); ok { + return w + } else { + return stringWrapper{writer} + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/.gitignore b/backend/vendor/github.com/gin-gonic/gin/.gitignore new file mode 100644 index 00000000..bdd50c95 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/.gitignore @@ -0,0 +1,7 @@ +vendor/* +!vendor/vendor.json +coverage.out +count.out +test +profile.out +tmp.out diff --git a/backend/vendor/github.com/gin-gonic/gin/.travis.yml b/backend/vendor/github.com/gin-gonic/gin/.travis.yml new file mode 100644 index 00000000..f6ec8a82 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/.travis.yml @@ -0,0 +1,44 @@ +language: go + 
+matrix: + fast_finish: true + include: + - go: 1.8.x + - go: 1.9.x + - go: 1.10.x + - go: 1.11.x + env: GO111MODULE=on + - go: 1.12.x + env: GO111MODULE=on + - go: master + env: GO111MODULE=on + +git: + depth: 10 + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi + +install: + - if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make install; fi + - if [[ "${GO111MODULE}" = "on" ]]; then export PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"; fi + - if [[ "${GO111MODULE}" = "on" ]]; then make tools; fi + +go_import_path: github.com/gin-gonic/gin + +script: + - make vet + - make fmt-check + - make misspell-check + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/7f95bf605c4d356372f4 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: false # default: false diff --git a/backend/vendor/github.com/gin-gonic/gin/AUTHORS.md b/backend/vendor/github.com/gin-gonic/gin/AUTHORS.md new file mode 100644 index 00000000..dda19bcf --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/AUTHORS.md @@ -0,0 +1,231 @@ +List of all the awesome people working to make Gin the best Web Framework in Go. + +## gin 1.x series authors + +**Gin Core Team:** Bo-Yi Wu (@appleboy), 田欧 (@thinkerou), Javier Provecho (@javierprovecho) + +## gin 0.x series authors + +**Maintainers:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) + +People and companies, who have contributed, in alphabetical order. + +**@858806258 (杰哥)** +- Fix typo in example + + +**@achedeuzot (Klemen Sever)** +- Fix newline debug printing + + +**@adammck (Adam Mckaig)** +- Add MIT license + + +**@AlexanderChen1989 (Alexander)** +- Typos in README + + +**@alexanderdidenko (Aleksandr Didenko)** +- Add support multipart/form-data + + +**@alexandernyquist (Alexander Nyquist)** +- Using template.Must to fix multiple return issue +- ★ Added support for OPTIONS verb +- ★ Setting response headers before calling WriteHeader +- Improved documentation for model binding +- ★ Added Content.Redirect() +- ★ Added tons of Unit tests + + +**@austinheap (Austin Heap)** +- Added travis CI integration + + +**@andredublin (Andre Dublin)** +- Fix typo in comment + + +**@bredov (Ludwig Valda Vasquez)** +- Fix html templating in debug mode + + +**@bluele (Jun Kimura)** +- Fixes code examples in README + + +**@chad-russell** +- ★ Support for serializing gin.H into XML + + +**@dickeyxxx (Jeff Dickey)** +- Typos in README +- Add example about serving static files + + +**@donileo (Adonis)** +- Add NoMethod handler + + +**@dutchcoders (DutchCoders)** +- ★ Fix security bug that allows client to spoof ip +- Fix typo. r.HTMLTemplates -> SetHTMLTemplate + + +**@el3ctro- (Joshua Loper)** +- Fix typo in example + + +**@ethankan (Ethan Kan)** +- Unsigned integers in binding + + +**(Evgeny Persienko)** +- Validate sub structures + + +**@frankbille (Frank Bille)** +- Add support for HTTP Realm Auth + + +**@fmd (Fareed Dudhia)** +- Fix typo. 
SetHTTPTemplate -> SetHTMLTemplate + + +**@ironiridis (Christopher Harrington)** +- Remove old reference + + +**@jammie-stackhouse (Jamie Stackhouse)** +- Add more shortcuts for router methods + + +**@jasonrhansen** +- Fix spelling and grammar errors in documentation + + +**@JasonSoft (Jason Lee)** +- Fix typo in comment + + +**@joiggama (Ignacio Galindo)** +- Add utf-8 charset header on renders + + +**@julienschmidt (Julien Schmidt)** +- gofmt the code examples + + +**@kelcecil (Kel Cecil)** +- Fix readme typo + + +**@kyledinh (Kyle Dinh)** +- Adds RunTLS() + + +**@LinusU (Linus Unnebäck)** +- Small fixes in README + + +**@loongmxbt (Saint Asky)** +- Fix typo in example + + +**@lucas-clemente (Lucas Clemente)** +- ★ work around path.Join removing trailing slashes from routes + + +**@mattn (Yasuhiro Matsumoto)** +- Improve color logger + + +**@mdigger (Dmitry Sedykh)** +- Fixes Form binding when content-type is x-www-form-urlencoded +- No repeat call c.Writer.Status() in gin.Logger +- Fixes Content-Type for json render + + +**@mirzac (Mirza Ceric)** +- Fix debug printing + + +**@mopemope (Yutaka Matsubara)** +- ★ Adds Godep support (Dependencies Manager) +- Fix variadic parameter in the flexible render API +- Fix Corrupted plain render +- Add Pluggable View Renderer Example + + +**@msemenistyi (Mykyta Semenistyi)** +- update Readme.md. Add code to String method + + +**@msoedov (Sasha Myasoedov)** +- ★ Adds tons of unit tests. + + +**@ngerakines (Nick Gerakines)** +- ★ Improves API, c.GET() doesn't panic +- Adds MustGet() method + + +**@r8k (Rajiv Kilaparti)** +- Fix Port usage in README. + + +**@rayrod2030 (Ray Rodriguez)** +- Fix typo in example + + +**@rns** +- Fix typo in example + + +**@RobAWilkinson (Robert Wilkinson)** +- Add example of forms and params + + +**@rogierlommers (Rogier Lommers)** +- Add updated static serve example + + +**@se77en (Damon Zhao)** +- Improve color logging + + +**@silasb (Silas Baronda)** +- Fixing quotes in README + + +**@SkuliOskarsson (Skuli Oskarsson)** +- Fixes some texts in README II + + +**@slimmy (Jimmy Pettersson)** +- Added messages for required bindings + + +**@smira (Andrey Smirnov)** +- Add support for ignored/unexported fields in binding + + +**@superalsrk (SRK.Lyu)** +- Update httprouter godeps + + +**@tebeka (Miki Tebeka)** +- Use net/http constants instead of numeric values + + +**@techjanitor** +- Update context.go reserved IPs + + +**@yosssi (Keiji Yoshida)** +- Fix link in README + + +**@yuyabee** +- Fixed README diff --git a/backend/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/backend/vendor/github.com/gin-gonic/gin/BENCHMARKS.md new file mode 100644 index 00000000..9a7df86a --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/BENCHMARKS.md @@ -0,0 +1,604 @@ + +## Benchmark System + +**VM HOST:** DigitalOcean +**Machine:** 4 CPU, 8 GB RAM. 
Ubuntu 16.04.2 x64 +**Date:** July 19th, 2017 +**Go Version:** 1.8.3 linux/amd64 +**Source:** [Go HTTP Router Benchmark](https://github.com/julienschmidt/go-http-routing-benchmark) + +## Static Routes: 157 + +``` +Gin: 30512 Bytes + +HttpServeMux: 17344 Bytes +Ace: 30080 Bytes +Bear: 30472 Bytes +Beego: 96408 Bytes +Bone: 37904 Bytes +Denco: 10464 Bytes +Echo: 73680 Bytes +GocraftWeb: 55720 Bytes +Goji: 27200 Bytes +Gojiv2: 104464 Bytes +GoJsonRest: 136472 Bytes +GoRestful: 914904 Bytes +GorillaMux: 675568 Bytes +HttpRouter: 21128 Bytes +HttpTreeMux: 73448 Bytes +Kocha: 115072 Bytes +LARS: 30120 Bytes +Macaron: 37984 Bytes +Martini: 310832 Bytes +Pat: 20464 Bytes +Possum: 91328 Bytes +R2router: 23712 Bytes +Rivet: 23880 Bytes +Tango: 28008 Bytes +TigerTonic: 80368 Bytes +Traffic: 626480 Bytes +Vulcan: 369064 Bytes +``` + +## GithubAPI Routes: 203 + +``` +Gin: 52672 Bytes + +Ace: 48992 Bytes +Bear: 161592 Bytes +Beego: 147992 Bytes +Bone: 97728 Bytes +Denco: 36440 Bytes +Echo: 95672 Bytes +GocraftWeb: 95640 Bytes +Goji: 86088 Bytes +Gojiv2: 144392 Bytes +GoJsonRest: 134648 Bytes +GoRestful: 1410760 Bytes +GorillaMux: 1509488 Bytes +HttpRouter: 37464 Bytes +HttpTreeMux: 78800 Bytes +Kocha: 785408 Bytes +LARS: 49032 Bytes +Macaron: 132712 Bytes +Martini: 564352 Bytes +Pat: 21200 Bytes +Possum: 83888 Bytes +R2router: 47104 Bytes +Rivet: 42840 Bytes +Tango: 54584 Bytes +TigerTonic: 96384 Bytes +Traffic: 1061920 Bytes +Vulcan: 465296 Bytes +``` + +## GPlusAPI Routes: 13 + +``` +Gin: 3968 Bytes + +Ace: 3600 Bytes +Bear: 7112 Bytes +Beego: 10048 Bytes +Bone: 6480 Bytes +Denco: 3256 Bytes +Echo: 9000 Bytes +GocraftWeb: 7496 Bytes +Goji: 2912 Bytes +Gojiv2: 7376 Bytes +GoJsonRest: 11544 Bytes +GoRestful: 88776 Bytes +GorillaMux: 71488 Bytes +HttpRouter: 2712 Bytes +HttpTreeMux: 7440 Bytes +Kocha: 128880 Bytes +LARS: 3640 Bytes +Macaron: 8656 Bytes +Martini: 23936 Bytes +Pat: 1856 Bytes +Possum: 7248 Bytes +R2router: 3928 Bytes +Rivet: 3064 Bytes +Tango: 4912 Bytes +TigerTonic: 9408 Bytes +Traffic: 49472 Bytes +Vulcan: 25496 Bytes +``` + +## ParseAPI Routes: 26 + +``` +Gin: 6928 Bytes + +Ace: 6592 Bytes +Bear: 12320 Bytes +Beego: 18960 Bytes +Bone: 11024 Bytes +Denco: 4184 Bytes +Echo: 11168 Bytes +GocraftWeb: 12800 Bytes +Goji: 5232 Bytes +Gojiv2: 14464 Bytes +GoJsonRest: 14216 Bytes +GoRestful: 127368 Bytes +GorillaMux: 123016 Bytes +HttpRouter: 4976 Bytes +HttpTreeMux: 7848 Bytes +Kocha: 181712 Bytes +LARS: 6632 Bytes +Macaron: 13648 Bytes +Martini: 45952 Bytes +Pat: 2560 Bytes +Possum: 9200 Bytes +R2router: 7056 Bytes +Rivet: 5680 Bytes +Tango: 8664 Bytes +TigerTonic: 9840 Bytes +Traffic: 93480 Bytes +Vulcan: 44504 Bytes +``` + +## Static Routes + +``` +BenchmarkGin_StaticAll 50000 34506 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_StaticAll 30000 49657 ns/op 0 B/op 0 allocs/op +BenchmarkHttpServeMux_StaticAll 2000 1183737 ns/op 96 B/op 8 allocs/op +BenchmarkBeego_StaticAll 5000 412621 ns/op 57776 B/op 628 allocs/op +BenchmarkBear_StaticAll 10000 149242 ns/op 20336 B/op 461 allocs/op +BenchmarkBone_StaticAll 10000 118583 ns/op 0 B/op 0 allocs/op +BenchmarkDenco_StaticAll 100000 13247 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_StaticAll 20000 79914 ns/op 5024 B/op 157 allocs/op +BenchmarkGocraftWeb_StaticAll 10000 211823 ns/op 46440 B/op 785 allocs/op +BenchmarkGoji_StaticAll 10000 109390 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_StaticAll 3000 415533 ns/op 145696 B/op 1099 allocs/op +BenchmarkGoJsonRest_StaticAll 5000 364403 ns/op 51653 B/op 1727 allocs/op +BenchmarkGoRestful_StaticAll 500 2578579 ns/op 
314936 B/op 3144 allocs/op +BenchmarkGorillaMux_StaticAll 500 2704856 ns/op 115648 B/op 1578 allocs/op +BenchmarkHttpRouter_StaticAll 100000 18541 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_StaticAll 100000 22332 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_StaticAll 50000 31176 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_StaticAll 50000 40840 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_StaticAll 5000 517656 ns/op 120576 B/op 1413 allocs/op +BenchmarkMartini_StaticAll 300 4462289 ns/op 125442 B/op 1717 allocs/op +BenchmarkPat_StaticAll 500 2157275 ns/op 533904 B/op 11123 allocs/op +BenchmarkPossum_StaticAll 10000 254701 ns/op 65312 B/op 471 allocs/op +BenchmarkR2router_StaticAll 10000 133956 ns/op 22608 B/op 628 allocs/op +BenchmarkRivet_StaticAll 30000 46812 ns/op 0 B/op 0 allocs/op +BenchmarkTango_StaticAll 5000 390613 ns/op 39225 B/op 1256 allocs/op +BenchmarkTigerTonic_StaticAll 20000 88060 ns/op 7504 B/op 157 allocs/op +BenchmarkTraffic_StaticAll 500 2910236 ns/op 729736 B/op 14287 allocs/op +BenchmarkVulcan_StaticAll 5000 277366 ns/op 15386 B/op 471 allocs/op +``` + +## Micro Benchmarks + +``` +BenchmarkGin_Param 20000000 113 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_Param 5000000 375 ns/op 32 B/op 1 allocs/op +BenchmarkBear_Param 1000000 1709 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_Param 1000000 2484 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param 1000000 2391 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_Param 10000000 240 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_Param 5000000 366 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_Param 1000000 2343 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_Param 1000000 1197 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param 1000000 2771 ns/op 944 B/op 8 allocs/op +BenchmarkGoJsonRest_Param 1000000 2993 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_Param 200000 8860 ns/op 2296 B/op 21 allocs/op +BenchmarkGorillaMux_Param 500000 4461 ns/op 1056 B/op 11 allocs/op +BenchmarkHttpRouter_Param 10000000 175 ns/op 32 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param 1000000 1167 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_Param 3000000 429 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_Param 10000000 134 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param 500000 4635 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Param 200000 9933 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_Param 1000000 2929 ns/op 648 B/op 12 allocs/op +BenchmarkPossum_Param 1000000 2503 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param 1000000 1507 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param 5000000 297 ns/op 48 B/op 1 allocs/op +BenchmarkTango_Param 1000000 1862 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_Param 500000 5660 ns/op 992 B/op 17 allocs/op +BenchmarkTraffic_Param 200000 8408 ns/op 1960 B/op 21 allocs/op +BenchmarkVulcan_Param 2000000 963 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param5 2000000 740 ns/op 160 B/op 1 allocs/op +BenchmarkBear_Param5 1000000 2777 ns/op 501 B/op 5 allocs/op +BenchmarkBeego_Param5 1000000 3740 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param5 1000000 2950 ns/op 736 B/op 5 allocs/op +BenchmarkDenco_Param5 2000000 644 ns/op 160 B/op 1 allocs/op +BenchmarkEcho_Param5 3000000 558 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Param5 10000000 198 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param5 500000 3870 ns/op 920 B/op 11 allocs/op +BenchmarkGoji_Param5 1000000 1746 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param5 1000000 3214 ns/op 1008 B/op 8 allocs/op +BenchmarkGoJsonRest_Param5 500000 5509 ns/op 1097 B/op 16 allocs/op +BenchmarkGoRestful_Param5 200000 11232 ns/op 
2392 B/op 21 allocs/op +BenchmarkGorillaMux_Param5 300000 7777 ns/op 1184 B/op 11 allocs/op +BenchmarkHttpRouter_Param5 3000000 631 ns/op 160 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param5 1000000 2800 ns/op 576 B/op 6 allocs/op +BenchmarkKocha_Param5 1000000 2053 ns/op 440 B/op 10 allocs/op +BenchmarkLARS_Param5 10000000 232 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param5 500000 5888 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Param5 200000 12807 ns/op 1232 B/op 11 allocs/op +BenchmarkPat_Param5 300000 7320 ns/op 964 B/op 32 allocs/op +BenchmarkPossum_Param5 1000000 2495 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param5 1000000 1844 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param5 2000000 935 ns/op 240 B/op 1 allocs/op +BenchmarkTango_Param5 1000000 2327 ns/op 360 B/op 8 allocs/op +BenchmarkTigerTonic_Param5 100000 18514 ns/op 2551 B/op 43 allocs/op +BenchmarkTraffic_Param5 200000 11997 ns/op 2248 B/op 25 allocs/op +BenchmarkVulcan_Param5 1000000 1333 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param20 1000000 2031 ns/op 640 B/op 1 allocs/op +BenchmarkBear_Param20 200000 7285 ns/op 1664 B/op 5 allocs/op +BenchmarkBeego_Param20 300000 6224 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Param20 200000 8023 ns/op 1903 B/op 5 allocs/op +BenchmarkDenco_Param20 1000000 2262 ns/op 640 B/op 1 allocs/op +BenchmarkEcho_Param20 1000000 1387 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Param20 3000000 503 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param20 100000 14408 ns/op 3795 B/op 15 allocs/op +BenchmarkGoji_Param20 500000 5272 ns/op 1247 B/op 2 allocs/op +BenchmarkGojiv2_Param20 1000000 4163 ns/op 1248 B/op 8 allocs/op +BenchmarkGoJsonRest_Param20 100000 17866 ns/op 4485 B/op 20 allocs/op +BenchmarkGoRestful_Param20 100000 21022 ns/op 4724 B/op 23 allocs/op +BenchmarkGorillaMux_Param20 100000 17055 ns/op 3547 B/op 13 allocs/op +BenchmarkHttpRouter_Param20 1000000 1748 ns/op 640 B/op 1 allocs/op +BenchmarkHttpTreeMux_Param20 200000 12246 ns/op 3196 B/op 10 allocs/op +BenchmarkKocha_Param20 300000 6861 ns/op 1808 B/op 27 allocs/op +BenchmarkLARS_Param20 3000000 526 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param20 100000 13069 ns/op 2906 B/op 12 allocs/op +BenchmarkMartini_Param20 100000 23602 ns/op 3597 B/op 13 allocs/op +BenchmarkPat_Param20 50000 32143 ns/op 4688 B/op 111 allocs/op +BenchmarkPossum_Param20 1000000 2396 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Param20 200000 8907 ns/op 2283 B/op 7 allocs/op +BenchmarkRivet_Param20 1000000 3280 ns/op 1024 B/op 1 allocs/op +BenchmarkTango_Param20 500000 4640 ns/op 856 B/op 8 allocs/op +BenchmarkTigerTonic_Param20 20000 67581 ns/op 10532 B/op 138 allocs/op +BenchmarkTraffic_Param20 50000 40313 ns/op 7941 B/op 45 allocs/op +BenchmarkVulcan_Param20 1000000 2264 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParamWrite 3000000 532 ns/op 40 B/op 2 allocs/op +BenchmarkBear_ParamWrite 1000000 1778 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_ParamWrite 1000000 2596 ns/op 376 B/op 5 allocs/op +BenchmarkBone_ParamWrite 1000000 2519 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_ParamWrite 5000000 411 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_ParamWrite 2000000 718 ns/op 40 B/op 2 allocs/op +BenchmarkGin_ParamWrite 5000000 283 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParamWrite 1000000 2561 ns/op 656 B/op 9 allocs/op +BenchmarkGoji_ParamWrite 1000000 1378 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParamWrite 1000000 3128 ns/op 976 B/op 10 allocs/op +BenchmarkGoJsonRest_ParamWrite 500000 4446 ns/op 1128 B/op 18 allocs/op +BenchmarkGoRestful_ParamWrite 200000 10291 
ns/op 2304 B/op 22 allocs/op +BenchmarkGorillaMux_ParamWrite 500000 5153 ns/op 1064 B/op 12 allocs/op +BenchmarkHttpRouter_ParamWrite 5000000 263 ns/op 32 B/op 1 allocs/op +BenchmarkHttpTreeMux_ParamWrite 1000000 1351 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParamWrite 3000000 538 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParamWrite 5000000 316 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParamWrite 500000 5756 ns/op 1160 B/op 14 allocs/op +BenchmarkMartini_ParamWrite 200000 13097 ns/op 1176 B/op 14 allocs/op +BenchmarkPat_ParamWrite 500000 4954 ns/op 1072 B/op 17 allocs/op +BenchmarkPossum_ParamWrite 1000000 2499 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_ParamWrite 1000000 1531 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParamWrite 3000000 570 ns/op 112 B/op 2 allocs/op +BenchmarkTango_ParamWrite 2000000 957 ns/op 136 B/op 4 allocs/op +BenchmarkTigerTonic_ParamWrite 200000 7025 ns/op 1424 B/op 23 allocs/op +BenchmarkTraffic_ParamWrite 200000 10112 ns/op 2384 B/op 25 allocs/op +BenchmarkVulcan_ParamWrite 1000000 1006 ns/op 98 B/op 3 allocs/op +``` + +## GitHub + +``` +BenchmarkGin_GithubStatic 10000000 156 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubStatic 2000000 893 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_GithubStatic 1000000 2491 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GithubStatic 50000 25300 ns/op 2880 B/op 60 allocs/op +BenchmarkDenco_GithubStatic 20000000 76.0 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GithubStatic 2000000 516 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_GithubStatic 1000000 1448 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_GithubStatic 3000000 496 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GithubStatic 1000000 2941 ns/op 928 B/op 7 allocs/op +BenchmarkGoRestful_GithubStatic 100000 27256 ns/op 3224 B/op 22 allocs/op +BenchmarkGoJsonRest_GithubStatic 1000000 2196 ns/op 329 B/op 11 allocs/op +BenchmarkGorillaMux_GithubStatic 50000 31617 ns/op 736 B/op 10 allocs/op +BenchmarkHttpRouter_GithubStatic 20000000 88.4 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubStatic 10000000 134 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GithubStatic 20000000 113 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GithubStatic 10000000 195 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubStatic 500000 3740 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_GithubStatic 50000 27673 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GithubStatic 100000 19470 ns/op 3648 B/op 76 allocs/op +BenchmarkPossum_GithubStatic 1000000 1729 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GithubStatic 2000000 879 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GithubStatic 10000000 231 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GithubStatic 1000000 2325 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_GithubStatic 3000000 610 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_GithubStatic 20000 62973 ns/op 18904 B/op 148 allocs/op +BenchmarkVulcan_GithubStatic 1000000 1447 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubParam 2000000 686 ns/op 96 B/op 1 allocs/op +BenchmarkBear_GithubParam 1000000 2155 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GithubParam 1000000 2713 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GithubParam 100000 15088 ns/op 1760 B/op 18 allocs/op +BenchmarkDenco_GithubParam 2000000 629 ns/op 128 B/op 1 allocs/op +BenchmarkEcho_GithubParam 2000000 653 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GithubParam 5000000 255 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubParam 1000000 3145 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GithubParam 1000000 1916 ns/op 336 
B/op 2 allocs/op +BenchmarkGojiv2_GithubParam 1000000 3975 ns/op 1024 B/op 10 allocs/op +BenchmarkGoJsonRest_GithubParam 300000 4134 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GithubParam 50000 30782 ns/op 2360 B/op 21 allocs/op +BenchmarkGorillaMux_GithubParam 100000 17148 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_GithubParam 3000000 523 ns/op 96 B/op 1 allocs/op +BenchmarkHttpTreeMux_GithubParam 1000000 1671 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GithubParam 1000000 1021 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GithubParam 5000000 283 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubParam 500000 4270 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GithubParam 100000 21728 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_GithubParam 200000 11208 ns/op 2464 B/op 48 allocs/op +BenchmarkPossum_GithubParam 1000000 2334 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GithubParam 1000000 1487 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GithubParam 2000000 782 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GithubParam 1000000 2653 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GithubParam 300000 14073 ns/op 1440 B/op 24 allocs/op +BenchmarkTraffic_GithubParam 50000 29164 ns/op 5992 B/op 52 allocs/op +BenchmarkVulcan_GithubParam 1000000 2529 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubAll 10000 134059 ns/op 13792 B/op 167 allocs/op +BenchmarkBear_GithubAll 5000 534445 ns/op 86448 B/op 943 allocs/op +BenchmarkBeego_GithubAll 3000 592444 ns/op 74705 B/op 812 allocs/op +BenchmarkBone_GithubAll 200 6957308 ns/op 698784 B/op 8453 allocs/op +BenchmarkDenco_GithubAll 10000 158819 ns/op 20224 B/op 167 allocs/op +BenchmarkEcho_GithubAll 10000 154700 ns/op 6496 B/op 203 allocs/op +BenchmarkGin_GithubAll 30000 48375 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubAll 3000 570806 ns/op 131656 B/op 1686 allocs/op +BenchmarkGoji_GithubAll 2000 818034 ns/op 56112 B/op 334 allocs/op +BenchmarkGojiv2_GithubAll 2000 1213973 ns/op 274768 B/op 3712 allocs/op +BenchmarkGoJsonRest_GithubAll 2000 785796 ns/op 134371 B/op 2737 allocs/op +BenchmarkGoRestful_GithubAll 300 5238188 ns/op 689672 B/op 4519 allocs/op +BenchmarkGorillaMux_GithubAll 100 10257726 ns/op 211840 B/op 2272 allocs/op +BenchmarkHttpRouter_GithubAll 20000 105414 ns/op 13792 B/op 167 allocs/op +BenchmarkHttpTreeMux_GithubAll 10000 319934 ns/op 65856 B/op 671 allocs/op +BenchmarkKocha_GithubAll 10000 209442 ns/op 23304 B/op 843 allocs/op +BenchmarkLARS_GithubAll 20000 62565 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubAll 2000 1161270 ns/op 204194 B/op 2000 allocs/op +BenchmarkMartini_GithubAll 200 9991713 ns/op 226549 B/op 2325 allocs/op +BenchmarkPat_GithubAll 200 5590793 ns/op 1499568 B/op 27435 allocs/op +BenchmarkPossum_GithubAll 10000 319768 ns/op 84448 B/op 609 allocs/op +BenchmarkR2router_GithubAll 10000 305134 ns/op 77328 B/op 979 allocs/op +BenchmarkRivet_GithubAll 10000 132134 ns/op 16272 B/op 167 allocs/op +BenchmarkTango_GithubAll 3000 552754 ns/op 63826 B/op 1618 allocs/op +BenchmarkTigerTonic_GithubAll 1000 1439483 ns/op 239104 B/op 5374 allocs/op +BenchmarkTraffic_GithubAll 100 11383067 ns/op 2659329 B/op 21848 allocs/op +BenchmarkVulcan_GithubAll 5000 394253 ns/op 19894 B/op 609 allocs/op +``` + +## Google+ + +``` +BenchmarkGin_GPlusStatic 10000000 183 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GPlusStatic 5000000 276 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusStatic 2000000 652 ns/op 104 B/op 3 allocs/op +BenchmarkBeego_GPlusStatic 1000000 2239 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GPlusStatic 5000000 380 ns/op 32 B/op 
1 allocs/op +BenchmarkDenco_GPlusStatic 30000000 45.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GPlusStatic 5000000 338 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_GPlusStatic 1000000 1158 ns/op 280 B/op 5 allocs/op +BenchmarkGoji_GPlusStatic 5000000 331 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GPlusStatic 1000000 2106 ns/op 928 B/op 7 allocs/op +BenchmarkGoJsonRest_GPlusStatic 1000000 1626 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_GPlusStatic 300000 7598 ns/op 1976 B/op 20 allocs/op +BenchmarkGorillaMux_GPlusStatic 1000000 2629 ns/op 736 B/op 10 allocs/op +BenchmarkHttpRouter_GPlusStatic 30000000 52.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusStatic 20000000 85.8 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GPlusStatic 20000000 89.2 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GPlusStatic 10000000 162 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusStatic 500000 3479 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_GPlusStatic 200000 9092 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GPlusStatic 3000000 493 ns/op 96 B/op 2 allocs/op +BenchmarkPossum_GPlusStatic 1000000 1467 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GPlusStatic 2000000 788 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GPlusStatic 20000000 114 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GPlusStatic 1000000 1534 ns/op 200 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusStatic 5000000 282 ns/op 32 B/op 1 allocs/op +BenchmarkTraffic_GPlusStatic 500000 3798 ns/op 1192 B/op 15 allocs/op +BenchmarkVulcan_GPlusStatic 2000000 1125 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusParam 3000000 528 ns/op 64 B/op 1 allocs/op +BenchmarkBear_GPlusParam 1000000 1570 ns/op 480 B/op 5 allocs/op +BenchmarkBeego_GPlusParam 1000000 2369 ns/op 368 B/op 4 allocs/op +BenchmarkBone_GPlusParam 1000000 2028 ns/op 688 B/op 5 allocs/op +BenchmarkDenco_GPlusParam 5000000 385 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlusParam 3000000 441 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GPlusParam 10000000 174 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusParam 1000000 2033 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_GPlusParam 1000000 1399 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlusParam 1000000 2641 ns/op 944 B/op 8 allocs/op +BenchmarkGoJsonRest_GPlusParam 1000000 2824 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_GPlusParam 200000 8875 ns/op 2296 B/op 21 allocs/op +BenchmarkGorillaMux_GPlusParam 200000 6291 ns/op 1056 B/op 11 allocs/op +BenchmarkHttpRouter_GPlusParam 5000000 316 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_GPlusParam 1000000 1129 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_GPlusParam 3000000 538 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_GPlusParam 10000000 198 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusParam 500000 3554 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GPlusParam 200000 9831 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_GPlusParam 1000000 2706 ns/op 688 B/op 12 allocs/op +BenchmarkPossum_GPlusParam 1000000 2297 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GPlusParam 1000000 1318 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlusParam 5000000 399 ns/op 48 B/op 1 allocs/op +BenchmarkTango_GPlusParam 1000000 2070 ns/op 264 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusParam 500000 4853 ns/op 1056 B/op 17 allocs/op +BenchmarkTraffic_GPlusParam 200000 8278 ns/op 1976 B/op 21 allocs/op +BenchmarkVulcan_GPlusParam 1000000 1243 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlus2Params 3000000 549 ns/op 64 B/op 1 allocs/op +BenchmarkBear_GPlus2Params 1000000 2112 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GPlus2Params 500000 2750 ns/op 368 
B/op 4 allocs/op +BenchmarkBone_GPlus2Params 300000 7032 ns/op 1040 B/op 9 allocs/op +BenchmarkDenco_GPlus2Params 3000000 502 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlus2Params 3000000 641 ns/op 32 B/op 1 allocs/op +BenchmarkGin_GPlus2Params 5000000 250 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlus2Params 1000000 2681 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GPlus2Params 1000000 1926 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlus2Params 500000 3996 ns/op 1024 B/op 11 allocs/op +BenchmarkGoJsonRest_GPlus2Params 500000 3886 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GPlus2Params 200000 10376 ns/op 2360 B/op 21 allocs/op +BenchmarkGorillaMux_GPlus2Params 100000 14162 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_GPlus2Params 5000000 336 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_GPlus2Params 1000000 1523 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GPlus2Params 2000000 970 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GPlus2Params 5000000 238 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlus2Params 500000 4016 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_GPlus2Params 100000 21253 ns/op 1200 B/op 13 allocs/op +BenchmarkPat_GPlus2Params 200000 8632 ns/op 2256 B/op 34 allocs/op +BenchmarkPossum_GPlus2Params 1000000 2171 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_GPlus2Params 1000000 1340 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlus2Params 3000000 557 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GPlus2Params 1000000 2186 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GPlus2Params 200000 9060 ns/op 1488 B/op 24 allocs/op +BenchmarkTraffic_GPlus2Params 100000 20324 ns/op 3272 B/op 31 allocs/op +BenchmarkVulcan_GPlus2Params 1000000 2039 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusAll 300000 6603 ns/op 640 B/op 11 allocs/op +BenchmarkBear_GPlusAll 100000 22363 ns/op 5488 B/op 61 allocs/op +BenchmarkBeego_GPlusAll 50000 38757 ns/op 4784 B/op 52 allocs/op +BenchmarkBone_GPlusAll 20000 54916 ns/op 10336 B/op 98 allocs/op +BenchmarkDenco_GPlusAll 300000 4959 ns/op 672 B/op 11 allocs/op +BenchmarkEcho_GPlusAll 200000 6558 ns/op 416 B/op 13 allocs/op +BenchmarkGin_GPlusAll 500000 2757 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusAll 50000 34615 ns/op 8040 B/op 103 allocs/op +BenchmarkGoji_GPlusAll 100000 16002 ns/op 3696 B/op 22 allocs/op +BenchmarkGojiv2_GPlusAll 50000 35060 ns/op 12624 B/op 115 allocs/op +BenchmarkGoJsonRest_GPlusAll 50000 41479 ns/op 8117 B/op 170 allocs/op +BenchmarkGoRestful_GPlusAll 10000 131653 ns/op 32024 B/op 275 allocs/op +BenchmarkGorillaMux_GPlusAll 10000 101380 ns/op 13296 B/op 142 allocs/op +BenchmarkHttpRouter_GPlusAll 500000 3711 ns/op 640 B/op 11 allocs/op +BenchmarkHttpTreeMux_GPlusAll 100000 14438 ns/op 4032 B/op 38 allocs/op +BenchmarkKocha_GPlusAll 200000 8039 ns/op 976 B/op 43 allocs/op +BenchmarkLARS_GPlusAll 500000 2630 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusAll 30000 51123 ns/op 13152 B/op 128 allocs/op +BenchmarkMartini_GPlusAll 10000 176157 ns/op 14016 B/op 145 allocs/op +BenchmarkPat_GPlusAll 20000 69911 ns/op 16576 B/op 298 allocs/op +BenchmarkPossum_GPlusAll 100000 20716 ns/op 5408 B/op 39 allocs/op +BenchmarkR2router_GPlusAll 100000 17463 ns/op 5040 B/op 63 allocs/op +BenchmarkRivet_GPlusAll 300000 5142 ns/op 768 B/op 11 allocs/op +BenchmarkTango_GPlusAll 50000 27321 ns/op 3656 B/op 104 allocs/op +BenchmarkTigerTonic_GPlusAll 20000 77597 ns/op 14512 B/op 288 allocs/op +BenchmarkTraffic_GPlusAll 10000 151406 ns/op 37360 B/op 392 allocs/op +BenchmarkVulcan_GPlusAll 100000 18555 ns/op 1274 B/op 39 allocs/op +``` + +## 
Parse.com + +``` +BenchmarkGin_ParseStatic 10000000 133 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_ParseStatic 5000000 241 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseStatic 2000000 728 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_ParseStatic 1000000 2623 ns/op 368 B/op 4 allocs/op +BenchmarkBone_ParseStatic 1000000 1285 ns/op 144 B/op 3 allocs/op +BenchmarkDenco_ParseStatic 30000000 57.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_ParseStatic 5000000 342 ns/op 32 B/op 1 allocs/op +BenchmarkGocraftWeb_ParseStatic 1000000 1478 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_ParseStatic 3000000 415 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_ParseStatic 1000000 2087 ns/op 928 B/op 7 allocs/op +BenchmarkGoJsonRest_ParseStatic 1000000 1712 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_ParseStatic 200000 11072 ns/op 3224 B/op 22 allocs/op +BenchmarkGorillaMux_ParseStatic 500000 4129 ns/op 752 B/op 11 allocs/op +BenchmarkHttpRouter_ParseStatic 30000000 52.4 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseStatic 20000000 109 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_ParseStatic 20000000 81.8 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_ParseStatic 10000000 150 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseStatic 1000000 3288 ns/op 768 B/op 9 allocs/op +BenchmarkMartini_ParseStatic 200000 9110 ns/op 768 B/op 9 allocs/op +BenchmarkPat_ParseStatic 1000000 1135 ns/op 240 B/op 5 allocs/op +BenchmarkPossum_ParseStatic 1000000 1557 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_ParseStatic 2000000 730 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_ParseStatic 10000000 121 ns/op 0 B/op 0 allocs/op +BenchmarkTango_ParseStatic 1000000 1688 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_ParseStatic 3000000 427 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_ParseStatic 500000 5962 ns/op 1816 B/op 20 allocs/op +BenchmarkVulcan_ParseStatic 2000000 969 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseParam 3000000 497 ns/op 64 B/op 1 allocs/op +BenchmarkBear_ParseParam 1000000 1473 ns/op 467 B/op 5 allocs/op +BenchmarkBeego_ParseParam 1000000 2384 ns/op 368 B/op 4 allocs/op +BenchmarkBone_ParseParam 1000000 2513 ns/op 768 B/op 6 allocs/op +BenchmarkDenco_ParseParam 5000000 364 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_ParseParam 5000000 418 ns/op 32 B/op 1 allocs/op +BenchmarkGin_ParseParam 10000000 163 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseParam 1000000 2361 ns/op 664 B/op 8 allocs/op +BenchmarkGoji_ParseParam 1000000 1590 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParseParam 1000000 2851 ns/op 976 B/op 9 allocs/op +BenchmarkGoJsonRest_ParseParam 1000000 2965 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_ParseParam 200000 12207 ns/op 3544 B/op 23 allocs/op +BenchmarkGorillaMux_ParseParam 500000 5187 ns/op 1088 B/op 12 allocs/op +BenchmarkHttpRouter_ParseParam 5000000 275 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_ParseParam 1000000 1108 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParseParam 3000000 495 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParseParam 10000000 192 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseParam 500000 4103 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_ParseParam 200000 9878 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_ParseParam 500000 3657 ns/op 1120 B/op 17 allocs/op +BenchmarkPossum_ParseParam 1000000 2084 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_ParseParam 1000000 1251 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParseParam 5000000 335 ns/op 48 B/op 1 allocs/op +BenchmarkTango_ParseParam 1000000 1854 ns/op 280 B/op 8 allocs/op +BenchmarkTigerTonic_ParseParam 500000 4582 ns/op 1008 
B/op 17 allocs/op +BenchmarkTraffic_ParseParam 200000 8125 ns/op 2248 B/op 23 allocs/op +BenchmarkVulcan_ParseParam 1000000 1148 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Parse2Params 3000000 539 ns/op 64 B/op 1 allocs/op +BenchmarkBear_Parse2Params 1000000 1778 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_Parse2Params 1000000 2519 ns/op 368 B/op 4 allocs/op +BenchmarkBone_Parse2Params 1000000 2596 ns/op 720 B/op 5 allocs/op +BenchmarkDenco_Parse2Params 3000000 492 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_Parse2Params 3000000 484 ns/op 32 B/op 1 allocs/op +BenchmarkGin_Parse2Params 10000000 193 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Parse2Params 1000000 2575 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_Parse2Params 1000000 1373 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Parse2Params 500000 2416 ns/op 960 B/op 8 allocs/op +BenchmarkGoJsonRest_Parse2Params 300000 3452 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_Parse2Params 100000 17719 ns/op 6008 B/op 25 allocs/op +BenchmarkGorillaMux_Parse2Params 300000 5102 ns/op 1088 B/op 11 allocs/op +BenchmarkHttpRouter_Parse2Params 5000000 303 ns/op 64 B/op 1 allocs/op +BenchmarkHttpTreeMux_Parse2Params 1000000 1372 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_Parse2Params 2000000 874 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_Parse2Params 10000000 192 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Parse2Params 500000 3871 ns/op 1056 B/op 10 allocs/op +BenchmarkMartini_Parse2Params 200000 9954 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_Parse2Params 500000 4194 ns/op 832 B/op 17 allocs/op +BenchmarkPossum_Parse2Params 1000000 2121 ns/op 560 B/op 6 allocs/op +BenchmarkR2router_Parse2Params 1000000 1415 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Parse2Params 3000000 457 ns/op 96 B/op 1 allocs/op +BenchmarkTango_Parse2Params 1000000 1914 ns/op 312 B/op 8 allocs/op +BenchmarkTigerTonic_Parse2Params 300000 6895 ns/op 1408 B/op 24 allocs/op +BenchmarkTraffic_Parse2Params 200000 8317 ns/op 2040 B/op 22 allocs/op +BenchmarkVulcan_Parse2Params 1000000 1274 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseAll 200000 10401 ns/op 640 B/op 16 allocs/op +BenchmarkBear_ParseAll 50000 37743 ns/op 8928 B/op 110 allocs/op +BenchmarkBeego_ParseAll 20000 63193 ns/op 9568 B/op 104 allocs/op +BenchmarkBone_ParseAll 20000 61767 ns/op 14160 B/op 131 allocs/op +BenchmarkDenco_ParseAll 300000 7036 ns/op 928 B/op 16 allocs/op +BenchmarkEcho_ParseAll 200000 11824 ns/op 832 B/op 26 allocs/op +BenchmarkGin_ParseAll 300000 4199 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseAll 30000 51758 ns/op 13728 B/op 181 allocs/op +BenchmarkGoji_ParseAll 50000 29614 ns/op 5376 B/op 32 allocs/op +BenchmarkGojiv2_ParseAll 20000 68676 ns/op 24464 B/op 199 allocs/op +BenchmarkGoJsonRest_ParseAll 20000 76135 ns/op 13866 B/op 321 allocs/op +BenchmarkGoRestful_ParseAll 5000 389487 ns/op 110928 B/op 600 allocs/op +BenchmarkGorillaMux_ParseAll 10000 221250 ns/op 24864 B/op 292 allocs/op +BenchmarkHttpRouter_ParseAll 200000 6444 ns/op 640 B/op 16 allocs/op +BenchmarkHttpTreeMux_ParseAll 50000 30702 ns/op 5728 B/op 51 allocs/op +BenchmarkKocha_ParseAll 200000 13712 ns/op 1112 B/op 54 allocs/op +BenchmarkLARS_ParseAll 300000 6925 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseAll 20000 96278 ns/op 24576 B/op 250 allocs/op +BenchmarkMartini_ParseAll 5000 271352 ns/op 25072 B/op 253 allocs/op +BenchmarkPat_ParseAll 20000 74941 ns/op 17264 B/op 343 allocs/op +BenchmarkPossum_ParseAll 50000 39947 ns/op 10816 B/op 78 allocs/op +BenchmarkR2router_ParseAll 50000 42479 ns/op 8352 B/op 120 allocs/op 
+BenchmarkRivet_ParseAll 200000 7726 ns/op 912 B/op 16 allocs/op +BenchmarkTango_ParseAll 30000 50014 ns/op 7168 B/op 208 allocs/op +BenchmarkTigerTonic_ParseAll 10000 106550 ns/op 19728 B/op 379 allocs/op +BenchmarkTraffic_ParseAll 10000 216037 ns/op 57776 B/op 642 allocs/op +BenchmarkVulcan_ParseAll 50000 34379 ns/op 2548 B/op 78 allocs/op +``` diff --git a/backend/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/backend/vendor/github.com/gin-gonic/gin/CHANGELOG.md new file mode 100644 index 00000000..8ea2495d --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/CHANGELOG.md @@ -0,0 +1,269 @@ + +### Gin 1.4.0 + +- [NEW] Support for [Go Modules](https://github.com/golang/go/wiki/Modules) [#1569](https://github.com/gin-gonic/gin/pull/1569) +- [NEW] Refactor of form mapping multipart requesta [#1829](https://github.com/gin-gonic/gin/pull/1829) +- [FIX] Truncate Latency precision in long running request [#1830](https://github.com/gin-gonic/gin/pull/1830) +- [FIX] IsTerm flag should not be affected by DisableConsoleColor method. [#1802](https://github.com/gin-gonic/gin/pull/1802) +- [NEW] Supporting file binding [#1264](https://github.com/gin-gonic/gin/pull/1264) +- [NEW] Add support for mapping arrays [#1797](https://github.com/gin-gonic/gin/pull/1797) +- [FIX] Readme updates [#1793](https://github.com/gin-gonic/gin/pull/1793) [#1788](https://github.com/gin-gonic/gin/pull/1788) [1789](https://github.com/gin-gonic/gin/pull/1789) +- [FIX] StaticFS: Fixed Logging two log lines on 404. [#1805](https://github.com/gin-gonic/gin/pull/1805), [#1804](https://github.com/gin-gonic/gin/pull/1804) +- [NEW] Make context.Keys available as LogFormatterParams [#1779](https://github.com/gin-gonic/gin/pull/1779) +- [NEW] Use internal/json for Marshal/Unmarshal [#1791](https://github.com/gin-gonic/gin/pull/1791) +- [NEW] Support mapping time.Duration [#1794](https://github.com/gin-gonic/gin/pull/1794) +- [NEW] Refactor form mappings [#1749](https://github.com/gin-gonic/gin/pull/1749) +- [NEW] Added flag to context.Stream indicates if client disconnected in middle of stream [#1252](https://github.com/gin-gonic/gin/pull/1252) +- [FIX] Moved [examples](https://github.com/gin-gonic/examples) to stand alone Repo [#1775](https://github.com/gin-gonic/gin/pull/1775) +- [NEW] Extend context.File to allow for the content-dispositon attachments via a new method context.Attachment [#1260](https://github.com/gin-gonic/gin/pull/1260) +- [FIX] Support HTTP content negotiation wildcards [#1112](https://github.com/gin-gonic/gin/pull/1112) +- [NEW] Add prefix from X-Forwarded-Prefix in redirectTrailingSlash [#1238](https://github.com/gin-gonic/gin/pull/1238) +- [FIX] context.Copy() race condition [#1020](https://github.com/gin-gonic/gin/pull/1020) +- [NEW] Add context.HandlerNames() [#1729](https://github.com/gin-gonic/gin/pull/1729) +- [FIX] Change color methods to public in the defaultLogger. [#1771](https://github.com/gin-gonic/gin/pull/1771) +- [FIX] Update writeHeaders method to use http.Header.Set [#1722](https://github.com/gin-gonic/gin/pull/1722) +- [NEW] Add response size to LogFormatterParams [#1752](https://github.com/gin-gonic/gin/pull/1752) +- [NEW] Allow ignoring field on form mapping [#1733](https://github.com/gin-gonic/gin/pull/1733) +- [NEW] Add a function to force color in console output. [#1724](https://github.com/gin-gonic/gin/pull/1724) +- [FIX] Context.Next() - recheck len of handlers on every iteration. 
[#1745](https://github.com/gin-gonic/gin/pull/1745) +- [FIX] Fix all errcheck warnings [#1739](https://github.com/gin-gonic/gin/pull/1739) [#1653](https://github.com/gin-gonic/gin/pull/1653) +- [NEW] context: inherits context cancellation and deadline from http.Request context for Go>=1.7 [#1690](https://github.com/gin-gonic/gin/pull/1690) +- [NEW] Binding for URL Params [#1694](https://github.com/gin-gonic/gin/pull/1694) +- [NEW] Add LoggerWithFormatter method [#1677](https://github.com/gin-gonic/gin/pull/1677) +- [FIX] CI testing updates [#1671](https://github.com/gin-gonic/gin/pull/1671) [#1670](https://github.com/gin-gonic/gin/pull/1670) [#1682](https://github.com/gin-gonic/gin/pull/1682) [#1669](https://github.com/gin-gonic/gin/pull/1669) +- [FIX] StaticFS(): Send 404 when path does not exist [#1663](https://github.com/gin-gonic/gin/pull/1663) +- [FIX] Handle nil body for JSON binding [#1638](https://github.com/gin-gonic/gin/pull/1638) +- [FIX] Support bind uri param [#1612](https://github.com/gin-gonic/gin/pull/1612) +- [FIX] recovery: fix issue with syscall import on google app engine [#1640](https://github.com/gin-gonic/gin/pull/1640) +- [FIX] Make sure the debug log contains line breaks [#1650](https://github.com/gin-gonic/gin/pull/1650) +- [FIX] Panic stack trace being printed during recovery of broken pipe [#1089](https://github.com/gin-gonic/gin/pull/1089) [#1259](https://github.com/gin-gonic/gin/pull/1259) +- [NEW] RunFd method to run http.Server through a file descriptor [#1609](https://github.com/gin-gonic/gin/pull/1609) +- [NEW] Yaml binding support [#1618](https://github.com/gin-gonic/gin/pull/1618) +- [FIX] Pass MaxMultipartMemory when FormFile is called [#1600](https://github.com/gin-gonic/gin/pull/1600) +- [FIX] LoadHTML* tests [#1559](https://github.com/gin-gonic/gin/pull/1559) +- [FIX] Removed use of sync.pool from HandleContext [#1565](https://github.com/gin-gonic/gin/pull/1565) +- [FIX] Format output log to os.Stderr [#1571](https://github.com/gin-gonic/gin/pull/1571) +- [FIX] Make logger use a yellow background and a darkgray text for legibility [#1570](https://github.com/gin-gonic/gin/pull/1570) +- [FIX] Remove sensitive request information from panic log. 
[#1370](https://github.com/gin-gonic/gin/pull/1370) +- [FIX] log.Println() does not print timestamp [#829](https://github.com/gin-gonic/gin/pull/829) [#1560](https://github.com/gin-gonic/gin/pull/1560) +- [NEW] Add PureJSON renderer [#694](https://github.com/gin-gonic/gin/pull/694) +- [FIX] Add missing copyright and update if/else [#1497](https://github.com/gin-gonic/gin/pull/1497) +- [FIX] Update msgpack usage [#1498](https://github.com/gin-gonic/gin/pull/1498) +- [FIX] Use protobuf on render [#1496](https://github.com/gin-gonic/gin/pull/1496) +- [FIX] Add support for Protobuf format response [#1479](https://github.com/gin-gonic/gin/pull/1479) +- [NEW] Set default time format in form binding [#1487](https://github.com/gin-gonic/gin/pull/1487) +- [FIX] Add BindXML and ShouldBindXML [#1485](https://github.com/gin-gonic/gin/pull/1485) +- [NEW] Upgrade dependency libraries [#1491](https://github.com/gin-gonic/gin/pull/1491) + + +### Gin 1.3.0 + +- [NEW] Add [`func (*Context) QueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.QueryMap), [`func (*Context) GetQueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetQueryMap), [`func (*Context) PostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.PostFormMap) and [`func (*Context) GetPostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetPostFormMap) to support `type map[string]string` as query string or form parameters, see [#1383](https://github.com/gin-gonic/gin/pull/1383) +- [NEW] Add [`func (*Context) AsciiJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.AsciiJSON), see [#1358](https://github.com/gin-gonic/gin/pull/1358) +- [NEW] Add `Pusher()` in [`type ResponseWriter`](https://godoc.org/github.com/gin-gonic/gin#ResponseWriter) for supporting http2 push, see [#1273](https://github.com/gin-gonic/gin/pull/1273) +- [NEW] Add [`func (*Context) DataFromReader`](https://godoc.org/github.com/gin-gonic/gin#Context.DataFromReader) for serving dynamic data, see [#1304](https://github.com/gin-gonic/gin/pull/1304) +- [NEW] Add [`func (*Context) ShouldBindBodyWith`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindBodyWith) allowing to call binding multiple times, see [#1341](https://github.com/gin-gonic/gin/pull/1341) +- [NEW] Support pointers in form binding, see [#1336](https://github.com/gin-gonic/gin/pull/1336) +- [NEW] Add [`func (*Context) JSONP`](https://godoc.org/github.com/gin-gonic/gin#Context.JSONP), see [#1333](https://github.com/gin-gonic/gin/pull/1333) +- [NEW] Support default value in form binding, see [#1138](https://github.com/gin-gonic/gin/pull/1138) +- [NEW] Expose validator engine in [`type StructValidator`](https://godoc.org/github.com/gin-gonic/gin/binding#StructValidator), see [#1277](https://github.com/gin-gonic/gin/pull/1277) +- [NEW] Add [`func (*Context) ShouldBind`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBind), [`func (*Context) ShouldBindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindQuery) and [`func (*Context) ShouldBindJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindJSON), see [#1047](https://github.com/gin-gonic/gin/pull/1047) +- [NEW] Add support for `time.Time` location in form binding, see [#1117](https://github.com/gin-gonic/gin/pull/1117) +- [NEW] Add [`func (*Context) BindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.BindQuery), see [#1029](https://github.com/gin-gonic/gin/pull/1029) +- [NEW] Make [jsonite](https://github.com/json-iterator/go) optional with build tags, see 
[#1026](https://github.com/gin-gonic/gin/pull/1026) +- [NEW] Show query string in logger, see [#999](https://github.com/gin-gonic/gin/pull/999) +- [NEW] Add [`func (*Context) SecureJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.SecureJSON), see [#987](https://github.com/gin-gonic/gin/pull/987) and [#993](https://github.com/gin-gonic/gin/pull/993) +- [DEPRECATE] `func (*Context) GetCookie` for [`func (*Context) Cookie`](https://godoc.org/github.com/gin-gonic/gin#Context.Cookie) +- [FIX] Don't display color tags if [`func DisableConsoleColor`](https://godoc.org/github.com/gin-gonic/gin#DisableConsoleColor) called, see [#1072](https://github.com/gin-gonic/gin/pull/1072) +- [FIX] Gin Mode `""` when calling [`func Mode`](https://godoc.org/github.com/gin-gonic/gin#Mode) now returns `const DebugMode`, see [#1250](https://github.com/gin-gonic/gin/pull/1250) +- [FIX] `Flush()` now doesn't overwrite `responseWriter` status code, see [#1460](https://github.com/gin-gonic/gin/pull/1460) + +### Gin 1.2.0 + +- [NEW] Switch from godeps to govendor +- [NEW] Add support for Let's Encrypt via gin-gonic/autotls +- [NEW] Improve README examples and add extra at examples folder +- [NEW] Improved support with App Engine +- [NEW] Add custom template delimiters, see #860 +- [NEW] Add Template Func Maps, see #962 +- [NEW] Add \*context.Handler(), see #928 +- [NEW] Add \*context.GetRawData() +- [NEW] Add \*context.GetHeader() (request) +- [NEW] Add \*context.AbortWithStatusJSON() (JSON content type) +- [NEW] Add \*context.Keys type cast helpers +- [NEW] Add \*context.ShouldBindWith() +- [NEW] Add \*context.MustBindWith() +- [NEW] Add \*engine.SetFuncMap() +- [DEPRECATE] On next release: \*context.BindWith(), see #855 +- [FIX] Refactor render +- [FIX] Reworked tests +- [FIX] logger now supports cygwin +- [FIX] Use X-Forwarded-For before X-Real-Ip +- [FIX] time.Time binding (#904) + +### Gin 1.1.4 + +- [NEW] Support google appengine for IsTerminal func + +### Gin 1.1.3 + +- [FIX] Reverted Logger: skip ANSI color commands + +### Gin 1.1 + +- [NEW] Implement QueryArray and PostArray methods +- [NEW] Refactor GetQuery and GetPostForm +- [NEW] Add contribution guide +- [FIX] Corrected typos in README +- [FIX] Removed additional Iota +- [FIX] Changed imports to gopkg instead of github in README (#733) +- [FIX] Logger: skip ANSI color commands if output is not a tty + +### Gin 1.0rc2 (...) + +- [PERFORMANCE] Fast path for writing Content-Type. +- [PERFORMANCE] Much faster 404 routing +- [PERFORMANCE] Allocation optimizations +- [PERFORMANCE] Faster root tree lookup +- [PERFORMANCE] Zero overhead, String() and JSON() rendering. +- [PERFORMANCE] Faster ClientIP parsing +- [PERFORMANCE] Much faster SSE implementation +- [NEW] Benchmarks suite +- [NEW] Bind validation can be disabled and replaced with custom validators. +- [NEW] More flexible HTML render +- [NEW] Multipart and PostForm bindings +- [NEW] Adds method to return all the registered routes +- [NEW] Context.HandlerName() returns the main handler's name +- [NEW] Adds Error.IsType() helper +- [FIX] Binding multipart form +- [FIX] Integration tests +- [FIX] Crash when binding non struct object in Context. 
+- [FIX] RunTLS() implementation +- [FIX] Logger() unit tests +- [FIX] Adds SetHTMLTemplate() warning +- [FIX] Context.IsAborted() +- [FIX] More unit tests +- [FIX] JSON, XML, HTML renders accept custom content-types +- [FIX] gin.AbortIndex is unexported +- [FIX] Better approach to avoid directory listing in StaticFS() +- [FIX] Context.ClientIP() always returns the IP with trimmed spaces. +- [FIX] Better warning when running in debug mode. +- [FIX] Google App Engine integration. debugPrint does not use os.Stdout +- [FIX] Fixes integer overflow in error type +- [FIX] Error implements the json.Marshaller interface +- [FIX] MIT license in every file + + +### Gin 1.0rc1 (May 22, 2015) + +- [PERFORMANCE] Zero allocation router +- [PERFORMANCE] Faster JSON, XML and text rendering +- [PERFORMANCE] Custom hand optimized HttpRouter for Gin +- [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations +- [NEW] Built-in support for golang.org/x/net/context +- [NEW] Any(path, handler). Create a route that matches any path +- [NEW] Refactored rendering pipeline (faster and static typeded) +- [NEW] Refactored errors API +- [NEW] IndentedJSON() prints pretty JSON +- [NEW] Added gin.DefaultWriter +- [NEW] UNIX socket support +- [NEW] RouterGroup.BasePath is exposed +- [NEW] JSON validation using go-validate-yourself (very powerful options) +- [NEW] Completed suite of unit tests +- [NEW] HTTP streaming with c.Stream() +- [NEW] StaticFile() creates a router for serving just one file. +- [NEW] StaticFS() has an option to disable directory listing. +- [NEW] StaticFS() for serving static files through virtual filesystems +- [NEW] Server-Sent Events native support +- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler +- [NEW] Added LoggerWithWriter() middleware +- [NEW] Added RecoveryWithWriter() middleware +- [NEW] Added DefaultPostFormValue() +- [NEW] Added DefaultFormValue() +- [NEW] Added DefaultParamValue() +- [FIX] BasicAuth() when using custom realm +- [FIX] Bug when serving static files in nested routing group +- [FIX] Redirect using built-in http.Redirect() +- [FIX] Logger when printing the requested path +- [FIX] Documentation typos +- [FIX] Context.Engine renamed to Context.engine +- [FIX] Better debugging messages +- [FIX] ErrorLogger +- [FIX] Debug HTTP render +- [FIX] Refactored binding and render modules +- [FIX] Refactored Context initialization +- [FIX] Refactored BasicAuth() +- [FIX] NoMethod/NoRoute handlers +- [FIX] Hijacking http +- [FIX] Better support for Google App Engine (using log instead of fmt) + + +### Gin 0.6 (Mar 9, 2015) + +- [NEW] Support multipart/form-data +- [NEW] NoMethod handler +- [NEW] Validate sub structures +- [NEW] Support for HTTP Realm Auth +- [FIX] Unsigned integers in binding +- [FIX] Improve color logger + + +### Gin 0.5 (Feb 7, 2015) + +- [NEW] Content Negotiation +- [FIX] Solved security bug that allow a client to spoof ip +- [FIX] Fix unexported/ignored fields in binding + + +### Gin 0.4 (Aug 21, 2014) + +- [NEW] Development mode +- [NEW] Unit tests +- [NEW] Add Content.Redirect() +- [FIX] Deferring WriteHeader() +- [FIX] Improved documentation for model binding + + +### Gin 0.3 (Jul 18, 2014) + +- [PERFORMANCE] Normal log and error log are printed in the same call. +- [PERFORMANCE] Improve performance of NoRouter() +- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults. 
+- [NEW] Flexible rendering API +- [NEW] Add Context.File() +- [NEW] Add shorcut RunTLS() for http.ListenAndServeTLS +- [FIX] Rename NotFound404() to NoRoute() +- [FIX] Errors in context are purged +- [FIX] Adds HEAD method in Static file serving +- [FIX] Refactors Static() file serving +- [FIX] Using keyed initialization to fix app-engine integration +- [FIX] Can't unmarshal JSON array, #63 +- [FIX] Renaming Context.Req to Context.Request +- [FIX] Check application/x-www-form-urlencoded when parsing form + + +### Gin 0.2b (Jul 08, 2014) +- [PERFORMANCE] Using sync.Pool to allocatio/gc overhead +- [NEW] Travis CI integration +- [NEW] Completely new logger +- [NEW] New API for serving static files. gin.Static() +- [NEW] gin.H() can be serialized into XML +- [NEW] Typed errors. Errors can be typed. Internet/external/custom. +- [NEW] Support for Godeps +- [NEW] Travis/Godocs badges in README +- [NEW] New Bind() and BindWith() methods for parsing request body. +- [NEW] Add Content.Copy() +- [NEW] Add context.LastError() +- [NEW] Add shorcut for OPTIONS HTTP method +- [FIX] Tons of README fixes +- [FIX] Header is written before body +- [FIX] BasicAuth() and changes API a little bit +- [FIX] Recovery() middleware only prints panics +- [FIX] Context.Get() does not panic anymore. Use MustGet() instead. +- [FIX] Multiple http.WriteHeader() in NotFound handlers +- [FIX] Engine.Run() panics if http server can't be setted up +- [FIX] Crash when route path doesn't start with '/' +- [FIX] Do not update header when status code is negative +- [FIX] Setting response headers before calling WriteHeader in context.String() +- [FIX] Add MIT license +- [FIX] Changes behaviour of ErrorLogger() and Logger() diff --git a/backend/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md b/backend/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..4ea14f39 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at teamgingonic@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/backend/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md b/backend/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md new file mode 100644 index 00000000..547b777a --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integrations systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. diff --git a/backend/vendor/github.com/gin-gonic/gin/LICENSE b/backend/vendor/github.com/gin-gonic/gin/LICENSE new file mode 100644 index 00000000..1ff7f370 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/backend/vendor/github.com/gin-gonic/gin/Makefile b/backend/vendor/github.com/gin-gonic/gin/Makefile new file mode 100644 index 00000000..51a6b916 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/Makefile @@ -0,0 +1,80 @@ +GO ?= go +GOFMT ?= gofmt "-s" +PACKAGES ?= $(shell $(GO) list ./... | grep -v /vendor/) +VETPACKAGES ?= $(shell $(GO) list ./... | grep -v /vendor/ | grep -v /examples/) +GOFILES := $(shell find . -name "*.go" -type f -not -path "./vendor/*") +TESTFOLDER := $(shell $(GO) list ./... | grep -E 'gin$$|binding$$|render$$' | grep -v examples) + +all: install + +install: deps + govendor sync + +.PHONY: test +test: + echo "mode: count" > coverage.out + for d in $(TESTFOLDER); do \ + $(GO) test -v -covermode=count -coverprofile=profile.out $$d > tmp.out; \ + cat tmp.out; \ + if grep -q "^--- FAIL" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "build failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "setup failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + fi; \ + if [ -f profile.out ]; then \ + cat profile.out | grep -v "mode:" >> coverage.out; \ + rm profile.out; \ + fi; \ + done + +.PHONY: fmt +fmt: + $(GOFMT) -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + @diff=$$($(GOFMT) -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +vet: + $(GO) vet $(VETPACKAGES) + +deps: + @hash govendor > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/kardianos/govendor; \ + fi + +.PHONY: lint +lint: + @hash golint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u golang.org/x/lint/golint; \ + fi + for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done; + +.PHONY: misspell-check +misspell-check: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -error $(GOFILES) + +.PHONY: misspell +misspell: + @hash misspell > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -w $(GOFILES) + +.PHONY: tools +tools: + go install golang.org/x/lint/golint; \ + go install github.com/client9/misspell/cmd/misspell; diff --git a/backend/vendor/github.com/gin-gonic/gin/README.md b/backend/vendor/github.com/gin-gonic/gin/README.md new file mode 100644 index 00000000..3e817a78 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/README.md @@ -0,0 +1,2070 @@ +# Gin Web Framework + + + +[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin) +[![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) +[![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin) +[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Sourcegraph](https://sourcegraph.com/github.com/gin-gonic/gin/-/badge.svg)](https://sourcegraph.com/github.com/gin-gonic/gin?badge) +[![Open Source Helpers](https://www.codetriage.com/gin-gonic/gin/badges/users.svg)](https://www.codetriage.com/gin-gonic/gin) +[![Release](https://img.shields.io/github/release/gin-gonic/gin.svg?style=flat-square)](https://github.com/gin-gonic/gin/releases) + +Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin. 
+ + +## Contents + +- [Installation](#installation) +- [Prerequisite](#prerequisite) +- [Quick start](#quick-start) +- [Benchmarks](#benchmarks) +- [Gin v1.stable](#gin-v1-stable) +- [Build with jsoniter](#build-with-jsoniter) +- [API Examples](#api-examples) + - [Using GET,POST,PUT,PATCH,DELETE and OPTIONS](#using-get-post-put-patch-delete-and-options) + - [Parameters in path](#parameters-in-path) + - [Querystring parameters](#querystring-parameters) + - [Multipart/Urlencoded Form](#multiparturlencoded-form) + - [Another example: query + post form](#another-example-query--post-form) + - [Map as querystring or postform parameters](#map-as-querystring-or-postform-parameters) + - [Upload files](#upload-files) + - [Grouping routes](#grouping-routes) + - [Blank Gin without middleware by default](#blank-gin-without-middleware-by-default) + - [Using middleware](#using-middleware) + - [How to write log file](#how-to-write-log-file) + - [Custom Log Format](#custom-log-format) + - [Model binding and validation](#model-binding-and-validation) + - [Custom Validators](#custom-validators) + - [Only Bind Query String](#only-bind-query-string) + - [Bind Query String or Post Data](#bind-query-string-or-post-data) + - [Bind Uri](#bind-uri) + - [Bind HTML checkboxes](#bind-html-checkboxes) + - [Multipart/Urlencoded binding](#multiparturlencoded-binding) + - [XML, JSON, YAML and ProtoBuf rendering](#xml-json-yaml-and-protobuf-rendering) + - [JSONP rendering](#jsonp) + - [Serving static files](#serving-static-files) + - [Serving data from reader](#serving-data-from-reader) + - [HTML rendering](#html-rendering) + - [Multitemplate](#multitemplate) + - [Redirects](#redirects) + - [Custom Middleware](#custom-middleware) + - [Using BasicAuth() middleware](#using-basicauth-middleware) + - [Goroutines inside a middleware](#goroutines-inside-a-middleware) + - [Custom HTTP configuration](#custom-http-configuration) + - [Support Let's Encrypt](#support-lets-encrypt) + - [Run multiple service using Gin](#run-multiple-service-using-gin) + - [Graceful restart or stop](#graceful-restart-or-stop) + - [Build a single binary with templates](#build-a-single-binary-with-templates) + - [Bind form-data request with custom struct](#bind-form-data-request-with-custom-struct) + - [Try to bind body into different structs](#try-to-bind-body-into-different-structs) + - [http2 server push](#http2-server-push) + - [Define format for the log of routes](#define-format-for-the-log-of-routes) + - [Set and get a cookie](#set-and-get-a-cookie) +- [Testing](#testing) +- [Users](#users) + +## Installation + +To install Gin package, you need to install Go and set your Go workspace first. + +1. The first need [Go](https://golang.org/) installed (**version 1.8+ is required**), then you can use the below Go command to install Gin. + +```sh +$ go get -u github.com/gin-gonic/gin +``` + +2. Import it in your code: + +```go +import "github.com/gin-gonic/gin" +``` + +3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. + +```go +import "net/http" +``` + +### Use a vendor tool like [Govendor](https://github.com/kardianos/govendor) + +1. `go get` govendor + +```sh +$ go get github.com/kardianos/govendor +``` +2. Create your project folder and `cd` inside + +```sh +$ mkdir -p $GOPATH/src/github.com/myusername/project && cd "$_" +``` + +3. Vendor init your project and add gin + +```sh +$ govendor init +$ govendor fetch github.com/gin-gonic/gin@v1.3 +``` + +4. 
Copy a starting template inside your project + +```sh +$ curl https://raw.githubusercontent.com/gin-gonic/examples/master/basic/main.go > main.go +``` + +5. Run your project + +```sh +$ go run main.go +``` + +## Prerequisite + +Now Gin requires Go 1.6 or later and Go 1.7 will be required soon. + +## Quick start + +```sh +# assume the following codes in example.go file +$ cat example.go +``` + +```go +package main + +import "github.com/gin-gonic/gin" + +func main() { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.JSON(200, gin.H{ + "message": "pong", + }) + }) + r.Run() // listen and serve on 0.0.0.0:8080 +} +``` + +``` +# run example.go and visit 0.0.0.0:8080/ping on browser +$ go run example.go +``` + +## Benchmarks + +Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) + +[See all benchmarks](/BENCHMARKS.md) + +Benchmark name | (1) | (2) | (3) | (4) +--------------------------------------------|-----------:|------------:|-----------:|---------: +**BenchmarkGin_GithubAll** | **30000** | **48375** | **0** | **0** +BenchmarkAce_GithubAll | 10000 | 134059 | 13792 | 167 +BenchmarkBear_GithubAll | 5000 | 534445 | 86448 | 943 +BenchmarkBeego_GithubAll | 3000 | 592444 | 74705 | 812 +BenchmarkBone_GithubAll | 200 | 6957308 | 698784 | 8453 +BenchmarkDenco_GithubAll | 10000 | 158819 | 20224 | 167 +BenchmarkEcho_GithubAll | 10000 | 154700 | 6496 | 203 +BenchmarkGocraftWeb_GithubAll | 3000 | 570806 | 131656 | 1686 +BenchmarkGoji_GithubAll | 2000 | 818034 | 56112 | 334 +BenchmarkGojiv2_GithubAll | 2000 | 1213973 | 274768 | 3712 +BenchmarkGoJsonRest_GithubAll | 2000 | 785796 | 134371 | 2737 +BenchmarkGoRestful_GithubAll | 300 | 5238188 | 689672 | 4519 +BenchmarkGorillaMux_GithubAll | 100 | 10257726 | 211840 | 2272 +BenchmarkHttpRouter_GithubAll | 20000 | 105414 | 13792 | 167 +BenchmarkHttpTreeMux_GithubAll | 10000 | 319934 | 65856 | 671 +BenchmarkKocha_GithubAll | 10000 | 209442 | 23304 | 843 +BenchmarkLARS_GithubAll | 20000 | 62565 | 0 | 0 +BenchmarkMacaron_GithubAll | 2000 | 1161270 | 204194 | 2000 +BenchmarkMartini_GithubAll | 200 | 9991713 | 226549 | 2325 +BenchmarkPat_GithubAll | 200 | 5590793 | 1499568 | 27435 +BenchmarkPossum_GithubAll | 10000 | 319768 | 84448 | 609 +BenchmarkR2router_GithubAll | 10000 | 305134 | 77328 | 979 +BenchmarkRivet_GithubAll | 10000 | 132134 | 16272 | 167 +BenchmarkTango_GithubAll | 3000 | 552754 | 63826 | 1618 +BenchmarkTigerTonic_GithubAll | 1000 | 1439483 | 239104 | 5374 +BenchmarkTraffic_GithubAll | 100 | 11383067 | 2659329 | 21848 +BenchmarkVulcan_GithubAll | 5000 | 394253 | 19894 | 609 + +- (1): Total Repetitions achieved in constant time, higher means more confident result +- (2): Single Repetition Duration (ns/op), lower is better +- (3): Heap Memory (B/op), lower is better +- (4): Average Allocations per Repetition (allocs/op), lower is better + +## Gin v1. stable + +- [x] Zero allocation router. +- [x] Still the fastest http router and framework. From routing to writing. +- [x] Complete suite of unit tests +- [x] Battle tested +- [x] API frozen, new releases will not break your code. + +## Build with [jsoniter](https://github.com/json-iterator/go) + +Gin uses `encoding/json` as default json package but you can change to [jsoniter](https://github.com/json-iterator/go) by build from other tags. + +```sh +$ go build -tags=jsoniter . +``` + +## API Examples + +You can find a number of ready-to-run examples at [Gin examples repository](https://github.com/gin-gonic/examples). 
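+
+The route snippets below reference small handler functions (`getting`, `posting`, `loginEndpoint`, and so on) without defining them. They are ordinary `gin.HandlerFunc` values; a minimal, purely illustrative sketch (assuming the usual `gin` and `net/http` imports) might look like this:
+
+```go
+// getting is a placeholder handler used by the routing examples below.
+func getting(c *gin.Context) {
+	// Answer with a small JSON body so the route is easy to exercise with curl.
+	c.JSON(http.StatusOK, gin.H{"method": "GET"})
+}
+```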
+ +### Using GET, POST, PUT, PATCH, DELETE and OPTIONS + +```go +func main() { + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/someGet", getting) + router.POST("/somePost", posting) + router.PUT("/somePut", putting) + router.DELETE("/someDelete", deleting) + router.PATCH("/somePatch", patching) + router.HEAD("/someHead", head) + router.OPTIONS("/someOptions", options) + + // By default it serves on :8080 unless a + // PORT environment variable was defined. + router.Run() + // router.Run(":3000") for a hard coded port +} +``` + +### Parameters in path + +```go +func main() { + router := gin.Default() + + // This handler will match /user/john but will not match /user/ or /user + router.GET("/user/:name", func(c *gin.Context) { + name := c.Param("name") + c.String(http.StatusOK, "Hello %s", name) + }) + + // However, this one will match /user/john/ and also /user/john/send + // If no other routers match /user/john, it will redirect to /user/john/ + router.GET("/user/:name/*action", func(c *gin.Context) { + name := c.Param("name") + action := c.Param("action") + message := name + " is " + action + c.String(http.StatusOK, message) + }) + + router.Run(":8080") +} +``` + +### Querystring parameters + +```go +func main() { + router := gin.Default() + + // Query string parameters are parsed using the existing underlying request object. + // The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe + router.GET("/welcome", func(c *gin.Context) { + firstname := c.DefaultQuery("firstname", "Guest") + lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname") + + c.String(http.StatusOK, "Hello %s %s", firstname, lastname) + }) + router.Run(":8080") +} +``` + +### Multipart/Urlencoded Form + +```go +func main() { + router := gin.Default() + + router.POST("/form_post", func(c *gin.Context) { + message := c.PostForm("message") + nick := c.DefaultPostForm("nick", "anonymous") + + c.JSON(200, gin.H{ + "status": "posted", + "message": message, + "nick": nick, + }) + }) + router.Run(":8080") +} +``` + +### Another example: query + post form + +``` +POST /post?id=1234&page=1 HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +name=manu&message=this_is_great +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c *gin.Context) { + + id := c.Query("id") + page := c.DefaultQuery("page", "0") + name := c.PostForm("name") + message := c.PostForm("message") + + fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message) + }) + router.Run(":8080") +} +``` + +``` +id: 1234; page: 1; name: manu; message: this_is_great +``` + +### Map as querystring or postform parameters + +``` +POST /post?ids[a]=1234&ids[b]=hello HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +names[first]=thinkerou&names[second]=tianou +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c *gin.Context) { + + ids := c.QueryMap("ids") + names := c.PostFormMap("names") + + fmt.Printf("ids: %v; names: %v", ids, names) + }) + router.Run(":8080") +} +``` + +``` +ids: map[b:hello a:1234], names: map[second:tianou first:thinkerou] +``` + +### Upload files + +#### Single file + +References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/single). + +`file.Filename` **SHOULD NOT** be trusted. 
See [`Content-Disposition` on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition#Directives) and [#1693](https://github.com/gin-gonic/gin/issues/1693) + +> The filename is always optional and must not be used blindly by the application: path information should be stripped, and conversion to the server file system rules should be done. + +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + // router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // single file + file, _ := c.FormFile("file") + log.Println(file.Filename) + + // Upload the file to specific dst. + // c.SaveUploadedFile(file, dst) + + c.String(http.StatusOK, fmt.Sprintf("'%s' uploaded!", file.Filename)) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "file=@/Users/appleboy/test.zip" \ + -H "Content-Type: multipart/form-data" +``` + +#### Multiple files + +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/multiple). + +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + // router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // Multipart form + form, _ := c.MultipartForm() + files := form.File["upload[]"] + + for _, file := range files { + log.Println(file.Filename) + + // Upload the file to specific dst. + // c.SaveUploadedFile(file, dst) + } + c.String(http.StatusOK, fmt.Sprintf("%d files uploaded!", len(files))) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "upload[]=@/Users/appleboy/test1.zip" \ + -F "upload[]=@/Users/appleboy/test2.zip" \ + -H "Content-Type: multipart/form-data" +``` + +### Grouping routes + +```go +func main() { + router := gin.Default() + + // Simple group: v1 + v1 := router.Group("/v1") + { + v1.POST("/login", loginEndpoint) + v1.POST("/submit", submitEndpoint) + v1.POST("/read", readEndpoint) + } + + // Simple group: v2 + v2 := router.Group("/v2") + { + v2.POST("/login", loginEndpoint) + v2.POST("/submit", submitEndpoint) + v2.POST("/read", readEndpoint) + } + + router.Run(":8080") +} +``` + +### Blank Gin without middleware by default + +Use + +```go +r := gin.New() +``` + +instead of + +```go +// Default With the Logger and Recovery middleware already attached +r := gin.Default() +``` + + +### Using middleware +```go +func main() { + // Creates a router without any middleware by default + r := gin.New() + + // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout + r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. + r.Use(gin.Recovery()) + + // Per route middleware, you can add as many as you desire. + r.GET("/benchmark", MyBenchLogger(), benchEndpoint) + + // Authorization group + // authorized := r.Group("/", AuthRequired()) + // exactly the same as: + authorized := r.Group("/") + // per group middleware! in this case we use the custom created + // AuthRequired() middleware just in the "authorized" group. 
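+	//
+	// AuthRequired() itself is not defined in this README. A hedged, purely
+	// illustrative sketch of such a middleware (any gin.HandlerFunc works):
+	//
+	//	func AuthRequired() gin.HandlerFunc {
+	//		return func(c *gin.Context) {
+	//			if c.GetHeader("Authorization") == "" {
+	//				// no credentials: stop the handler chain with a 401
+	//				c.AbortWithStatus(http.StatusUnauthorized)
+	//				return
+	//			}
+	//			c.Next()
+	//		}
+	//	}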
+ authorized.Use(AuthRequired()) + { + authorized.POST("/login", loginEndpoint) + authorized.POST("/submit", submitEndpoint) + authorized.POST("/read", readEndpoint) + + // nested group + testing := authorized.Group("testing") + testing.GET("/analytics", analyticsEndpoint) + } + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### How to write log file +```go +func main() { + // Disable Console Color, you don't need console color when writing the logs to file. + gin.DisableConsoleColor() + + // Logging to a file. + f, _ := os.Create("gin.log") + gin.DefaultWriter = io.MultiWriter(f) + + // Use the following code if you need to write the logs to file and console at the same time. + // gin.DefaultWriter = io.MultiWriter(f, os.Stdout) + + router := gin.Default() + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + +    router.Run(":8080") +} +``` + +### Custom Log Format +```go +func main() { + router := gin.New() + + // LoggerWithFormatter middleware will write the logs to gin.DefaultWriter + // By default gin.DefaultWriter = os.Stdout + router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { + + // your custom format + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + param.ClientIP, + param.TimeStamp.Format(time.RFC1123), + param.Method, + param.Path, + param.Request.Proto, + param.StatusCode, + param.Latency, + param.Request.UserAgent(), + param.ErrorMessage, + ) + })) + router.Use(gin.Recovery()) + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +**Sample Output** +``` +::1 - [Fri, 07 Dec 2018 17:04:38 JST] "GET /ping HTTP/1.1 200 122.767µs "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36" " +``` + +### Controlling Log output coloring + +By default, logs output on console should be colorized depending on the detected TTY. + +Never colorize logs: + +```go +func main() { + // Disable log's color + gin.DisableConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +Always colorize logs: + +```go +func main() { + // Force log's color + gin.ForceConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +### Model binding and validation + +To bind a request body into a type, use model binding. We currently support binding of JSON, XML, YAML and standard form values (foo=bar&boo=baz). + +Gin uses [**go-playground/validator.v8**](https://github.com/go-playground/validator) for validation. Check the full docs on tags usage [here](http://godoc.org/gopkg.in/go-playground/validator.v8#hdr-Baked_In_Validators_and_Tags). + +Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`. + +Also, Gin provides two sets of methods for binding: +- **Type** - Must bind + - **Methods** - `Bind`, `BindJSON`, `BindXML`, `BindQuery`, `BindYAML` + - **Behavior** - These methods use `MustBindWith` under the hood. If there is a binding error, the request is aborted with `c.AbortWithError(400, err).SetType(ErrorTypeBind)`. 
This sets the response status code to 400 and the `Content-Type` header is set to `text/plain; charset=utf-8`. Note that if you try to set the response code after this, it will result in a warning `[GIN-debug] [WARNING] Headers were already written. Wanted to override status code 400 with 422`. If you wish to have greater control over the behavior, consider using the `ShouldBind` equivalent method. +- **Type** - Should bind + - **Methods** - `ShouldBind`, `ShouldBindJSON`, `ShouldBindXML`, `ShouldBindQuery`, `ShouldBindYAML` + - **Behavior** - These methods use `ShouldBindWith` under the hood. If there is a binding error, the error is returned and it is the developer's responsibility to handle the request and error appropriately. + +When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use `MustBindWith` or `ShouldBindWith`. + +You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, an error will be returned. + +```go +// Binding from JSON +type Login struct { + User string `form:"user" json:"user" xml:"user" binding:"required"` + Password string `form:"password" json:"password" xml:"password" binding:"required"` +} + +func main() { + router := gin.Default() + + // Example for binding JSON ({"user": "manu", "password": "123"}) + router.POST("/loginJSON", func(c *gin.Context) { + var json Login + if err := c.ShouldBindJSON(&json); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if json.User != "manu" || json.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Example for binding XML ( + // + // + // user + // 123 + // ) + router.POST("/loginXML", func(c *gin.Context) { + var xml Login + if err := c.ShouldBindXML(&xml); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if xml.User != "manu" || xml.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Example for binding a HTML form (user=manu&password=123) + router.POST("/loginForm", func(c *gin.Context) { + var form Login + // This will infer what binder to use depending on the content-type header. 
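+		// For this form example that means a request sent as
+		// application/x-www-form-urlencoded or multipart/form-data is handled by
+		// the form binder, while application/json would select the JSON binder.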
+ if err := c.ShouldBind(&form); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if form.User != "manu" || form.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +**Sample request** +```shell +$ curl -v -X POST \ + http://localhost:8080/loginJSON \ + -H 'content-type: application/json' \ + -d '{ "user": "manu" }' +> POST /loginJSON HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.51.0 +> Accept: */* +> content-type: application/json +> Content-Length: 18 +> +* upload completely sent off: 18 out of 18 bytes +< HTTP/1.1 400 Bad Request +< Content-Type: application/json; charset=utf-8 +< Date: Fri, 04 Aug 2017 03:51:31 GMT +< Content-Length: 100 +< +{"error":"Key: 'Login.Password' Error:Field validation for 'Password' failed on the 'required' tag"} +``` + +**Skip validate** + +When running the above example using the above the `curl` command, it returns error. Because the example use `binding:"required"` for `Password`. If use `binding:"-"` for `Password`, then it will not return error when running the above example again. + +### Custom Validators + +It is also possible to register custom validators. See the [example code](https://github.com/gin-gonic/examples/tree/master/custom-validation/server.go). + +```go +package main + +import ( + "net/http" + "reflect" + "time" + + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "gopkg.in/go-playground/validator.v8" +) + +// Booking contains binded and validated data. +type Booking struct { + CheckIn time.Time `form:"check_in" binding:"required,bookabledate" time_format:"2006-01-02"` + CheckOut time.Time `form:"check_out" binding:"required,gtfield=CheckIn" time_format:"2006-01-02"` +} + +func bookableDate( + v *validator.Validate, topStruct reflect.Value, currentStructOrField reflect.Value, + field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string, +) bool { + if date, ok := field.Interface().(time.Time); ok { + today := time.Now() + if today.Year() > date.Year() || today.YearDay() > date.YearDay() { + return false + } + } + return true +} + +func main() { + route := gin.Default() + + if v, ok := binding.Validator.Engine().(*validator.Validate); ok { + v.RegisterValidation("bookabledate", bookableDate) + } + + route.GET("/bookable", getBookable) + route.Run(":8085") +} + +func getBookable(c *gin.Context) { + var b Booking + if err := c.ShouldBindWith(&b, binding.Query); err == nil { + c.JSON(http.StatusOK, gin.H{"message": "Booking dates are valid!"}) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} +``` + +```console +$ curl "localhost:8085/bookable?check_in=2018-04-16&check_out=2018-04-17" +{"message":"Booking dates are valid!"} + +$ curl "localhost:8085/bookable?check_in=2018-03-08&check_out=2018-03-09" +{"error":"Key: 'Booking.CheckIn' Error:Field validation for 'CheckIn' failed on the 'bookabledate' tag"} +``` + +[Struct level validations](https://github.com/go-playground/validator/releases/tag/v8.7) can also be registered this way. +See the [struct-lvl-validation example](https://github.com/gin-gonic/examples/tree/master/struct-lvl-validations) to learn more. + +### Only Bind Query String + +`ShouldBindQuery` function only binds the query params and not the post data. 
See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-315953017). + +```go +package main + +import ( + "log" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` +} + +func main() { + route := gin.Default() + route.Any("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + if c.ShouldBindQuery(&person) == nil { + log.Println("====== Only Bind By Query String ======") + log.Println(person.Name) + log.Println(person.Address) + } + c.String(200, "Success") +} + +``` + +### Bind Query String or Post Data + +See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292). + +```go +package main + +import ( + "log" + "time" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` + Birthday time.Time `form:"birthday" time_format:"2006-01-02" time_utc:"1"` +} + +func main() { + route := gin.Default() + route.GET("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + // If `GET`, only `Form` binding engine (`query`) used. + // If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`). + // See more at https://github.com/gin-gonic/gin/blob/master/binding/binding.go#L48 + if c.ShouldBind(&person) == nil { + log.Println(person.Name) + log.Println(person.Address) + log.Println(person.Birthday) + } + + c.String(200, "Success") +} +``` + +Test it with: +```sh +$ curl -X GET "localhost:8085/testing?name=appleboy&address=xyz&birthday=1992-03-15" +``` + +### Bind Uri + +See the [detail information](https://github.com/gin-gonic/gin/issues/846). + +```go +package main + +import "github.com/gin-gonic/gin" + +type Person struct { + ID string `uri:"id" binding:"required,uuid"` + Name string `uri:"name" binding:"required"` +} + +func main() { + route := gin.Default() + route.GET("/:name/:id", func(c *gin.Context) { + var person Person + if err := c.ShouldBindUri(&person); err != nil { + c.JSON(400, gin.H{"msg": err}) + return + } + c.JSON(200, gin.H{"name": person.Name, "uuid": person.ID}) + }) + route.Run(":8088") +} +``` + +Test it with: +```sh +$ curl -v localhost:8088/thinkerou/987fbc97-4bed-5078-9f07-9141ba07c9f3 +$ curl -v localhost:8088/thinkerou/not-uuid +``` + +### Bind HTML checkboxes + +See the [detail information](https://github.com/gin-gonic/gin/issues/129#issuecomment-124260092) + +main.go + +```go +... + +type myForm struct { + Colors []string `form:"colors[]"` +} + +... + +func formHandler(c *gin.Context) { + var fakeForm myForm + c.ShouldBind(&fakeForm) + c.JSON(200, gin.H{"color": fakeForm.Colors}) +} + +... + +``` + +form.html + +```html +
+<form action="/" method="POST">
+    <p>Check some colors</p>
+    <input type="checkbox" name="colors[]" value="red">
+    <input type="checkbox" name="colors[]" value="green">
+    <input type="checkbox" name="colors[]" value="blue">
+    <input type="submit">
+</form>
+``` + +result: + +``` +{"color":["red","green","blue"]} +``` + +### Multipart/Urlencoded binding + +```go +package main + +import ( + "github.com/gin-gonic/gin" +) + +type LoginForm struct { + User string `form:"user" binding:"required"` + Password string `form:"password" binding:"required"` +} + +func main() { + router := gin.Default() + router.POST("/login", func(c *gin.Context) { + // you can bind multipart form with explicit binding declaration: + // c.ShouldBindWith(&form, binding.Form) + // or you can simply use autobinding with ShouldBind method: + var form LoginForm + // in this case proper binding will be automatically selected + if c.ShouldBind(&form) == nil { + if form.User == "user" && form.Password == "password" { + c.JSON(200, gin.H{"status": "you are logged in"}) + } else { + c.JSON(401, gin.H{"status": "unauthorized"}) + } + } + }) + router.Run(":8080") +} +``` + +Test it with: +```sh +$ curl -v --form user=user --form password=password http://localhost:8080/login +``` + +### XML, JSON, YAML and ProtoBuf rendering + +```go +func main() { + r := gin.Default() + + // gin.H is a shortcut for map[string]interface{} + r.GET("/someJSON", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/moreJSON", func(c *gin.Context) { + // You also can use a struct + var msg struct { + Name string `json:"user"` + Message string + Number int + } + msg.Name = "Lena" + msg.Message = "hey" + msg.Number = 123 + // Note that msg.Name becomes "user" in the JSON + // Will output : {"user": "Lena", "Message": "hey", "Number": 123} + c.JSON(http.StatusOK, msg) + }) + + r.GET("/someXML", func(c *gin.Context) { + c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/someYAML", func(c *gin.Context) { + c.YAML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/someProtoBuf", func(c *gin.Context) { + reps := []int64{int64(1), int64(2)} + label := "test" + // The specific definition of protobuf is written in the testdata/protoexample file. + data := &protoexample.Test{ + Label: &label, + Reps: reps, + } + // Note that data becomes binary data in the response + // Will output protoexample.Test protobuf serialized data + c.ProtoBuf(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### SecureJSON + +Using SecureJSON to prevent json hijacking. Default prepends `"while(1),"` to response body if the given struct is array values. + +```go +func main() { + r := gin.Default() + + // You can also use your own secure json prefix + // r.SecureJsonPrefix(")]}',\n") + + r.GET("/someJSON", func(c *gin.Context) { + names := []string{"lena", "austin", "foo"} + + // Will output : while(1);["lena","austin","foo"] + c.SecureJSON(http.StatusOK, names) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` +#### JSONP + +Using JSONP to request data from a server in a different domain. Add callback to response body if the query parameter callback exists. + +```go +func main() { + r := gin.Default() + + r.GET("/JSONP?callback=x", func(c *gin.Context) { + data := map[string]interface{}{ + "foo": "bar", + } + + //callback is x + // Will output : x({\"foo\":\"bar\"}) + c.JSONP(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### AsciiJSON + +Using AsciiJSON to Generates ASCII-only JSON with escaped non-ASCII chracters. 
+ +```go +func main() { + r := gin.Default() + + r.GET("/someJSON", func(c *gin.Context) { + data := map[string]interface{}{ + "lang": "GO语言", + "tag": "<br>
", + } + + // will output : {"lang":"GO\u8bed\u8a00","tag":"\u003cbr\u003e"} + c.AsciiJSON(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### PureJSON + +Normally, JSON replaces special HTML characters with their unicode entities, e.g. `<` becomes `\u003c`. If you want to encode such characters literally, you can use PureJSON instead. +This feature is unavailable in Go 1.6 and lower. + +```go +func main() { + r := gin.Default() + + // Serves unicode entities + r.GET("/json", func(c *gin.Context) { + c.JSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // Serves literal characters + r.GET("/purejson", func(c *gin.Context) { + c.PureJSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Serving static files + +```go +func main() { + router := gin.Default() + router.Static("/assets", "./assets") + router.StaticFS("/more_static", http.Dir("my_file_system")) + router.StaticFile("/favicon.ico", "./resources/favicon.ico") + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +### Serving data from reader + +```go +func main() { + router := gin.Default() + router.GET("/someDataFromReader", func(c *gin.Context) { + response, err := http.Get("https://raw.githubusercontent.com/gin-gonic/logo/master/color.png") + if err != nil || response.StatusCode != http.StatusOK { + c.Status(http.StatusServiceUnavailable) + return + } + + reader := response.Body + contentLength := response.ContentLength + contentType := response.Header.Get("Content-Type") + + extraHeaders := map[string]string{ + "Content-Disposition": `attachment; filename="gopher.png"`, + } + + c.DataFromReader(http.StatusOK, contentLength, contentType, reader, extraHeaders) + }) + router.Run(":8080") +} +``` + +### HTML rendering + +Using LoadHTMLGlob() or LoadHTMLFiles() + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/*") + //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html") + router.GET("/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "index.tmpl", gin.H{ + "title": "Main website", + }) + }) + router.Run(":8080") +} +``` + +templates/index.tmpl + +```html + +

+<html>
+	<h1>
+		{{ .title }}
+	</h1>
+</html>
+ +``` + +Using templates with same name in different directories + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/**/*") + router.GET("/posts/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{ + "title": "Posts", + }) + }) + router.GET("/users/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "users/index.tmpl", gin.H{ + "title": "Users", + }) + }) + router.Run(":8080") +} +``` + +templates/posts/index.tmpl + +```html +{{ define "posts/index.tmpl" }} +

+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using posts/index.tmpl</p>
+</html>
+ +{{ end }} +``` + +templates/users/index.tmpl + +```html +{{ define "users/index.tmpl" }} +

+<html><h1>
+	{{ .title }}
+</h1>
+<p>Using users/index.tmpl</p>
+</html>
+ +{{ end }} +``` + +#### Custom Template renderer + +You can also use your own html template render + +```go +import "html/template" + +func main() { + router := gin.Default() + html := template.Must(template.ParseFiles("file1", "file2")) + router.SetHTMLTemplate(html) + router.Run(":8080") +} +``` + +#### Custom Delimiters + +You may use custom delims + +```go + r := gin.Default() + r.Delims("{[{", "}]}") + r.LoadHTMLGlob("/path/to/templates") +``` + +#### Custom Template Funcs + +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/template). + +main.go + +```go +import ( + "fmt" + "html/template" + "net/http" + "time" + + "github.com/gin-gonic/gin" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d%02d/%02d", year, month, day) +} + +func main() { + router := gin.Default() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("./testdata/template/raw.tmpl") + + router.GET("/raw", func(c *gin.Context) { + c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + + router.Run(":8080") +} + +``` + +raw.tmpl + +```html +Date: {[{.now | formatAsDate}]} +``` + +Result: +``` +Date: 2017/07/01 +``` + +### Multitemplate + +Gin allow by default use only one html.Template. Check [a multitemplate render](https://github.com/gin-contrib/multitemplate) for using features like go 1.6 `block template`. + +### Redirects + +Issuing a HTTP redirect is easy. Both internal and external locations are supported. + +```go +r.GET("/test", func(c *gin.Context) { + c.Redirect(http.StatusMovedPermanently, "http://www.google.com/") +}) +``` + + +Issuing a Router redirect, use `HandleContext` like below. 
+ +``` go +r.GET("/test", func(c *gin.Context) { + c.Request.URL.Path = "/test2" + r.HandleContext(c) +}) +r.GET("/test2", func(c *gin.Context) { + c.JSON(200, gin.H{"hello": "world"}) +}) +``` + + +### Custom Middleware + +```go +func Logger() gin.HandlerFunc { + return func(c *gin.Context) { + t := time.Now() + + // Set example variable + c.Set("example", "12345") + + // before request + + c.Next() + + // after request + latency := time.Since(t) + log.Print(latency) + + // access the status we are sending + status := c.Writer.Status() + log.Println(status) + } +} + +func main() { + r := gin.New() + r.Use(Logger()) + + r.GET("/test", func(c *gin.Context) { + example := c.MustGet("example").(string) + + // it would print: "12345" + log.Println(example) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Using BasicAuth() middleware + +```go +// simulate some private data +var secrets = gin.H{ + "foo": gin.H{"email": "foo@bar.com", "phone": "123433"}, + "austin": gin.H{"email": "austin@example.com", "phone": "666"}, + "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"}, +} + +func main() { + r := gin.Default() + + // Group using gin.BasicAuth() middleware + // gin.Accounts is a shortcut for map[string]string + authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{ + "foo": "bar", + "austin": "1234", + "lena": "hello2", + "manu": "4321", + })) + + // /admin/secrets endpoint + // hit "localhost:8080/admin/secrets + authorized.GET("/secrets", func(c *gin.Context) { + // get user, it was set by the BasicAuth middleware + user := c.MustGet(gin.AuthUserKey).(string) + if secret, ok := secrets[user]; ok { + c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret}) + } else { + c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("}) + } + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Goroutines inside a middleware + +When starting new Goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy. + +```go +func main() { + r := gin.Default() + + r.GET("/long_async", func(c *gin.Context) { + // create copy to be used inside the goroutine + cCp := c.Copy() + go func() { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // note that you are using the copied context "cCp", IMPORTANT + log.Println("Done! in path " + cCp.Request.URL.Path) + }() + }) + + r.GET("/long_sync", func(c *gin.Context) { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // since we are NOT using a goroutine, we do not have to copy the context + log.Println("Done! in path " + c.Request.URL.Path) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Custom HTTP configuration + +Use `http.ListenAndServe()` directly, like this: + +```go +func main() { + router := gin.Default() + http.ListenAndServe(":8080", router) +} +``` +or + +```go +func main() { + router := gin.Default() + + s := &http.Server{ + Addr: ":8080", + Handler: router, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + s.ListenAndServe() +} +``` + +### Support Let's Encrypt + +example for 1-line LetsEncrypt HTTPS servers. 
+ +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + log.Fatal(autotls.Run(r, "example1.com", "example2.com")) +} +``` + +example for custom autocert manager. + +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" + "golang.org/x/crypto/acme/autocert" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"), + Cache: autocert.DirCache("/var/www/.cache"), + } + + log.Fatal(autotls.RunWithManager(r, &m)) +} +``` + +### Run multiple service using Gin + +See the [question](https://github.com/gin-gonic/gin/issues/346) and try the following example: + +```go +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +var ( + g errgroup.Group +) + +func router01() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 01", + }, + ) + }) + + return e +} + +func router02() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 02", + }, + ) + }) + + return e +} + +func main() { + server01 := &http.Server{ + Addr: ":8080", + Handler: router01(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + server02 := &http.Server{ + Addr: ":8081", + Handler: router02(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + g.Go(func() error { + return server01.ListenAndServe() + }) + + g.Go(func() error { + return server02.ListenAndServe() + }) + + if err := g.Wait(); err != nil { + log.Fatal(err) + } +} +``` + +### Graceful restart or stop + +Do you want to graceful restart or stop your web server? +There are some ways this can be done. + +We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. + +```go +router := gin.Default() +router.GET("/", handler) +// [...] +endless.ListenAndServe(":4242", router) +``` + +An alternative to endless: + +* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully. +* [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server. +* [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers. + +If you are using Go 1.8, you may not need to use this library! Consider using http.Server's built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. See the full [graceful-shutdown](https://github.com/gin-gonic/examples/tree/master/graceful-shutdown) example with gin. 
+ +```go +// +build go1.8 + +package main + +import ( + "context" + "log" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.GET("/", func(c *gin.Context) { + time.Sleep(5 * time.Second) + c.String(http.StatusOK, "Welcome Gin Server") + }) + + srv := &http.Server{ + Addr: ":8080", + Handler: router, + } + + go func() { + // service connections + if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + log.Fatalf("listen: %s\n", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server with + // a timeout of 5 seconds. + quit := make(chan os.Signal) + // kill (no param) default send syscall.SIGTERM + // kill -2 is syscall.SIGINT + // kill -9 is syscall.SIGKILL but can"t be catch, so don't need add it + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + <-quit + log.Println("Shutdown Server ...") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := srv.Shutdown(ctx); err != nil { + log.Fatal("Server Shutdown:", err) + } + // catching ctx.Done(). timeout of 5 seconds. + select { + case <-ctx.Done(): + log.Println("timeout of 5 seconds.") + } + log.Println("Server exiting") +} +``` + +### Build a single binary with templates + +You can build a server into a single binary containing templates by using [go-assets][]. + +[go-assets]: https://github.com/jessevdk/go-assets + +```go +func main() { + r := gin.New() + + t, err := loadTemplate() + if err != nil { + panic(err) + } + r.SetHTMLTemplate(t) + + r.GET("/", func(c *gin.Context) { + c.HTML(http.StatusOK, "/html/index.tmpl",nil) + }) + r.Run(":8080") +} + +// loadTemplate loads templates embedded by go-assets-builder +func loadTemplate() (*template.Template, error) { + t := template.New("") + for name, file := range Assets.Files { + if file.IsDir() || !strings.HasSuffix(name, ".tmpl") { + continue + } + h, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + t, err = t.New(name).Parse(string(h)) + if err != nil { + return nil, err + } + } + return t, nil +} +``` + +See a complete example in the `https://github.com/gin-gonic/examples/tree/master/assets-in-binary` directory. 
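+
+For the example above to compile, it also needs the imports matching what it references, plus a generated `Assets` variable produced by go-assets-builder from your template directory. The import list below is an assumption inferred from the snippet, not part of the original example:
+
+```go
+import (
+	"html/template"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+```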
+ +### Bind form-data request with custom struct + +The follow example using custom struct: + +```go +type StructA struct { + FieldA string `form:"field_a"` +} + +type StructB struct { + NestedStruct StructA + FieldB string `form:"field_b"` +} + +type StructC struct { + NestedStructPointer *StructA + FieldC string `form:"field_c"` +} + +type StructD struct { + NestedAnonyStruct struct { + FieldX string `form:"field_x"` + } + FieldD string `form:"field_d"` +} + +func GetDataB(c *gin.Context) { + var b StructB + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStruct, + "b": b.FieldB, + }) +} + +func GetDataC(c *gin.Context) { + var b StructC + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStructPointer, + "c": b.FieldC, + }) +} + +func GetDataD(c *gin.Context) { + var b StructD + c.Bind(&b) + c.JSON(200, gin.H{ + "x": b.NestedAnonyStruct, + "d": b.FieldD, + }) +} + +func main() { + r := gin.Default() + r.GET("/getb", GetDataB) + r.GET("/getc", GetDataC) + r.GET("/getd", GetDataD) + + r.Run() +} +``` + +Using the command `curl` command result: + +``` +$ curl "http://localhost:8080/getb?field_a=hello&field_b=world" +{"a":{"FieldA":"hello"},"b":"world"} +$ curl "http://localhost:8080/getc?field_a=hello&field_c=world" +{"a":{"FieldA":"hello"},"c":"world"} +$ curl "http://localhost:8080/getd?field_x=hello&field_d=world" +{"d":"world","x":{"FieldX":"hello"}} +``` + +### Try to bind body into different structs + +The normal methods for binding request body consumes `c.Request.Body` and they +cannot be called multiple times. + +```go +type formA struct { + Foo string `json:"foo" xml:"foo" binding:"required"` +} + +type formB struct { + Bar string `json:"bar" xml:"bar" binding:"required"` +} + +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This c.ShouldBind consumes c.Request.Body and it cannot be reused. + if errA := c.ShouldBind(&objA); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // Always an error is occurred by this because c.Request.Body is EOF now. + } else if errB := c.ShouldBind(&objB); errB == nil { + c.String(http.StatusOK, `the body should be formB`) + } else { + ... + } +} +``` + +For this, you can use `c.ShouldBindBodyWith`. + +```go +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This reads c.Request.Body and stores the result into the context. + if errA := c.ShouldBindBodyWith(&objA, binding.JSON); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // At this time, it reuses body stored in the context. + } else if errB := c.ShouldBindBodyWith(&objB, binding.JSON); errB == nil { + c.String(http.StatusOK, `the body should be formB JSON`) + // And it can accepts other formats + } else if errB2 := c.ShouldBindBodyWith(&objB, binding.XML); errB2 == nil { + c.String(http.StatusOK, `the body should be formB XML`) + } else { + ... + } +} +``` + +* `c.ShouldBindBodyWith` stores body into the context before binding. This has +a slight impact to performance, so you should not use this method if you are +enough to call binding at once. +* This feature is only needed for some formats -- `JSON`, `XML`, `MsgPack`, +`ProtoBuf`. For other formats, `Query`, `Form`, `FormPost`, `FormMultipart`, +can be called by `c.ShouldBind()` multiple times without any damage to +performance (See [#1341](https://github.com/gin-gonic/gin/pull/1341)). + +### http2 server push + +http.Pusher is supported only **go1.8+**. See the [golang blog](https://blog.golang.org/h2push) for detail information. 
+
+```go
+package main
+
+import (
+	"html/template"
+	"log"
+
+	"github.com/gin-gonic/gin"
+)
+
+var html = template.Must(template.New("https").Parse(`
+<html>
+<head>
+  <title>Https Test</title>
+  <script src="/assets/app.js"></script>
+</head>
+<body>
+  <h1>Welcome, Ginner!</h1>
+</body>
+</html>
+ + +`)) + +func main() { + r := gin.Default() + r.Static("/assets", "./assets") + r.SetHTMLTemplate(html) + + r.GET("/", func(c *gin.Context) { + if pusher := c.Writer.Pusher(); pusher != nil { + // use pusher.Push() to do server push + if err := pusher.Push("/assets/app.js", nil); err != nil { + log.Printf("Failed to push: %v", err) + } + } + c.HTML(200, "https", gin.H{ + "status": "success", + }) + }) + + // Listen and Server in https://127.0.0.1:8080 + r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key") +} +``` + +### Define format for the log of routes + +The default log of routes is: +``` +[GIN-debug] POST /foo --> main.main.func1 (3 handlers) +[GIN-debug] GET /bar --> main.main.func2 (3 handlers) +[GIN-debug] GET /status --> main.main.func3 (3 handlers) +``` + +If you want to log this information in given format (e.g. JSON, key values or something else), then you can define this format with `gin.DebugPrintRouteFunc`. +In the example below, we log all routes with standard log package but you can use another log tools that suits of your needs. +```go +import ( + "log" + "net/http" + + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) { + log.Printf("endpoint %v %v %v %v\n", httpMethod, absolutePath, handlerName, nuHandlers) + } + + r.POST("/foo", func(c *gin.Context) { + c.JSON(http.StatusOK, "foo") + }) + + r.GET("/bar", func(c *gin.Context) { + c.JSON(http.StatusOK, "bar") + }) + + r.GET("/status", func(c *gin.Context) { + c.JSON(http.StatusOK, "ok") + }) + + // Listen and Server in http://0.0.0.0:8080 + r.Run() +} +``` + +### Set and get a cookie + +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + + router.GET("/cookie", func(c *gin.Context) { + + cookie, err := c.Cookie("gin_cookie") + + if err != nil { + cookie = "NotSet" + c.SetCookie("gin_cookie", "test", 3600, "/", "localhost", false, true) + } + + fmt.Printf("Cookie value: %s \n", cookie) + }) + + router.Run() +} +``` + + +## Testing + +The `net/http/httptest` package is preferable way for HTTP testing. + +```go +package main + +func setupRouter() *gin.Engine { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + return r +} + +func main() { + r := setupRouter() + r.Run(":8080") +} +``` + +Test for code example above: + +```go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPingRoute(t *testing.T) { + router := setupRouter() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/ping", nil) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "pong", w.Body.String()) +} +``` + +## Users + +Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framework. + +* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go. +* [fnproject](https://github.com/fnproject/fn): The container native, cloud agnostic serverless platform. +* [photoprism](https://github.com/photoprism/photoprism): Personal photo management powered by Go and Google TensorFlow. +* [krakend](https://github.com/devopsfaith/krakend): Ultra performant API Gateway with middlewares. +* [picfit](https://github.com/thoas/picfit): An image resizing server written in Go. 
diff --git a/backend/vendor/github.com/gin-gonic/gin/auth.go b/backend/vendor/github.com/gin-gonic/gin/auth.go new file mode 100644 index 00000000..9ed81b5d --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/auth.go @@ -0,0 +1,96 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "crypto/subtle" + "encoding/base64" + "net/http" + "strconv" +) + +// AuthUserKey is the cookie name for user credential in basic auth. +const AuthUserKey = "user" + +// Accounts defines a key/value for user/pass list of authorized logins. +type Accounts map[string]string + +type authPair struct { + value string + user string +} + +type authPairs []authPair + +func (a authPairs) searchCredential(authValue string) (string, bool) { + if authValue == "" { + return "", false + } + for _, pair := range a { + if pair.value == authValue { + return pair.user, true + } + } + return "", false +} + +// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where +// the key is the user name and the value is the password, as well as the name of the Realm. +// If the realm is empty, "Authorization Required" will be used by default. +// (see http://tools.ietf.org/html/rfc2617#section-1.2) +func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc { + if realm == "" { + realm = "Authorization Required" + } + realm = "Basic realm=" + strconv.Quote(realm) + pairs := processAccounts(accounts) + return func(c *Context) { + // Search user in the slice of allowed credentials + user, found := pairs.searchCredential(c.requestHeader("Authorization")) + if !found { + // Credentials doesn't match, we return 401 and abort handlers chain. + c.Header("WWW-Authenticate", realm) + c.AbortWithStatus(http.StatusUnauthorized) + return + } + + // The user credentials was found, set user's id to key AuthUserKey in this context, the user's id can be read later using + // c.MustGet(gin.AuthUserKey). + c.Set(AuthUserKey, user) + } +} + +// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where +// the key is the user name and the value is the password. +func BasicAuth(accounts Accounts) HandlerFunc { + return BasicAuthForRealm(accounts, "") +} + +func processAccounts(accounts Accounts) authPairs { + assert1(len(accounts) > 0, "Empty list of authorized credentials") + pairs := make(authPairs, 0, len(accounts)) + for user, password := range accounts { + assert1(user != "", "User can not be empty") + value := authorizationHeader(user, password) + pairs = append(pairs, authPair{ + value: value, + user: user, + }) + } + return pairs +} + +func authorizationHeader(user, password string) string { + base := user + ":" + password + return "Basic " + base64.StdEncoding.EncodeToString([]byte(base)) +} + +func secureCompare(given, actual string) bool { + if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 { + return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1 + } + // Securely compare actual to itself to keep constant time, but always return false. 
+ return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/binding.go b/backend/vendor/github.com/gin-gonic/gin/binding/binding.go new file mode 100644 index 00000000..520c5109 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/binding.go @@ -0,0 +1,113 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import "net/http" + +// Content-Type MIME of the most common data formats. +const ( + MIMEJSON = "application/json" + MIMEHTML = "text/html" + MIMEXML = "application/xml" + MIMEXML2 = "text/xml" + MIMEPlain = "text/plain" + MIMEPOSTForm = "application/x-www-form-urlencoded" + MIMEMultipartPOSTForm = "multipart/form-data" + MIMEPROTOBUF = "application/x-protobuf" + MIMEMSGPACK = "application/x-msgpack" + MIMEMSGPACK2 = "application/msgpack" + MIMEYAML = "application/x-yaml" +) + +// Binding describes the interface which needs to be implemented for binding the +// data present in the request such as JSON request body, query parameters or +// the form POST. +type Binding interface { + Name() string + Bind(*http.Request, interface{}) error +} + +// BindingBody adds BindBody method to Binding. BindBody is similar with Bind, +// but it reads the body from supplied bytes instead of req.Body. +type BindingBody interface { + Binding + BindBody([]byte, interface{}) error +} + +// BindingUri adds BindUri method to Binding. BindUri is similar with Bind, +// but it read the Params. +type BindingUri interface { + Name() string + BindUri(map[string][]string, interface{}) error +} + +// StructValidator is the minimal interface which needs to be implemented in +// order for it to be used as the validator engine for ensuring the correctness +// of the request. Gin provides a default implementation for this using +// https://github.com/go-playground/validator/tree/v8.18.2. +type StructValidator interface { + // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. + // If the received type is not a struct, any validation should be skipped and nil must be returned. + // If the received type is a struct or pointer to a struct, the validation should be performed. + // If the struct is not valid or the validation itself fails, a descriptive error should be returned. + // Otherwise nil must be returned. + ValidateStruct(interface{}) error + + // Engine returns the underlying validator engine which powers the + // StructValidator implementation. + Engine() interface{} +} + +// Validator is the default validator which implements the StructValidator +// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2 +// under the hood. +var Validator StructValidator = &defaultValidator{} + +// These implement the Binding interface and can be used to bind the data +// present in the request to struct instances. +var ( + JSON = jsonBinding{} + XML = xmlBinding{} + Form = formBinding{} + Query = queryBinding{} + FormPost = formPostBinding{} + FormMultipart = formMultipartBinding{} + ProtoBuf = protobufBinding{} + MsgPack = msgpackBinding{} + YAML = yamlBinding{} + Uri = uriBinding{} +) + +// Default returns the appropriate Binding instance based on the HTTP method +// and the content type. 
+func Default(method, contentType string) Binding { + if method == "GET" { + return Form + } + + switch contentType { + case MIMEJSON: + return JSON + case MIMEXML, MIMEXML2: + return XML + case MIMEPROTOBUF: + return ProtoBuf + case MIMEMSGPACK, MIMEMSGPACK2: + return MsgPack + case MIMEYAML: + return YAML + case MIMEMultipartPOSTForm: + return FormMultipart + default: // case MIMEPOSTForm: + return Form + } +} + +func validate(obj interface{}) error { + if Validator == nil { + return nil + } + return Validator.ValidateStruct(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/backend/vendor/github.com/gin-gonic/gin/binding/default_validator.go new file mode 100644 index 00000000..e7a302de --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/default_validator.go @@ -0,0 +1,51 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "reflect" + "sync" + + "gopkg.in/go-playground/validator.v8" +) + +type defaultValidator struct { + once sync.Once + validate *validator.Validate +} + +var _ StructValidator = &defaultValidator{} + +// ValidateStruct receives any kind of type, but only performed struct or pointer to struct type. +func (v *defaultValidator) ValidateStruct(obj interface{}) error { + value := reflect.ValueOf(obj) + valueType := value.Kind() + if valueType == reflect.Ptr { + valueType = value.Elem().Kind() + } + if valueType == reflect.Struct { + v.lazyinit() + if err := v.validate.Struct(obj); err != nil { + return err + } + } + return nil +} + +// Engine returns the underlying validator engine which powers the default +// Validator instance. This is useful if you want to register custom validations +// or struct level validations. See validator GoDoc for more info - +// https://godoc.org/gopkg.in/go-playground/validator.v8 +func (v *defaultValidator) Engine() interface{} { + v.lazyinit() + return v.validate +} + +func (v *defaultValidator) lazyinit() { + v.once.Do(func() { + config := &validator.Config{TagName: "binding"} + v.validate = validator.New(config) + }) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/form.go b/backend/vendor/github.com/gin-gonic/gin/binding/form.go new file mode 100644 index 00000000..0b28aa8a --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/form.go @@ -0,0 +1,89 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "mime/multipart" + "net/http" + "reflect" +) + +const defaultMemory = 32 * 1024 * 1024 + +type formBinding struct{} +type formPostBinding struct{} +type formMultipartBinding struct{} + +func (formBinding) Name() string { + return "form" +} + +func (formBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + if err := req.ParseMultipartForm(defaultMemory); err != nil { + if err != http.ErrNotMultipart { + return err + } + } + if err := mapForm(obj, req.Form); err != nil { + return err + } + return validate(obj) +} + +func (formPostBinding) Name() string { + return "form-urlencoded" +} + +func (formPostBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + if err := mapForm(obj, req.PostForm); err != nil { + return err + } + return validate(obj) +} + +func (formMultipartBinding) Name() string { + return "multipart/form-data" +} + +func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseMultipartForm(defaultMemory); err != nil { + return err + } + if err := mappingByPtr(obj, (*multipartRequest)(req), "form"); err != nil { + return err + } + + return validate(obj) +} + +type multipartRequest http.Request + +var _ setter = (*multipartRequest)(nil) + +var ( + multipartFileHeaderStructType = reflect.TypeOf(multipart.FileHeader{}) +) + +// TrySet tries to set a value by the multipart request with the binding a form file +func (r *multipartRequest) TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) { + if value.Type() == multipartFileHeaderStructType { + _, file, err := (*http.Request)(r).FormFile(key) + if err != nil { + return false, err + } + if file != nil { + value.Set(reflect.ValueOf(*file)) + return true, nil + } + } + + return setByForm(value, field, r.MultipartForm.Value, key, opt) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/backend/vendor/github.com/gin-gonic/gin/binding/form_mapping.go new file mode 100644 index 00000000..aaacf6c5 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/form_mapping.go @@ -0,0 +1,330 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/gin-gonic/gin/internal/json" +) + +var errUnknownType = errors.New("Unknown type") + +func mapUri(ptr interface{}, m map[string][]string) error { + return mapFormByTag(ptr, m, "uri") +} + +func mapForm(ptr interface{}, form map[string][]string) error { + return mapFormByTag(ptr, form, "form") +} + +var emptyField = reflect.StructField{} + +func mapFormByTag(ptr interface{}, form map[string][]string, tag string) error { + return mappingByPtr(ptr, formSource(form), tag) +} + +// setter tries to set value on a walking by fields of a struct +type setter interface { + TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) +} + +type formSource map[string][]string + +var _ setter = formSource(nil) + +// TrySet tries to set a value by request's form source (like map[string][]string) +func (form formSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) { + return setByForm(value, field, form, tagValue, opt) +} + +func mappingByPtr(ptr interface{}, setter setter, tag string) error { + _, err := mapping(reflect.ValueOf(ptr), emptyField, setter, tag) + return err +} + +func mapping(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + var vKind = value.Kind() + + if vKind == reflect.Ptr { + var isNew bool + vPtr := value + if value.IsNil() { + isNew = true + vPtr = reflect.New(value.Type().Elem()) + } + isSetted, err := mapping(vPtr.Elem(), field, setter, tag) + if err != nil { + return false, err + } + if isNew && isSetted { + value.Set(vPtr) + } + return isSetted, nil + } + + ok, err := tryToSetValue(value, field, setter, tag) + if err != nil { + return false, err + } + if ok { + return true, nil + } + + if vKind == reflect.Struct { + tValue := value.Type() + + var isSetted bool + for i := 0; i < value.NumField(); i++ { + if !value.Field(i).CanSet() { + continue + } + ok, err := mapping(value.Field(i), tValue.Field(i), setter, tag) + if err != nil { + return false, err + } + isSetted = isSetted || ok + } + return isSetted, nil + } + return false, nil +} + +type setOptions struct { + isDefaultExists bool + defaultValue string +} + +func tryToSetValue(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + var tagValue string + var setOpt setOptions + + tagValue = field.Tag.Get(tag) + tagValue, opts := head(tagValue, ",") + + if tagValue == "-" { // just ignoring this field + return false, nil + } + if tagValue == "" { // default value is FieldName + tagValue = field.Name + } + if tagValue == "" { // when field is "emptyField" variable + return false, nil + } + + var opt string + for len(opts) > 0 { + opt, opts = head(opts, ",") + + k, v := head(opt, "=") + switch k { + case "default": + setOpt.isDefaultExists = true + setOpt.defaultValue = v + } + } + + return setter.TrySet(value, field, tagValue, setOpt) +} + +func setByForm(value reflect.Value, field reflect.StructField, form map[string][]string, tagValue string, opt setOptions) (isSetted bool, err error) { + vs, ok := form[tagValue] + if !ok && !opt.isDefaultExists { + return false, nil + } + + switch value.Kind() { + case reflect.Slice: + if !ok { + vs = []string{opt.defaultValue} + } + return true, setSlice(vs, value, field) + case reflect.Array: + if !ok { + vs = []string{opt.defaultValue} + } + if len(vs) != value.Len() { + return false, 
fmt.Errorf("%q is not valid value for %s", vs, value.Type().String()) + } + return true, setArray(vs, value, field) + default: + var val string + if !ok { + val = opt.defaultValue + } + + if len(vs) > 0 { + val = vs[0] + } + return true, setWithProperType(val, value, field) + } +} + +func setWithProperType(val string, value reflect.Value, field reflect.StructField) error { + switch value.Kind() { + case reflect.Int: + return setIntField(val, 0, value) + case reflect.Int8: + return setIntField(val, 8, value) + case reflect.Int16: + return setIntField(val, 16, value) + case reflect.Int32: + return setIntField(val, 32, value) + case reflect.Int64: + switch value.Interface().(type) { + case time.Duration: + return setTimeDuration(val, value, field) + } + return setIntField(val, 64, value) + case reflect.Uint: + return setUintField(val, 0, value) + case reflect.Uint8: + return setUintField(val, 8, value) + case reflect.Uint16: + return setUintField(val, 16, value) + case reflect.Uint32: + return setUintField(val, 32, value) + case reflect.Uint64: + return setUintField(val, 64, value) + case reflect.Bool: + return setBoolField(val, value) + case reflect.Float32: + return setFloatField(val, 32, value) + case reflect.Float64: + return setFloatField(val, 64, value) + case reflect.String: + value.SetString(val) + case reflect.Struct: + switch value.Interface().(type) { + case time.Time: + return setTimeField(val, field, value) + } + return json.Unmarshal([]byte(val), value.Addr().Interface()) + case reflect.Map: + return json.Unmarshal([]byte(val), value.Addr().Interface()) + default: + return errUnknownType + } + return nil +} + +func setIntField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + intVal, err := strconv.ParseInt(val, 10, bitSize) + if err == nil { + field.SetInt(intVal) + } + return err +} + +func setUintField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + uintVal, err := strconv.ParseUint(val, 10, bitSize) + if err == nil { + field.SetUint(uintVal) + } + return err +} + +func setBoolField(val string, field reflect.Value) error { + if val == "" { + val = "false" + } + boolVal, err := strconv.ParseBool(val) + if err == nil { + field.SetBool(boolVal) + } + return err +} + +func setFloatField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0.0" + } + floatVal, err := strconv.ParseFloat(val, bitSize) + if err == nil { + field.SetFloat(floatVal) + } + return err +} + +func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { + timeFormat := structField.Tag.Get("time_format") + if timeFormat == "" { + timeFormat = time.RFC3339 + } + + if val == "" { + value.Set(reflect.ValueOf(time.Time{})) + return nil + } + + l := time.Local + if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC { + l = time.UTC + } + + if locTag := structField.Tag.Get("time_location"); locTag != "" { + loc, err := time.LoadLocation(locTag) + if err != nil { + return err + } + l = loc + } + + t, err := time.ParseInLocation(timeFormat, val, l) + if err != nil { + return err + } + + value.Set(reflect.ValueOf(t)) + return nil +} + +func setArray(vals []string, value reflect.Value, field reflect.StructField) error { + for i, s := range vals { + err := setWithProperType(s, value.Index(i), field) + if err != nil { + return err + } + } + return nil +} + +func setSlice(vals []string, value reflect.Value, field reflect.StructField) error { + slice := 
reflect.MakeSlice(value.Type(), len(vals), len(vals)) + err := setArray(vals, slice, field) + if err != nil { + return err + } + value.Set(slice) + return nil +} + +func setTimeDuration(val string, value reflect.Value, field reflect.StructField) error { + d, err := time.ParseDuration(val) + if err != nil { + return err + } + value.Set(reflect.ValueOf(d)) + return nil +} + +func head(str, sep string) (head string, tail string) { + idx := strings.Index(str, sep) + if idx < 0 { + return str, "" + } + return str[:idx], str[idx+len(sep):] +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/json.go b/backend/vendor/github.com/gin-gonic/gin/binding/json.go new file mode 100644 index 00000000..f968161b --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/json.go @@ -0,0 +1,47 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "fmt" + "io" + "net/http" + + "github.com/gin-gonic/gin/internal/json" +) + +// EnableDecoderUseNumber is used to call the UseNumber method on the JSON +// Decoder instance. UseNumber causes the Decoder to unmarshal a number into an +// interface{} as a Number instead of as a float64. +var EnableDecoderUseNumber = false + +type jsonBinding struct{} + +func (jsonBinding) Name() string { + return "json" +} + +func (jsonBinding) Bind(req *http.Request, obj interface{}) error { + if req == nil || req.Body == nil { + return fmt.Errorf("invalid request") + } + return decodeJSON(req.Body, obj) +} + +func (jsonBinding) BindBody(body []byte, obj interface{}) error { + return decodeJSON(bytes.NewReader(body), obj) +} + +func decodeJSON(r io.Reader, obj interface{}) error { + decoder := json.NewDecoder(r) + if EnableDecoderUseNumber { + decoder.UseNumber() + } + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/backend/vendor/github.com/gin-gonic/gin/binding/msgpack.go new file mode 100644 index 00000000..b7f73197 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/msgpack.go @@ -0,0 +1,35 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "io" + "net/http" + + "github.com/ugorji/go/codec" +) + +type msgpackBinding struct{} + +func (msgpackBinding) Name() string { + return "msgpack" +} + +func (msgpackBinding) Bind(req *http.Request, obj interface{}) error { + return decodeMsgPack(req.Body, obj) +} + +func (msgpackBinding) BindBody(body []byte, obj interface{}) error { + return decodeMsgPack(bytes.NewReader(body), obj) +} + +func decodeMsgPack(r io.Reader, obj interface{}) error { + cdc := new(codec.MsgpackHandle) + if err := codec.NewDecoder(r, cdc).Decode(&obj); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/backend/vendor/github.com/gin-gonic/gin/binding/protobuf.go new file mode 100644 index 00000000..f9ece928 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/protobuf.go @@ -0,0 +1,36 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/proto" +) + +type protobufBinding struct{} + +func (protobufBinding) Name() string { + return "protobuf" +} + +func (b protobufBinding) Bind(req *http.Request, obj interface{}) error { + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + return err + } + return b.BindBody(buf, obj) +} + +func (protobufBinding) BindBody(body []byte, obj interface{}) error { + if err := proto.Unmarshal(body, obj.(proto.Message)); err != nil { + return err + } + // Here it's same to return validate(obj), but util now we can't add + // `binding:""` to the struct which automatically generate by gen-proto + return nil + // return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/query.go b/backend/vendor/github.com/gin-gonic/gin/binding/query.go new file mode 100644 index 00000000..219743f2 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/query.go @@ -0,0 +1,21 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import "net/http" + +type queryBinding struct{} + +func (queryBinding) Name() string { + return "query" +} + +func (queryBinding) Bind(req *http.Request, obj interface{}) error { + values := req.URL.Query() + if err := mapForm(obj, values); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/uri.go b/backend/vendor/github.com/gin-gonic/gin/binding/uri.go new file mode 100644 index 00000000..f91ec381 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/uri.go @@ -0,0 +1,18 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +type uriBinding struct{} + +func (uriBinding) Name() string { + return "uri" +} + +func (uriBinding) BindUri(m map[string][]string, obj interface{}) error { + if err := mapUri(obj, m); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/xml.go b/backend/vendor/github.com/gin-gonic/gin/binding/xml.go new file mode 100644 index 00000000..4e901149 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/xml.go @@ -0,0 +1,33 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" +) + +type xmlBinding struct{} + +func (xmlBinding) Name() string { + return "xml" +} + +func (xmlBinding) Bind(req *http.Request, obj interface{}) error { + return decodeXML(req.Body, obj) +} + +func (xmlBinding) BindBody(body []byte, obj interface{}) error { + return decodeXML(bytes.NewReader(body), obj) +} +func decodeXML(r io.Reader, obj interface{}) error { + decoder := xml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/binding/yaml.go b/backend/vendor/github.com/gin-gonic/gin/binding/yaml.go new file mode 100644 index 00000000..a2d36d6a --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/binding/yaml.go @@ -0,0 +1,35 @@ +// Copyright 2018 Gin Core Team. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "io" + "net/http" + + "gopkg.in/yaml.v2" +) + +type yamlBinding struct{} + +func (yamlBinding) Name() string { + return "yaml" +} + +func (yamlBinding) Bind(req *http.Request, obj interface{}) error { + return decodeYAML(req.Body, obj) +} + +func (yamlBinding) BindBody(body []byte, obj interface{}) error { + return decodeYAML(bytes.NewReader(body), obj) +} + +func decodeYAML(r io.Reader, obj interface{}) error { + decoder := yaml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/codecov.yml b/backend/vendor/github.com/gin-gonic/gin/codecov.yml new file mode 100644 index 00000000..c9c9a522 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/codecov.yml @@ -0,0 +1,5 @@ +coverage: + notify: + gitter: + default: + url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165 diff --git a/backend/vendor/github.com/gin-gonic/gin/context.go b/backend/vendor/github.com/gin-gonic/gin/context.go new file mode 100644 index 00000000..af747a1e --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/context.go @@ -0,0 +1,1023 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/gin-contrib/sse" + "github.com/gin-gonic/gin/binding" + "github.com/gin-gonic/gin/render" +) + +// Content-Type MIME of the most common data formats. +const ( + MIMEJSON = binding.MIMEJSON + MIMEHTML = binding.MIMEHTML + MIMEXML = binding.MIMEXML + MIMEXML2 = binding.MIMEXML2 + MIMEPlain = binding.MIMEPlain + MIMEPOSTForm = binding.MIMEPOSTForm + MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm + MIMEYAML = binding.MIMEYAML + BodyBytesKey = "_gin-gonic/gin/bodybyteskey" +) + +const abortIndex int8 = math.MaxInt8 / 2 + +// Context is the most important part of gin. It allows us to pass variables between middleware, +// manage the flow, validate the JSON of a request and render a JSON response for example. +type Context struct { + writermem responseWriter + Request *http.Request + Writer ResponseWriter + + Params Params + handlers HandlersChain + index int8 + + engine *Engine + + // Keys is a key/value pair exclusively for the context of each request. + Keys map[string]interface{} + + // Errors is a list of errors attached to all the handlers/middlewares who used this context. + Errors errorMsgs + + // Accepted defines a list of manually accepted formats for content negotiation. + Accepted []string +} + +/************************************/ +/********** CONTEXT CREATION ********/ +/************************************/ + +func (c *Context) reset() { + c.Writer = &c.writermem + c.Params = c.Params[0:0] + c.handlers = nil + c.index = -1 + c.Keys = nil + c.Errors = c.Errors[0:0] + c.Accepted = nil +} + +// Copy returns a copy of the current context that can be safely used outside the request's scope. +// This has to be used when the context has to be passed to a goroutine. 
+func (c *Context) Copy() *Context { + var cp = *c + cp.writermem.ResponseWriter = nil + cp.Writer = &cp.writermem + cp.index = abortIndex + cp.handlers = nil + cp.Keys = map[string]interface{}{} + for k, v := range c.Keys { + cp.Keys[k] = v + } + return &cp +} + +// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", +// this function will return "main.handleGetUsers". +func (c *Context) HandlerName() string { + return nameOfFunction(c.handlers.Last()) +} + +// HandlerNames returns a list of all registered handlers for this context in descending order, +// following the semantics of HandlerName() +func (c *Context) HandlerNames() []string { + hn := make([]string, 0, len(c.handlers)) + for _, val := range c.handlers { + hn = append(hn, nameOfFunction(val)) + } + return hn +} + +// Handler returns the main handler. +func (c *Context) Handler() HandlerFunc { + return c.handlers.Last() +} + +/************************************/ +/*********** FLOW CONTROL ***********/ +/************************************/ + +// Next should be used only inside middleware. +// It executes the pending handlers in the chain inside the calling handler. +// See example in GitHub. +func (c *Context) Next() { + c.index++ + for c.index < int8(len(c.handlers)) { + c.handlers[c.index](c) + c.index++ + } +} + +// IsAborted returns true if the current context was aborted. +func (c *Context) IsAborted() bool { + return c.index >= abortIndex +} + +// Abort prevents pending handlers from being called. Note that this will not stop the current handler. +// Let's say you have an authorization middleware that validates that the current request is authorized. +// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers +// for this request are not called. +func (c *Context) Abort() { + c.index = abortIndex +} + +// AbortWithStatus calls `Abort()` and writes the headers with the specified status code. +// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401). +func (c *Context) AbortWithStatus(code int) { + c.Status(code) + c.Writer.WriteHeaderNow() + c.Abort() +} + +// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. +// This method stops the chain, writes the status code and return a JSON body. +// It also sets the Content-Type as "application/json". +func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) { + c.Abort() + c.JSON(code, jsonObj) +} + +// AbortWithError calls `AbortWithStatus()` and `Error()` internally. +// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`. +// See Context.Error() for more details. +func (c *Context) AbortWithError(code int, err error) *Error { + c.AbortWithStatus(code) + return c.Error(err) +} + +/************************************/ +/********* ERROR MANAGEMENT *********/ +/************************************/ + +// Error attaches an error to the current context. The error is pushed to a list of errors. +// It's a good idea to call Error for each error that occurred during the resolution of a request. +// A middleware can be used to collect all the errors and push them to a database together, +// print a log, or append it in the HTTP response. +// Error will panic if err is nil. 
+func (c *Context) Error(err error) *Error { + if err == nil { + panic("err is nil") + } + + parsedError, ok := err.(*Error) + if !ok { + parsedError = &Error{ + Err: err, + Type: ErrorTypePrivate, + } + } + + c.Errors = append(c.Errors, parsedError) + return parsedError +} + +/************************************/ +/******** METADATA MANAGEMENT********/ +/************************************/ + +// Set is used to store a new key/value pair exclusively for this context. +// It also lazy initializes c.Keys if it was not used previously. +func (c *Context) Set(key string, value interface{}) { + if c.Keys == nil { + c.Keys = make(map[string]interface{}) + } + c.Keys[key] = value +} + +// Get returns the value for the given key, ie: (value, true). +// If the value does not exists it returns (nil, false) +func (c *Context) Get(key string) (value interface{}, exists bool) { + value, exists = c.Keys[key] + return +} + +// MustGet returns the value for the given key if it exists, otherwise it panics. +func (c *Context) MustGet(key string) interface{} { + if value, exists := c.Get(key); exists { + return value + } + panic("Key \"" + key + "\" does not exist") +} + +// GetString returns the value associated with the key as a string. +func (c *Context) GetString(key string) (s string) { + if val, ok := c.Get(key); ok && val != nil { + s, _ = val.(string) + } + return +} + +// GetBool returns the value associated with the key as a boolean. +func (c *Context) GetBool(key string) (b bool) { + if val, ok := c.Get(key); ok && val != nil { + b, _ = val.(bool) + } + return +} + +// GetInt returns the value associated with the key as an integer. +func (c *Context) GetInt(key string) (i int) { + if val, ok := c.Get(key); ok && val != nil { + i, _ = val.(int) + } + return +} + +// GetInt64 returns the value associated with the key as an integer. +func (c *Context) GetInt64(key string) (i64 int64) { + if val, ok := c.Get(key); ok && val != nil { + i64, _ = val.(int64) + } + return +} + +// GetFloat64 returns the value associated with the key as a float64. +func (c *Context) GetFloat64(key string) (f64 float64) { + if val, ok := c.Get(key); ok && val != nil { + f64, _ = val.(float64) + } + return +} + +// GetTime returns the value associated with the key as time. +func (c *Context) GetTime(key string) (t time.Time) { + if val, ok := c.Get(key); ok && val != nil { + t, _ = val.(time.Time) + } + return +} + +// GetDuration returns the value associated with the key as a duration. +func (c *Context) GetDuration(key string) (d time.Duration) { + if val, ok := c.Get(key); ok && val != nil { + d, _ = val.(time.Duration) + } + return +} + +// GetStringSlice returns the value associated with the key as a slice of strings. +func (c *Context) GetStringSlice(key string) (ss []string) { + if val, ok := c.Get(key); ok && val != nil { + ss, _ = val.([]string) + } + return +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func (c *Context) GetStringMap(key string) (sm map[string]interface{}) { + if val, ok := c.Get(key); ok && val != nil { + sm, _ = val.(map[string]interface{}) + } + return +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func (c *Context) GetStringMapString(key string) (sms map[string]string) { + if val, ok := c.Get(key); ok && val != nil { + sms, _ = val.(map[string]string) + } + return +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. 
+func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) { + if val, ok := c.Get(key); ok && val != nil { + smss, _ = val.(map[string][]string) + } + return +} + +/************************************/ +/************ INPUT DATA ************/ +/************************************/ + +// Param returns the value of the URL param. +// It is a shortcut for c.Params.ByName(key) +// router.GET("/user/:id", func(c *gin.Context) { +// // a GET request to /user/john +// id := c.Param("id") // id == "john" +// }) +func (c *Context) Param(key string) string { + return c.Params.ByName(key) +} + +// Query returns the keyed url query value if it exists, +// otherwise it returns an empty string `("")`. +// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /path?id=1234&name=Manu&value= +// c.Query("id") == "1234" +// c.Query("name") == "Manu" +// c.Query("value") == "" +// c.Query("wtf") == "" +func (c *Context) Query(key string) string { + value, _ := c.GetQuery(key) + return value +} + +// DefaultQuery returns the keyed url query value if it exists, +// otherwise it returns the specified defaultValue string. +// See: Query() and GetQuery() for further information. +// GET /?name=Manu&lastname= +// c.DefaultQuery("name", "unknown") == "Manu" +// c.DefaultQuery("id", "none") == "none" +// c.DefaultQuery("lastname", "none") == "" +func (c *Context) DefaultQuery(key, defaultValue string) string { + if value, ok := c.GetQuery(key); ok { + return value + } + return defaultValue +} + +// GetQuery is like Query(), it returns the keyed url query value +// if it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns `("", false)`. +// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /?name=Manu&lastname= +// ("Manu", true) == c.GetQuery("name") +// ("", false) == c.GetQuery("id") +// ("", true) == c.GetQuery("lastname") +func (c *Context) GetQuery(key string) (string, bool) { + if values, ok := c.GetQueryArray(key); ok { + return values[0], ok + } + return "", false +} + +// QueryArray returns a slice of strings for a given query key. +// The length of the slice depends on the number of params with the given key. +func (c *Context) QueryArray(key string) []string { + values, _ := c.GetQueryArray(key) + return values +} + +// GetQueryArray returns a slice of strings for a given query key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetQueryArray(key string) ([]string, bool) { + if values, ok := c.Request.URL.Query()[key]; ok && len(values) > 0 { + return values, true + } + return []string{}, false +} + +// QueryMap returns a map for a given query key. +func (c *Context) QueryMap(key string) map[string]string { + dicts, _ := c.GetQueryMap(key) + return dicts +} + +// GetQueryMap returns a map for a given query key, plus a boolean value +// whether at least one value exists for the given key. +func (c *Context) GetQueryMap(key string) (map[string]string, bool) { + return c.get(c.Request.URL.Query(), key) +} + +// PostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns an empty string `("")`. +func (c *Context) PostForm(key string) string { + value, _ := c.GetPostForm(key) + return value +} + +// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns the specified defaultValue string. +// See: PostForm() and GetPostForm() for further information. 
+func (c *Context) DefaultPostForm(key, defaultValue string) string { + if value, ok := c.GetPostForm(key); ok { + return value + } + return defaultValue +} + +// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded +// form or multipart form when it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns ("", false). +// For example, during a PATCH request to update the user's email: +// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" +// email= --> ("", true) := GetPostForm("email") // set email to "" +// --> ("", false) := GetPostForm("email") // do nothing with email +func (c *Context) GetPostForm(key string) (string, bool) { + if values, ok := c.GetPostFormArray(key); ok { + return values[0], ok + } + return "", false +} + +// PostFormArray returns a slice of strings for a given form key. +// The length of the slice depends on the number of params with the given key. +func (c *Context) PostFormArray(key string) []string { + values, _ := c.GetPostFormArray(key) + return values +} + +// GetPostFormArray returns a slice of strings for a given form key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetPostFormArray(key string) ([]string, bool) { + req := c.Request + if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + if err != http.ErrNotMultipart { + debugPrint("error on parse multipart form array: %v", err) + } + } + if values := req.PostForm[key]; len(values) > 0 { + return values, true + } + return []string{}, false +} + +// PostFormMap returns a map for a given form key. +func (c *Context) PostFormMap(key string) map[string]string { + dicts, _ := c.GetPostFormMap(key) + return dicts +} + +// GetPostFormMap returns a map for a given form key, plus a boolean value +// whether at least one value exists for the given key. +func (c *Context) GetPostFormMap(key string) (map[string]string, bool) { + req := c.Request + if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + if err != http.ErrNotMultipart { + debugPrint("error on parse multipart form map: %v", err) + } + } + return c.get(req.PostForm, key) +} + +// get is an internal method and returns a map which satisfy conditions. +func (c *Context) get(m map[string][]string, key string) (map[string]string, bool) { + dicts := make(map[string]string) + exist := false + for k, v := range m { + if i := strings.IndexByte(k, '['); i >= 1 && k[0:i] == key { + if j := strings.IndexByte(k[i+1:], ']'); j >= 1 { + exist = true + dicts[k[i+1:][:j]] = v[0] + } + } + } + return dicts, exist +} + +// FormFile returns the first file for the provided form key. +func (c *Context) FormFile(name string) (*multipart.FileHeader, error) { + if c.Request.MultipartForm == nil { + if err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + return nil, err + } + } + _, fh, err := c.Request.FormFile(name) + return fh, err +} + +// MultipartForm is the parsed multipart form, including file uploads. +func (c *Context) MultipartForm() (*multipart.Form, error) { + err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory) + return c.Request.MultipartForm, err +} + +// SaveUploadedFile uploads the form file to specific dst. 
+func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error { + src, err := file.Open() + if err != nil { + return err + } + defer src.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, src) + return err +} + +// Bind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error. +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. +func (c *Context) Bind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.MustBindWith(obj, b) +} + +// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON). +func (c *Context) BindJSON(obj interface{}) error { + return c.MustBindWith(obj, binding.JSON) +} + +// BindXML is a shortcut for c.MustBindWith(obj, binding.BindXML). +func (c *Context) BindXML(obj interface{}) error { + return c.MustBindWith(obj, binding.XML) +} + +// BindQuery is a shortcut for c.MustBindWith(obj, binding.Query). +func (c *Context) BindQuery(obj interface{}) error { + return c.MustBindWith(obj, binding.Query) +} + +// BindYAML is a shortcut for c.MustBindWith(obj, binding.YAML). +func (c *Context) BindYAML(obj interface{}) error { + return c.MustBindWith(obj, binding.YAML) +} + +// BindUri binds the passed struct pointer using binding.Uri. +// It will abort the request with HTTP 400 if any error occurs. +func (c *Context) BindUri(obj interface{}) error { + if err := c.ShouldBindUri(obj); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err + } + return nil +} + +// MustBindWith binds the passed struct pointer using the specified binding engine. +// It will abort the request with HTTP 400 if any error occurs. +// See the binding package. +func (c *Context) MustBindWith(obj interface{}, b binding.Binding) error { + if err := c.ShouldBindWith(obj, b); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err + } + return nil +} + +// ShouldBind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// Like c.Bind() but this method does not set the response status code to 400 and abort if the json is not valid. +func (c *Context) ShouldBind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.ShouldBindWith(obj, b) +} + +// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON). +func (c *Context) ShouldBindJSON(obj interface{}) error { + return c.ShouldBindWith(obj, binding.JSON) +} + +// ShouldBindXML is a shortcut for c.ShouldBindWith(obj, binding.XML). 
+func (c *Context) ShouldBindXML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.XML) +} + +// ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query). +func (c *Context) ShouldBindQuery(obj interface{}) error { + return c.ShouldBindWith(obj, binding.Query) +} + +// ShouldBindYAML is a shortcut for c.ShouldBindWith(obj, binding.YAML). +func (c *Context) ShouldBindYAML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.YAML) +} + +// ShouldBindUri binds the passed struct pointer using the specified binding engine. +func (c *Context) ShouldBindUri(obj interface{}) error { + m := make(map[string][]string) + for _, v := range c.Params { + m[v.Key] = []string{v.Value} + } + return binding.Uri.BindUri(m, obj) +} + +// ShouldBindWith binds the passed struct pointer using the specified binding engine. +// See the binding package. +func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error { + return b.Bind(c.Request, obj) +} + +// ShouldBindBodyWith is similar with ShouldBindWith, but it stores the request +// body into the context, and reuse when it is called again. +// +// NOTE: This method reads the body before binding. So you should use +// ShouldBindWith for better performance if you need to call only once. +func (c *Context) ShouldBindBodyWith(obj interface{}, bb binding.BindingBody) (err error) { + var body []byte + if cb, ok := c.Get(BodyBytesKey); ok { + if cbb, ok := cb.([]byte); ok { + body = cbb + } + } + if body == nil { + body, err = ioutil.ReadAll(c.Request.Body) + if err != nil { + return err + } + c.Set(BodyBytesKey, body) + } + return bb.BindBody(body, obj) +} + +// ClientIP implements a best effort algorithm to return the real client IP, it parses +// X-Real-IP and X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy. +// Use X-Forwarded-For before X-Real-Ip as nginx uses X-Real-Ip with the proxy's IP. +func (c *Context) ClientIP() string { + if c.engine.ForwardedByClientIP { + clientIP := c.requestHeader("X-Forwarded-For") + clientIP = strings.TrimSpace(strings.Split(clientIP, ",")[0]) + if clientIP == "" { + clientIP = strings.TrimSpace(c.requestHeader("X-Real-Ip")) + } + if clientIP != "" { + return clientIP + } + } + + if c.engine.AppEngine { + if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" { + return addr + } + } + + if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil { + return ip + } + + return "" +} + +// ContentType returns the Content-Type header of the request. +func (c *Context) ContentType() string { + return filterFlags(c.requestHeader("Content-Type")) +} + +// IsWebsocket returns true if the request headers indicate that a websocket +// handshake is being initiated by the client. +func (c *Context) IsWebsocket() bool { + if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") && + strings.ToLower(c.requestHeader("Upgrade")) == "websocket" { + return true + } + return false +} + +func (c *Context) requestHeader(key string) string { + return c.Request.Header.Get(key) +} + +/************************************/ +/******** RESPONSE RENDERING ********/ +/************************************/ + +// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function. 
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == http.StatusNoContent: + return false + case status == http.StatusNotModified: + return false + } + return true +} + +// Status sets the HTTP response code. +func (c *Context) Status(code int) { + c.writermem.WriteHeader(code) +} + +// Header is a intelligent shortcut for c.Writer.Header().Set(key, value). +// It writes a header in the response. +// If value == "", this method removes the header `c.Writer.Header().Del(key)` +func (c *Context) Header(key, value string) { + if value == "" { + c.Writer.Header().Del(key) + return + } + c.Writer.Header().Set(key, value) +} + +// GetHeader returns value from request headers. +func (c *Context) GetHeader(key string) string { + return c.requestHeader(key) +} + +// GetRawData return stream data. +func (c *Context) GetRawData() ([]byte, error) { + return ioutil.ReadAll(c.Request.Body) +} + +// SetCookie adds a Set-Cookie header to the ResponseWriter's headers. +// The provided cookie must have a valid Name. Invalid cookies may be +// silently dropped. +func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) { + if path == "" { + path = "/" + } + http.SetCookie(c.Writer, &http.Cookie{ + Name: name, + Value: url.QueryEscape(value), + MaxAge: maxAge, + Path: path, + Domain: domain, + Secure: secure, + HttpOnly: httpOnly, + }) +} + +// Cookie returns the named cookie provided in the request or +// ErrNoCookie if not found. And return the named cookie is unescaped. +// If multiple cookies match the given name, only one cookie will +// be returned. +func (c *Context) Cookie(name string) (string, error) { + cookie, err := c.Request.Cookie(name) + if err != nil { + return "", err + } + val, _ := url.QueryUnescape(cookie.Value) + return val, nil +} + +// Render writes the response headers and calls render.Render to render data. +func (c *Context) Render(code int, r render.Render) { + c.Status(code) + + if !bodyAllowedForStatus(code) { + r.WriteContentType(c.Writer) + c.Writer.WriteHeaderNow() + return + } + + if err := r.Render(c.Writer); err != nil { + panic(err) + } +} + +// HTML renders the HTTP template specified by its file name. +// It also updates the HTTP code and sets the Content-Type as "text/html". +// See http://golang.org/doc/articles/wiki/ +func (c *Context) HTML(code int, name string, obj interface{}) { + instance := c.engine.HTMLRender.Instance(name, obj) + c.Render(code, instance) +} + +// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body. +// It also sets the Content-Type as "application/json". +// WARNING: we recommend to use this only for development purposes since printing pretty JSON is +// more CPU and bandwidth consuming. Use Context.JSON() instead. +func (c *Context) IndentedJSON(code int, obj interface{}) { + c.Render(code, render.IndentedJSON{Data: obj}) +} + +// SecureJSON serializes the given struct as Secure JSON into the response body. +// Default prepends "while(1)," to response body if the given struct is array values. +// It also sets the Content-Type as "application/json". +func (c *Context) SecureJSON(code int, obj interface{}) { + c.Render(code, render.SecureJSON{Prefix: c.engine.secureJsonPrefix, Data: obj}) +} + +// JSONP serializes the given struct as JSON into the response body. +// It add padding to response body to request data from a server residing in a different domain than the client. 
+// It also sets the Content-Type as "application/javascript". +func (c *Context) JSONP(code int, obj interface{}) { + callback := c.DefaultQuery("callback", "") + if callback == "" { + c.Render(code, render.JSON{Data: obj}) + return + } + c.Render(code, render.JsonpJSON{Callback: callback, Data: obj}) +} + +// JSON serializes the given struct as JSON into the response body. +// It also sets the Content-Type as "application/json". +func (c *Context) JSON(code int, obj interface{}) { + c.Render(code, render.JSON{Data: obj}) +} + +// AsciiJSON serializes the given struct as JSON into the response body with unicode to ASCII string. +// It also sets the Content-Type as "application/json". +func (c *Context) AsciiJSON(code int, obj interface{}) { + c.Render(code, render.AsciiJSON{Data: obj}) +} + +// PureJSON serializes the given struct as JSON into the response body. +// PureJSON, unlike JSON, does not replace special html characters with their unicode entities. +func (c *Context) PureJSON(code int, obj interface{}) { + c.Render(code, render.PureJSON{Data: obj}) +} + +// XML serializes the given struct as XML into the response body. +// It also sets the Content-Type as "application/xml". +func (c *Context) XML(code int, obj interface{}) { + c.Render(code, render.XML{Data: obj}) +} + +// YAML serializes the given struct as YAML into the response body. +func (c *Context) YAML(code int, obj interface{}) { + c.Render(code, render.YAML{Data: obj}) +} + +// ProtoBuf serializes the given struct as ProtoBuf into the response body. +func (c *Context) ProtoBuf(code int, obj interface{}) { + c.Render(code, render.ProtoBuf{Data: obj}) +} + +// String writes the given string into the response body. +func (c *Context) String(code int, format string, values ...interface{}) { + c.Render(code, render.String{Format: format, Data: values}) +} + +// Redirect returns a HTTP redirect to the specific location. +func (c *Context) Redirect(code int, location string) { + c.Render(-1, render.Redirect{ + Code: code, + Location: location, + Request: c.Request, + }) +} + +// Data writes some data into the body stream and updates the HTTP code. +func (c *Context) Data(code int, contentType string, data []byte) { + c.Render(code, render.Data{ + ContentType: contentType, + Data: data, + }) +} + +// DataFromReader writes the specified reader into the body stream and updates the HTTP code. +func (c *Context) DataFromReader(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) { + c.Render(code, render.Reader{ + Headers: extraHeaders, + ContentType: contentType, + ContentLength: contentLength, + Reader: reader, + }) +} + +// File writes the specified file into the body stream in a efficient way. +func (c *Context) File(filepath string) { + http.ServeFile(c.Writer, c.Request, filepath) +} + +// FileAttachment writes the specified file into the body stream in an efficient way +// On the client side, the file will typically be downloaded with the given filename +func (c *Context) FileAttachment(filepath, filename string) { + c.Writer.Header().Set("content-disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) + http.ServeFile(c.Writer, c.Request, filepath) +} + +// SSEvent writes a Server-Sent Event into the body stream. 
+func (c *Context) SSEvent(name string, message interface{}) { + c.Render(-1, sse.Event{ + Event: name, + Data: message, + }) +} + +// Stream sends a streaming response and returns a boolean +// indicates "Is client disconnected in middle of stream" +func (c *Context) Stream(step func(w io.Writer) bool) bool { + w := c.Writer + clientGone := w.CloseNotify() + for { + select { + case <-clientGone: + return true + default: + keepOpen := step(w) + w.Flush() + if !keepOpen { + return false + } + } + } +} + +/************************************/ +/******** CONTENT NEGOTIATION *******/ +/************************************/ + +// Negotiate contains all negotiations data. +type Negotiate struct { + Offered []string + HTMLName string + HTMLData interface{} + JSONData interface{} + XMLData interface{} + Data interface{} +} + +// Negotiate calls different Render according acceptable Accept format. +func (c *Context) Negotiate(code int, config Negotiate) { + switch c.NegotiateFormat(config.Offered...) { + case binding.MIMEJSON: + data := chooseData(config.JSONData, config.Data) + c.JSON(code, data) + + case binding.MIMEHTML: + data := chooseData(config.HTMLData, config.Data) + c.HTML(code, config.HTMLName, data) + + case binding.MIMEXML: + data := chooseData(config.XMLData, config.Data) + c.XML(code, data) + + default: + c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) // nolint: errcheck + } +} + +// NegotiateFormat returns an acceptable Accept format. +func (c *Context) NegotiateFormat(offered ...string) string { + assert1(len(offered) > 0, "you must provide at least one offer") + + if c.Accepted == nil { + c.Accepted = parseAccept(c.requestHeader("Accept")) + } + if len(c.Accepted) == 0 { + return offered[0] + } + for _, accepted := range c.Accepted { + for _, offert := range offered { + // According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers, + // therefore we can just iterate over the string without casting it into []rune + i := 0 + for ; i < len(accepted); i++ { + if accepted[i] == '*' || offert[i] == '*' { + return offert + } + if accepted[i] != offert[i] { + break + } + } + if i == len(accepted) { + return offert + } + } + } + return "" +} + +// SetAccepted sets Accept header data. +func (c *Context) SetAccepted(formats ...string) { + c.Accepted = formats +} + +/************************************/ +/***** GOLANG.ORG/X/NET/CONTEXT *****/ +/************************************/ + +// Deadline returns the time when work done on behalf of this context +// should be canceled. Deadline returns ok==false when no deadline is +// set. Successive calls to Deadline return the same results. +func (c *Context) Deadline() (deadline time.Time, ok bool) { + return +} + +// Done returns a channel that's closed when work done on behalf of this +// context should be canceled. Done may return nil if this context can +// never be canceled. Successive calls to Done return the same value. +func (c *Context) Done() <-chan struct{} { + return nil +} + +// Err returns a non-nil error value after Done is closed, +// successive calls to Err return the same error. +// If Done is not yet closed, Err returns nil. +// If Done is closed, Err returns a non-nil error explaining why: +// Canceled if the context was canceled +// or DeadlineExceeded if the context's deadline passed. 
+func (c *Context) Err() error { + return nil +} + +// Value returns the value associated with this context for key, or nil +// if no value is associated with key. Successive calls to Value with +// the same key returns the same result. +func (c *Context) Value(key interface{}) interface{} { + if key == 0 { + return c.Request + } + if keyAsString, ok := key.(string); ok { + val, _ := c.Get(keyAsString) + return val + } + return nil +} diff --git a/backend/vendor/github.com/gin-gonic/gin/context_appengine.go b/backend/vendor/github.com/gin-gonic/gin/context_appengine.go new file mode 100644 index 00000000..38c189a0 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/context_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +func init() { + defaultAppEngine = true +} diff --git a/backend/vendor/github.com/gin-gonic/gin/debug.go b/backend/vendor/github.com/gin-gonic/gin/debug.go new file mode 100644 index 00000000..6d40a5da --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/debug.go @@ -0,0 +1,103 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "fmt" + "html/template" + "os" + "runtime" + "strconv" + "strings" +) + +const ginSupportMinGoVer = 8 + +// IsDebugging returns true if the framework is running in debug mode. +// Use SetMode(gin.ReleaseMode) to disable debug mode. +func IsDebugging() bool { + return ginMode == debugCode +} + +// DebugPrintRouteFunc indicates debug log output format. +var DebugPrintRouteFunc func(httpMethod, absolutePath, handlerName string, nuHandlers int) + +func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) { + if IsDebugging() { + nuHandlers := len(handlers) + handlerName := nameOfFunction(handlers.Last()) + if DebugPrintRouteFunc == nil { + debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + } else { + DebugPrintRouteFunc(httpMethod, absolutePath, handlerName, nuHandlers) + } + } +} + +func debugPrintLoadTemplate(tmpl *template.Template) { + if IsDebugging() { + var buf bytes.Buffer + for _, tmpl := range tmpl.Templates() { + buf.WriteString("\t- ") + buf.WriteString(tmpl.Name()) + buf.WriteString("\n") + } + debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String()) + } +} + +func debugPrint(format string, values ...interface{}) { + if IsDebugging() { + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + fmt.Fprintf(os.Stderr, "[GIN-debug] "+format, values...) + } +} + +func getMinVer(v string) (uint64, error) { + first := strings.IndexByte(v, '.') + last := strings.LastIndexByte(v, '.') + if first == last { + return strconv.ParseUint(v[first+1:], 10, 64) + } + return strconv.ParseUint(v[first+1:last], 10, 64) +} + +func debugPrintWARNINGDefault() { + if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer { + debugPrint(`[WARNING] Now Gin requires Go 1.8 or later and Go 1.9 will be required soon. + +`) + } + debugPrint(`[WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. + +`) +} + +func debugPrintWARNINGNew() { + debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production. 
+ - using env: export GIN_MODE=release + - using code: gin.SetMode(gin.ReleaseMode) + +`) +} + +func debugPrintWARNINGSetHTMLTemplate() { + debugPrint(`[WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called +at initialization. ie. before any route is registered or the router is listening in a socket: + + router := gin.Default() + router.SetHTMLTemplate(template) // << good place + +`) +} + +func debugPrintError(err error) { + if err != nil { + debugPrint("[ERROR] %v\n", err) + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/deprecated.go b/backend/vendor/github.com/gin-gonic/gin/deprecated.go new file mode 100644 index 00000000..ab447429 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/deprecated.go @@ -0,0 +1,21 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "log" + + "github.com/gin-gonic/gin/binding" +) + +// BindWith binds the passed struct pointer using the specified binding engine. +// See the binding package. +func (c *Context) BindWith(obj interface{}, b binding.Binding) error { + log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to + be deprecated, please check issue #662 and either use MustBindWith() if you + want HTTP 400 to be automatically returned if any error occur, or use + ShouldBindWith() if you need to manage the error.`) + return c.MustBindWith(obj, b) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/doc.go b/backend/vendor/github.com/gin-gonic/gin/doc.go new file mode 100644 index 00000000..1bd03864 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/doc.go @@ -0,0 +1,6 @@ +/* +Package gin implements a HTTP web framework called gin. + +See https://gin-gonic.com/ for more information about gin. +*/ +package gin // import "github.com/gin-gonic/gin" diff --git a/backend/vendor/github.com/gin-gonic/gin/errors.go b/backend/vendor/github.com/gin-gonic/gin/errors.go new file mode 100644 index 00000000..6070ff55 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/errors.go @@ -0,0 +1,169 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/gin-gonic/gin/internal/json" +) + +// ErrorType is an unsigned 64-bit error code as defined in the gin spec. +type ErrorType uint64 + +const ( + // ErrorTypeBind is used when Context.Bind() fails. + ErrorTypeBind ErrorType = 1 << 63 + // ErrorTypeRender is used when Context.Render() fails. + ErrorTypeRender ErrorType = 1 << 62 + // ErrorTypePrivate indicates a private error. + ErrorTypePrivate ErrorType = 1 << 0 + // ErrorTypePublic indicates a public error. + ErrorTypePublic ErrorType = 1 << 1 + // ErrorTypeAny indicates any other error. + ErrorTypeAny ErrorType = 1<<64 - 1 + // ErrorTypeNu indicates any other error. + ErrorTypeNu = 2 +) + +// Error represents a error's specification. +type Error struct { + Err error + Type ErrorType + Meta interface{} +} + +type errorMsgs []*Error + +var _ error = &Error{} + +// SetType sets the error's type. +func (msg *Error) SetType(flags ErrorType) *Error { + msg.Type = flags + return msg +} + +// SetMeta sets the error's meta data. 
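// Editor's note: a hedged usage sketch, not part of the upstream patch. SetType
// and SetMeta are usually chained off Context.Error inside a handler; loadUser,
// id and the meta payload are placeholders:
//
//	if err := loadUser(id); err != nil {
//		c.Error(err).SetType(gin.ErrorTypePublic).SetMeta(gin.H{"resource": "user", "id": id})
//		c.AbortWithStatus(http.StatusInternalServerError)
//		return
//	}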
+func (msg *Error) SetMeta(data interface{}) *Error { + msg.Meta = data + return msg +} + +// JSON creates a properly formatted JSON +func (msg *Error) JSON() interface{} { + json := H{} + if msg.Meta != nil { + value := reflect.ValueOf(msg.Meta) + switch value.Kind() { + case reflect.Struct: + return msg.Meta + case reflect.Map: + for _, key := range value.MapKeys() { + json[key.String()] = value.MapIndex(key).Interface() + } + default: + json["meta"] = msg.Meta + } + } + if _, ok := json["error"]; !ok { + json["error"] = msg.Error() + } + return json +} + +// MarshalJSON implements the json.Marshaller interface. +func (msg *Error) MarshalJSON() ([]byte, error) { + return json.Marshal(msg.JSON()) +} + +// Error implements the error interface. +func (msg Error) Error() string { + return msg.Err.Error() +} + +// IsType judges one error. +func (msg *Error) IsType(flags ErrorType) bool { + return (msg.Type & flags) > 0 +} + +// ByType returns a readonly copy filtered the byte. +// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic. +func (a errorMsgs) ByType(typ ErrorType) errorMsgs { + if len(a) == 0 { + return nil + } + if typ == ErrorTypeAny { + return a + } + var result errorMsgs + for _, msg := range a { + if msg.IsType(typ) { + result = append(result, msg) + } + } + return result +} + +// Last returns the last error in the slice. It returns nil if the array is empty. +// Shortcut for errors[len(errors)-1]. +func (a errorMsgs) Last() *Error { + if length := len(a); length > 0 { + return a[length-1] + } + return nil +} + +// Errors returns an array will all the error messages. +// Example: +// c.Error(errors.New("first")) +// c.Error(errors.New("second")) +// c.Error(errors.New("third")) +// c.Errors.Errors() // == []string{"first", "second", "third"} +func (a errorMsgs) Errors() []string { + if len(a) == 0 { + return nil + } + errorStrings := make([]string, len(a)) + for i, err := range a { + errorStrings[i] = err.Error() + } + return errorStrings +} + +func (a errorMsgs) JSON() interface{} { + switch len(a) { + case 0: + return nil + case 1: + return a.Last().JSON() + default: + json := make([]interface{}, len(a)) + for i, err := range a { + json[i] = err.JSON() + } + return json + } +} + +// MarshalJSON implements the json.Marshaller interface. +func (a errorMsgs) MarshalJSON() ([]byte, error) { + return json.Marshal(a.JSON()) +} + +func (a errorMsgs) String() string { + if len(a) == 0 { + return "" + } + var buffer bytes.Buffer + for i, msg := range a { + fmt.Fprintf(&buffer, "Error #%02d: %s\n", i+1, msg.Err) + if msg.Meta != nil { + fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta) + } + } + return buffer.String() +} diff --git a/backend/vendor/github.com/gin-gonic/gin/fs.go b/backend/vendor/github.com/gin-gonic/gin/fs.go new file mode 100644 index 00000000..7a6738a6 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/fs.go @@ -0,0 +1,45 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" + "os" +) + +type onlyfilesFS struct { + fs http.FileSystem +} + +type neuteredReaddirFile struct { + http.File +} + +// Dir returns a http.Filesystem that can be used by http.FileServer(). It is used internally +// in router.Static(). +// if listDirectory == true, then it works the same as http.Dir() otherwise it returns +// a filesystem that prevents http.FileServer() to list the directory files. 
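// Editor's note: an illustrative sketch, not part of the upstream patch. Dir is
// what router.Static uses internally, but it can also be passed to StaticFS
// directly when directory listings must stay disabled; the paths are placeholders:
//
//	r := gin.Default()
//	r.StaticFS("/assets", gin.Dir("./public", false)) // serves files, no directory listing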
+func Dir(root string, listDirectory bool) http.FileSystem { + fs := http.Dir(root) + if listDirectory { + return fs + } + return &onlyfilesFS{fs} +} + +// Open conforms to http.Filesystem. +func (fs onlyfilesFS) Open(name string) (http.File, error) { + f, err := fs.fs.Open(name) + if err != nil { + return nil, err + } + return neuteredReaddirFile{f}, nil +} + +// Readdir overrides the http.File default implementation. +func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) { + // this disables directory listing + return nil, nil +} diff --git a/backend/vendor/github.com/gin-gonic/gin/gin.go b/backend/vendor/github.com/gin-gonic/gin/gin.go new file mode 100644 index 00000000..4dbe9836 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/gin.go @@ -0,0 +1,477 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "html/template" + "net" + "net/http" + "os" + "path" + "sync" + + "github.com/gin-gonic/gin/render" +) + +const defaultMultipartMemory = 32 << 20 // 32 MB + +var ( + default404Body = []byte("404 page not found") + default405Body = []byte("405 method not allowed") + defaultAppEngine bool +) + +// HandlerFunc defines the handler used by gin middleware as return value. +type HandlerFunc func(*Context) + +// HandlersChain defines a HandlerFunc array. +type HandlersChain []HandlerFunc + +// Last returns the last handler in the chain. ie. the last handler is the main own. +func (c HandlersChain) Last() HandlerFunc { + if length := len(c); length > 0 { + return c[length-1] + } + return nil +} + +// RouteInfo represents a request route's specification which contains method and path and its handler. +type RouteInfo struct { + Method string + Path string + Handler string + HandlerFunc HandlerFunc +} + +// RoutesInfo defines a RouteInfo array. +type RoutesInfo []RouteInfo + +// Engine is the framework's instance, it contains the muxer, middleware and configuration settings. +// Create an instance of Engine, by using New() or Default() +type Engine struct { + RouterGroup + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. + // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. + RedirectFixedPath bool + + // If enabled, the router checks if another method is allowed for the + // current route, if the current request can not be routed. + // If this is the case, the request is answered with 'Method Not Allowed' + // and HTTP status code 405. + // If no other Method is allowed, the request is delegated to the NotFound + // handler. 
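// Editor's note (illustrative, not part of the upstream patch): these routing
// options are plain exported fields, so they are toggled directly on the engine
// before it starts serving:
//
//	r := gin.New()
//	r.HandleMethodNotAllowed = true // answer 405 instead of 404 for wrong methods
//	r.RedirectTrailingSlash = false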
+ HandleMethodNotAllowed bool + ForwardedByClientIP bool + + // #726 #755 If enabled, it will thrust some headers starting with + // 'X-AppEngine...' for better integration with that PaaS. + AppEngine bool + + // If enabled, the url.RawPath will be used to find parameters. + UseRawPath bool + + // If true, the path value will be unescaped. + // If UseRawPath is false (by default), the UnescapePathValues effectively is true, + // as url.Path gonna be used, which is already unescaped. + UnescapePathValues bool + + // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm + // method call. + MaxMultipartMemory int64 + + delims render.Delims + secureJsonPrefix string + HTMLRender render.HTMLRender + FuncMap template.FuncMap + allNoRoute HandlersChain + allNoMethod HandlersChain + noRoute HandlersChain + noMethod HandlersChain + pool sync.Pool + trees methodTrees +} + +var _ IRouter = &Engine{} + +// New returns a new blank Engine instance without any middleware attached. +// By default the configuration is: +// - RedirectTrailingSlash: true +// - RedirectFixedPath: false +// - HandleMethodNotAllowed: false +// - ForwardedByClientIP: true +// - UseRawPath: false +// - UnescapePathValues: true +func New() *Engine { + debugPrintWARNINGNew() + engine := &Engine{ + RouterGroup: RouterGroup{ + Handlers: nil, + basePath: "/", + root: true, + }, + FuncMap: template.FuncMap{}, + RedirectTrailingSlash: true, + RedirectFixedPath: false, + HandleMethodNotAllowed: false, + ForwardedByClientIP: true, + AppEngine: defaultAppEngine, + UseRawPath: false, + UnescapePathValues: true, + MaxMultipartMemory: defaultMultipartMemory, + trees: make(methodTrees, 0, 9), + delims: render.Delims{Left: "{{", Right: "}}"}, + secureJsonPrefix: "while(1);", + } + engine.RouterGroup.engine = engine + engine.pool.New = func() interface{} { + return engine.allocateContext() + } + return engine +} + +// Default returns an Engine instance with the Logger and Recovery middleware already attached. +func Default() *Engine { + debugPrintWARNINGDefault() + engine := New() + engine.Use(Logger(), Recovery()) + return engine +} + +func (engine *Engine) allocateContext() *Context { + return &Context{engine: engine} +} + +// Delims sets template left and right delims and returns a Engine instance. +func (engine *Engine) Delims(left, right string) *Engine { + engine.delims = render.Delims{Left: left, Right: right} + return engine +} + +// SecureJsonPrefix sets the secureJsonPrefix used in Context.SecureJSON. +func (engine *Engine) SecureJsonPrefix(prefix string) *Engine { + engine.secureJsonPrefix = prefix + return engine +} + +// LoadHTMLGlob loads HTML files identified by glob pattern +// and associates the result with HTML renderer. +func (engine *Engine) LoadHTMLGlob(pattern string) { + left := engine.delims.Left + right := engine.delims.Right + templ := template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern)) + + if IsDebugging() { + debugPrintLoadTemplate(templ) + engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + engine.SetHTMLTemplate(templ) +} + +// LoadHTMLFiles loads a slice of HTML files +// and associates the result with HTML renderer. 
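// Editor's note: a minimal sketch, not part of the upstream patch, assuming a
// templates/ directory containing an index.tmpl file:
//
//	r := gin.Default()
//	r.LoadHTMLFiles("templates/index.tmpl")
//	r.GET("/", func(c *gin.Context) {
//		c.HTML(http.StatusOK, "index.tmpl", gin.H{"title": "Home"})
//	})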
+func (engine *Engine) LoadHTMLFiles(files ...string) { + if IsDebugging() { + engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...)) + engine.SetHTMLTemplate(templ) +} + +// SetHTMLTemplate associate a template with HTML renderer. +func (engine *Engine) SetHTMLTemplate(templ *template.Template) { + if len(engine.trees) > 0 { + debugPrintWARNINGSetHTMLTemplate() + } + + engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)} +} + +// SetFuncMap sets the FuncMap used for template.FuncMap. +func (engine *Engine) SetFuncMap(funcMap template.FuncMap) { + engine.FuncMap = funcMap +} + +// NoRoute adds handlers for NoRoute. It return a 404 code by default. +func (engine *Engine) NoRoute(handlers ...HandlerFunc) { + engine.noRoute = handlers + engine.rebuild404Handlers() +} + +// NoMethod sets the handlers called when... TODO. +func (engine *Engine) NoMethod(handlers ...HandlerFunc) { + engine.noMethod = handlers + engine.rebuild405Handlers() +} + +// Use attaches a global middleware to the router. ie. the middleware attached though Use() will be +// included in the handlers chain for every single request. Even 404, 405, static files... +// For example, this is the right place for a logger or error management middleware. +func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes { + engine.RouterGroup.Use(middleware...) + engine.rebuild404Handlers() + engine.rebuild405Handlers() + return engine +} + +func (engine *Engine) rebuild404Handlers() { + engine.allNoRoute = engine.combineHandlers(engine.noRoute) +} + +func (engine *Engine) rebuild405Handlers() { + engine.allNoMethod = engine.combineHandlers(engine.noMethod) +} + +func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { + assert1(path[0] == '/', "path must begin with '/'") + assert1(method != "", "HTTP method can not be empty") + assert1(len(handlers) > 0, "there must be at least one handler") + + debugPrintRoute(method, path, handlers) + root := engine.trees.get(method) + if root == nil { + root = new(node) + engine.trees = append(engine.trees, methodTree{method: method, root: root}) + } + root.addRoute(path, handlers) +} + +// Routes returns a slice of registered routes, including some useful information, such as: +// the http method, path and the handler name. +func (engine *Engine) Routes() (routes RoutesInfo) { + for _, tree := range engine.trees { + routes = iterate("", tree.method, routes, tree.root) + } + return routes +} + +func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { + path += root.path + if len(root.handlers) > 0 { + handlerFunc := root.handlers.Last() + routes = append(routes, RouteInfo{ + Method: method, + Path: path, + Handler: nameOfFunction(handlerFunc), + HandlerFunc: handlerFunc, + }) + } + for _, child := range root.children { + routes = iterate(path, method, routes, child) + } + return routes +} + +// Run attaches the router to a http.Server and starts listening and serving HTTP requests. +// It is a shortcut for http.ListenAndServe(addr, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. 
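// Editor's note: an illustrative end-to-end sketch, not part of the upstream
// patch; the middleware choice and port are placeholders:
//
//	r := gin.New()
//	r.Use(gin.Logger(), gin.Recovery())
//	r.NoRoute(func(c *gin.Context) {
//		c.JSON(http.StatusNotFound, gin.H{"error": "not found"})
//	})
//	if err := r.Run(":8080"); err != nil {
//		log.Fatal(err)
//	}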
+func (engine *Engine) Run(addr ...string) (err error) { + defer func() { debugPrintError(err) }() + + address := resolveAddress(addr) + debugPrint("Listening and serving HTTP on %s\n", address) + err = http.ListenAndServe(address, engine) + return +} + +// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. +// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { + debugPrint("Listening and serving HTTPS on %s\n", addr) + defer func() { debugPrintError(err) }() + + err = http.ListenAndServeTLS(addr, certFile, keyFile, engine) + return +} + +// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified unix socket (ie. a file). +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunUnix(file string) (err error) { + debugPrint("Listening and serving HTTP on unix:/%s", file) + defer func() { debugPrintError(err) }() + + os.Remove(file) + listener, err := net.Listen("unix", file) + if err != nil { + return + } + defer listener.Close() + os.Chmod(file, 0777) + err = http.Serve(listener, engine) + return +} + +// RunFd attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified file descriptor. +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunFd(fd int) (err error) { + debugPrint("Listening and serving HTTP on fd@%d", fd) + defer func() { debugPrintError(err) }() + + f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd)) + listener, err := net.FileListener(f) + if err != nil { + return + } + defer listener.Close() + err = http.Serve(listener, engine) + return +} + +// ServeHTTP conforms to the http.Handler interface. +func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { + c := engine.pool.Get().(*Context) + c.writermem.reset(w) + c.Request = req + c.reset() + + engine.handleHTTPRequest(c) + + engine.pool.Put(c) +} + +// HandleContext re-enter a context that has been rewritten. +// This can be done by setting c.Request.URL.Path to your new target. +// Disclaimer: You can loop yourself to death with this, use wisely. 
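// Editor's note: a hedged sketch, not part of the upstream patch, of the
// internal-redirect pattern HandleContext enables; route names are placeholders:
//
//	r.GET("/old", func(c *gin.Context) {
//		c.Request.URL.Path = "/new"
//		r.HandleContext(c) // re-dispatch without sending an HTTP redirect
//	})
//	r.GET("/new", func(c *gin.Context) {
//		c.JSON(http.StatusOK, gin.H{"moved": true})
//	})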
+func (engine *Engine) HandleContext(c *Context) { + oldIndexValue := c.index + c.reset() + engine.handleHTTPRequest(c) + + c.index = oldIndexValue +} + +func (engine *Engine) handleHTTPRequest(c *Context) { + httpMethod := c.Request.Method + rPath := c.Request.URL.Path + unescape := false + if engine.UseRawPath && len(c.Request.URL.RawPath) > 0 { + rPath = c.Request.URL.RawPath + unescape = engine.UnescapePathValues + } + rPath = cleanPath(rPath) + + // Find root of the tree for the given HTTP method + t := engine.trees + for i, tl := 0, len(t); i < tl; i++ { + if t[i].method != httpMethod { + continue + } + root := t[i].root + // Find route in tree + handlers, params, tsr := root.getValue(rPath, c.Params, unescape) + if handlers != nil { + c.handlers = handlers + c.Params = params + c.Next() + c.writermem.WriteHeaderNow() + return + } + if httpMethod != "CONNECT" && rPath != "/" { + if tsr && engine.RedirectTrailingSlash { + redirectTrailingSlash(c) + return + } + if engine.RedirectFixedPath && redirectFixedPath(c, root, engine.RedirectFixedPath) { + return + } + } + break + } + + if engine.HandleMethodNotAllowed { + for _, tree := range engine.trees { + if tree.method == httpMethod { + continue + } + if handlers, _, _ := tree.root.getValue(rPath, nil, unescape); handlers != nil { + c.handlers = engine.allNoMethod + serveError(c, http.StatusMethodNotAllowed, default405Body) + return + } + } + } + c.handlers = engine.allNoRoute + serveError(c, http.StatusNotFound, default404Body) +} + +var mimePlain = []string{MIMEPlain} + +func serveError(c *Context, code int, defaultMessage []byte) { + c.writermem.status = code + c.Next() + if c.writermem.Written() { + return + } + if c.writermem.Status() == code { + c.writermem.Header()["Content-Type"] = mimePlain + _, err := c.Writer.Write(defaultMessage) + if err != nil { + debugPrint("cannot write message to writer during serve error: %v", err) + } + return + } + c.writermem.WriteHeaderNow() + return +} + +func redirectTrailingSlash(c *Context) { + req := c.Request + p := req.URL.Path + if prefix := path.Clean(c.Request.Header.Get("X-Forwarded-Prefix")); prefix != "." 
{ + p = prefix + "/" + req.URL.Path + } + code := http.StatusMovedPermanently // Permanent redirect, request with GET method + if req.Method != "GET" { + code = http.StatusTemporaryRedirect + } + + req.URL.Path = p + "/" + if length := len(p); length > 1 && p[length-1] == '/' { + req.URL.Path = p[:length-1] + } + debugPrint("redirecting request %d: %s --> %s", code, p, req.URL.String()) + http.Redirect(c.Writer, req, req.URL.String(), code) + c.writermem.WriteHeaderNow() +} + +func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool { + req := c.Request + rPath := req.URL.Path + + if fixedPath, ok := root.findCaseInsensitivePath(cleanPath(rPath), trailingSlash); ok { + code := http.StatusMovedPermanently // Permanent redirect, request with GET method + if req.Method != "GET" { + code = http.StatusTemporaryRedirect + } + req.URL.Path = string(fixedPath) + debugPrint("redirecting request %d: %s --> %s", code, rPath, req.URL.String()) + http.Redirect(c.Writer, req, req.URL.String(), code) + c.writermem.WriteHeaderNow() + return true + } + return false +} diff --git a/backend/vendor/github.com/gin-gonic/gin/go.mod b/backend/vendor/github.com/gin-gonic/gin/go.mod new file mode 100644 index 00000000..1c5e995c --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/go.mod @@ -0,0 +1,18 @@ +module github.com/gin-gonic/gin + +go 1.12 + +require ( + github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 + github.com/golang/protobuf v1.3.1 + github.com/json-iterator/go v1.1.6 + github.com/mattn/go-isatty v0.0.7 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/stretchr/testify v1.3.0 + github.com/ugorji/go v1.1.4 + golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c + gopkg.in/go-playground/assert.v1 v1.2.1 // indirect + gopkg.in/go-playground/validator.v8 v8.18.2 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/backend/vendor/github.com/gin-gonic/gin/go.sum b/backend/vendor/github.com/gin-gonic/gin/go.sum new file mode 100644 index 00000000..58104682 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/go.sum @@ -0,0 +1,36 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/backend/vendor/github.com/gin-gonic/gin/internal/json/json.go b/backend/vendor/github.com/gin-gonic/gin/internal/json/json.go new file mode 100644 index 00000000..480e8bff --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/internal/json/json.go @@ -0,0 +1,22 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +// +build !jsoniter + +package json + +import "encoding/json" + +var ( + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/backend/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go b/backend/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go new file mode 100644 index 00000000..fabd7b84 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go @@ -0,0 +1,23 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +// +build jsoniter + +package json + +import "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/backend/vendor/github.com/gin-gonic/gin/logger.go b/backend/vendor/github.com/gin-gonic/gin/logger.go new file mode 100644 index 00000000..5ab4639e --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/logger.go @@ -0,0 +1,270 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/mattn/go-isatty" +) + +type consoleColorModeValue int + +const ( + autoColor consoleColorModeValue = iota + disableColor + forceColor +) + +var ( + green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) + white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) + yellow = string([]byte{27, 91, 57, 48, 59, 52, 51, 109}) + red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) + blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) + magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) + cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) + reset = string([]byte{27, 91, 48, 109}) + consoleColorMode = autoColor +) + +// LoggerConfig defines the config for Logger middleware. +type LoggerConfig struct { + // Optional. Default value is gin.defaultLogFormatter + Formatter LogFormatter + + // Output is a writer where logs are written. + // Optional. Default value is gin.DefaultWriter. + Output io.Writer + + // SkipPaths is a url path array which logs are not written. + // Optional. + SkipPaths []string +} + +// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter +type LogFormatter func(params LogFormatterParams) string + +// LogFormatterParams is the structure any formatter will be handed when time to log comes +type LogFormatterParams struct { + Request *http.Request + + // TimeStamp shows the time after the server returns a response. + TimeStamp time.Time + // StatusCode is HTTP response code. + StatusCode int + // Latency is how much time the server cost to process a certain request. + Latency time.Duration + // ClientIP equals Context's ClientIP method. + ClientIP string + // Method is the HTTP method given to the request. + Method string + // Path is a path the client requests. + Path string + // ErrorMessage is set if error has occurred in processing the request. + ErrorMessage string + // isTerm shows whether does gin's output descriptor refers to a terminal. + isTerm bool + // BodySize is the size of the Response Body + BodySize int + // Keys are the keys set on the request's context. + Keys map[string]interface{} +} + +// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal. 
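// Editor's note: an illustrative sketch, not part of the upstream patch, of a
// custom LogFormatter built from the params above (fmt and time imports assumed):
//
//	r := gin.New()
//	r.Use(gin.LoggerWithFormatter(func(p gin.LogFormatterParams) string {
//		return fmt.Sprintf("%s %s %s %d %v\n",
//			p.TimeStamp.Format(time.RFC3339), p.Method, p.Path, p.StatusCode, p.Latency)
//	}))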
+func (p *LogFormatterParams) StatusCodeColor() string { + code := p.StatusCode + + switch { + case code >= http.StatusOK && code < http.StatusMultipleChoices: + return green + case code >= http.StatusMultipleChoices && code < http.StatusBadRequest: + return white + case code >= http.StatusBadRequest && code < http.StatusInternalServerError: + return yellow + default: + return red + } +} + +// MethodColor is the ANSI color for appropriately logging http method to a terminal. +func (p *LogFormatterParams) MethodColor() string { + method := p.Method + + switch method { + case "GET": + return blue + case "POST": + return cyan + case "PUT": + return yellow + case "DELETE": + return red + case "PATCH": + return green + case "HEAD": + return magenta + case "OPTIONS": + return white + default: + return reset + } +} + +// ResetColor resets all escape attributes. +func (p *LogFormatterParams) ResetColor() string { + return reset +} + +// IsOutputColor indicates whether can colors be outputted to the log. +func (p *LogFormatterParams) IsOutputColor() bool { + return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm) +} + +// defaultLogFormatter is the default log format function Logger middleware uses. +var defaultLogFormatter = func(param LogFormatterParams) string { + var statusColor, methodColor, resetColor string + if param.IsOutputColor() { + statusColor = param.StatusCodeColor() + methodColor = param.MethodColor() + resetColor = param.ResetColor() + } + + if param.Latency > time.Minute { + // Truncate in a golang < 1.8 safe way + param.Latency = param.Latency - param.Latency%time.Second + } + return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %s\n%s", + param.TimeStamp.Format("2006/01/02 - 15:04:05"), + statusColor, param.StatusCode, resetColor, + param.Latency, + param.ClientIP, + methodColor, param.Method, resetColor, + param.Path, + param.ErrorMessage, + ) +} + +// DisableConsoleColor disables color output in the console. +func DisableConsoleColor() { + consoleColorMode = disableColor +} + +// ForceConsoleColor force color output in the console. +func ForceConsoleColor() { + consoleColorMode = forceColor +} + +// ErrorLogger returns a handlerfunc for any error type. +func ErrorLogger() HandlerFunc { + return ErrorLoggerT(ErrorTypeAny) +} + +// ErrorLoggerT returns a handlerfunc for a given error type. +func ErrorLoggerT(typ ErrorType) HandlerFunc { + return func(c *Context) { + c.Next() + errors := c.Errors.ByType(typ) + if len(errors) > 0 { + c.JSON(-1, errors) + } + } +} + +// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter. +// By default gin.DefaultWriter = os.Stdout. +func Logger() HandlerFunc { + return LoggerWithConfig(LoggerConfig{}) +} + +// LoggerWithFormatter instance a Logger middleware with the specified log format function. +func LoggerWithFormatter(f LogFormatter) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Formatter: f, + }) +} + +// LoggerWithWriter instance a Logger middleware with the specified writer buffer. +// Example: os.Stdout, a file opened in write mode, a socket... +func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Output: out, + SkipPaths: notlogged, + }) +} + +// LoggerWithConfig instance a Logger middleware with config. 
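// Editor's note: a minimal sketch, not part of the upstream patch; the skipped
// path and output writer are placeholders:
//
//	r.Use(gin.LoggerWithConfig(gin.LoggerConfig{
//		Output:    os.Stdout,
//		SkipPaths: []string{"/healthz"},
//	}))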
+func LoggerWithConfig(conf LoggerConfig) HandlerFunc { + formatter := conf.Formatter + if formatter == nil { + formatter = defaultLogFormatter + } + + out := conf.Output + if out == nil { + out = DefaultWriter + } + + notlogged := conf.SkipPaths + + isTerm := true + + if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) { + isTerm = false + } + + var skip map[string]struct{} + + if length := len(notlogged); length > 0 { + skip = make(map[string]struct{}, length) + + for _, path := range notlogged { + skip[path] = struct{}{} + } + } + + return func(c *Context) { + // Start timer + start := time.Now() + path := c.Request.URL.Path + raw := c.Request.URL.RawQuery + + // Process request + c.Next() + + // Log only when path is not being skipped + if _, ok := skip[path]; !ok { + param := LogFormatterParams{ + Request: c.Request, + isTerm: isTerm, + Keys: c.Keys, + } + + // Stop timer + param.TimeStamp = time.Now() + param.Latency = param.TimeStamp.Sub(start) + + param.ClientIP = c.ClientIP() + param.Method = c.Request.Method + param.StatusCode = c.Writer.Status() + param.ErrorMessage = c.Errors.ByType(ErrorTypePrivate).String() + + param.BodySize = c.Writer.Size() + + if raw != "" { + path = path + "?" + raw + } + + param.Path = path + + fmt.Fprint(out, formatter(param)) + } + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/mode.go b/backend/vendor/github.com/gin-gonic/gin/mode.go new file mode 100644 index 00000000..8aa84aa8 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/mode.go @@ -0,0 +1,83 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "io" + "os" + + "github.com/gin-gonic/gin/binding" +) + +// EnvGinMode indicates environment name for gin mode. +const EnvGinMode = "GIN_MODE" + +const ( + // DebugMode indicates gin mode is debug. + DebugMode = "debug" + // ReleaseMode indicates gin mode is release. + ReleaseMode = "release" + // TestMode indicates gin mode is test. + TestMode = "test" +) +const ( + debugCode = iota + releaseCode + testCode +) + +// DefaultWriter is the default io.Writer used by Gin for debug output and +// middleware output like Logger() or Recovery(). +// Note that both Logger and Recovery provides custom ways to configure their +// output io.Writer. +// To support coloring in Windows use: +// import "github.com/mattn/go-colorable" +// gin.DefaultWriter = colorable.NewColorableStdout() +var DefaultWriter io.Writer = os.Stdout + +// DefaultErrorWriter is the default io.Writer used by Gin to debug errors +var DefaultErrorWriter io.Writer = os.Stderr + +var ginMode = debugCode +var modeName = DebugMode + +func init() { + mode := os.Getenv(EnvGinMode) + SetMode(mode) +} + +// SetMode sets gin mode according to input string. +func SetMode(value string) { + switch value { + case DebugMode, "": + ginMode = debugCode + case ReleaseMode: + ginMode = releaseCode + case TestMode: + ginMode = testCode + default: + panic("gin mode unknown: " + value) + } + if value == "" { + value = DebugMode + } + modeName = value +} + +// DisableBindValidation closes the default validator. +func DisableBindValidation() { + binding.Validator = nil +} + +// EnableJsonDecoderUseNumber sets true for binding.EnableDecoderUseNumberto to +// call the UseNumber method on the JSON Decoder instance. 
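// Editor's note (illustrative, not part of the upstream patch): this is a
// process-wide switch normally flipped once during start-up, before any request
// is bound; after that, JSON numbers bound into interface{} values come back as
// json.Number rather than float64 (a hedged reading of the binding package's
// behaviour):
//
//	gin.EnableJsonDecoderUseNumber()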
+func EnableJsonDecoderUseNumber() { + binding.EnableDecoderUseNumber = true +} + +// Mode returns currently gin mode. +func Mode() string { + return modeName +} diff --git a/backend/vendor/github.com/gin-gonic/gin/path.go b/backend/vendor/github.com/gin-gonic/gin/path.go new file mode 100644 index 00000000..d1f59622 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/path.go @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE. + +package gin + +// cleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned. +func cleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 1 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r += 2 + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. element: remove to last / + r += 3 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. + // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary. +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} diff --git a/backend/vendor/github.com/gin-gonic/gin/recovery.go b/backend/vendor/github.com/gin-gonic/gin/recovery.go new file mode 100644 index 00000000..bc946c03 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/recovery.go @@ -0,0 +1,151 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
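// Editor's note: an illustrative sketch, not part of the upstream patch, of how
// the middleware in this file is normally attached (gin.Default already does the
// equivalent); the writer is a placeholder:
//
//	r := gin.New()
//	r.Use(gin.RecoveryWithWriter(os.Stderr)) // turn panics into 500 responses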
+ +package gin + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httputil" + "os" + "runtime" + "strings" + "time" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") + slash = []byte("/") +) + +// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. +func Recovery() HandlerFunc { + return RecoveryWithWriter(DefaultErrorWriter) +} + +// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one. +func RecoveryWithWriter(out io.Writer) HandlerFunc { + var logger *log.Logger + if out != nil { + logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) + } + return func(c *Context) { + defer func() { + if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + var brokenPipe bool + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + brokenPipe = true + } + } + } + if logger != nil { + stack := stack(3) + httpRequest, _ := httputil.DumpRequest(c.Request, false) + headers := strings.Split(string(httpRequest), "\r\n") + for idx, header := range headers { + current := strings.Split(header, ":") + if current[0] == "Authorization" { + headers[idx] = current[0] + ": *" + } + } + if brokenPipe { + logger.Printf("%s\n%s%s", err, string(httpRequest), reset) + } else if IsDebugging() { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s\n%s%s", + timeFormat(time.Now()), strings.Join(headers, "\r\n"), err, stack, reset) + } else { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s%s", + timeFormat(time.Now()), err, stack, reset) + } + } + + // If the connection is dead, we can't write a status to it. + if brokenPipe { + c.Error(err.(error)) // nolint: errcheck + c.Abort() + } else { + c.AbortWithStatus(http.StatusInternalServerError) + } + } + }() + c.Next() + } +} + +// stack returns a nicely formatted stack frame, skipping skip frames. +func stack(skip int) []byte { + buf := new(bytes.Buffer) // the returned data + // As we loop, we open files and read them. These variables record the currently + // loaded file. + var lines [][]byte + var lastFile string + for i := skip; ; i++ { // Skip the expected number of frames + pc, file, line, ok := runtime.Caller(i) + if !ok { + break + } + // Print this much at least. If we can't find the source, it won't show. + fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) + if file != lastFile { + data, err := ioutil.ReadFile(file) + if err != nil { + continue + } + lines = bytes.Split(data, []byte{'\n'}) + lastFile = file + } + fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line)) + } + return buf.Bytes() +} + +// source returns a space-trimmed slice of the n'th line. +func source(lines [][]byte, n int) []byte { + n-- // in stack trace, lines are 1-indexed but our array is 0-indexed + if n < 0 || n >= len(lines) { + return dunno + } + return bytes.TrimSpace(lines[n]) +} + +// function returns, if possible, the name of the function containing the PC. +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. 
Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + // Also the package path might contains dot (e.g. code.google.com/...), + // so first eliminate the path prefix + if lastSlash := bytes.LastIndex(name, slash); lastSlash >= 0 { + name = name[lastSlash+1:] + } + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} + +func timeFormat(t time.Time) string { + var timeString = t.Format("2006/01/02 - 15:04:05") + return timeString +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/data.go b/backend/vendor/github.com/gin-gonic/gin/render/data.go new file mode 100644 index 00000000..6ba657ba --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/data.go @@ -0,0 +1,25 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import "net/http" + +// Data contains ContentType and bytes data. +type Data struct { + ContentType string + Data []byte +} + +// Render (Data) writes data with custom ContentType. +func (r Data) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + _, err = w.Write(r.Data) + return +} + +// WriteContentType (Data) writes custom ContentType. +func (r Data) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/html.go b/backend/vendor/github.com/gin-gonic/gin/render/html.go new file mode 100644 index 00000000..6696ece9 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/html.go @@ -0,0 +1,92 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "html/template" + "net/http" +) + +// Delims represents a set of Left and Right delimiters for HTML template rendering. +type Delims struct { + // Left delimiter, defaults to {{. + Left string + // Right delimiter, defaults to }}. + Right string +} + +// HTMLRender interface is to be implemented by HTMLProduction and HTMLDebug. +type HTMLRender interface { + // Instance returns an HTML instance. + Instance(string, interface{}) Render +} + +// HTMLProduction contains template reference and its delims. +type HTMLProduction struct { + Template *template.Template + Delims Delims +} + +// HTMLDebug contains template delims and pattern and function with file list. +type HTMLDebug struct { + Files []string + Glob string + Delims Delims + FuncMap template.FuncMap +} + +// HTML contains template reference and its name with given interface object. +type HTML struct { + Template *template.Template + Name string + Data interface{} +} + +var htmlContentType = []string{"text/html; charset=utf-8"} + +// Instance (HTMLProduction) returns an HTML instance which it realizes Render interface. +func (r HTMLProduction) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.Template, + Name: name, + Data: data, + } +} + +// Instance (HTMLDebug) returns an HTML instance which it realizes Render interface. 
+func (r HTMLDebug) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.loadTemplate(), + Name: name, + Data: data, + } +} +func (r HTMLDebug) loadTemplate() *template.Template { + if r.FuncMap == nil { + r.FuncMap = template.FuncMap{} + } + if len(r.Files) > 0 { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...)) + } + if r.Glob != "" { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob)) + } + panic("the HTML debug render was created without files or glob pattern") +} + +// Render (HTML) executes template and writes its result with custom ContentType for response. +func (r HTML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + if r.Name == "" { + return r.Template.Execute(w, r.Data) + } + return r.Template.ExecuteTemplate(w, r.Name, r.Data) +} + +// WriteContentType (HTML) writes HTML ContentType. +func (r HTML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, htmlContentType) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/json.go b/backend/vendor/github.com/gin-gonic/gin/render/json.go new file mode 100644 index 00000000..18f27fa9 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/json.go @@ -0,0 +1,194 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + + "github.com/gin-gonic/gin/internal/json" +) + +// JSON contains the given interface object. +type JSON struct { + Data interface{} +} + +// IndentedJSON contains the given interface object. +type IndentedJSON struct { + Data interface{} +} + +// SecureJSON contains the given interface object and its prefix. +type SecureJSON struct { + Prefix string + Data interface{} +} + +// JsonpJSON contains the given interface object its callback. +type JsonpJSON struct { + Callback string + Data interface{} +} + +// AsciiJSON contains the given interface object. +type AsciiJSON struct { + Data interface{} +} + +// SecureJSONPrefix is a string which represents SecureJSON prefix. +type SecureJSONPrefix string + +// PureJSON contains the given interface object. +type PureJSON struct { + Data interface{} +} + +var jsonContentType = []string{"application/json; charset=utf-8"} +var jsonpContentType = []string{"application/javascript; charset=utf-8"} +var jsonAsciiContentType = []string{"application/json"} + +// Render (JSON) writes data with custom ContentType. +func (r JSON) Render(w http.ResponseWriter) (err error) { + if err = WriteJSON(w, r.Data); err != nil { + panic(err) + } + return +} + +// WriteContentType (JSON) writes JSON ContentType. +func (r JSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// WriteJSON marshals the given interface object and writes it with custom ContentType. +func WriteJSON(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, jsonContentType) + jsonBytes, err := json.Marshal(obj) + if err != nil { + return err + } + _, err = w.Write(jsonBytes) + return err +} + +// Render (IndentedJSON) marshals the given interface object and writes it with custom ContentType. 
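// Editor's note: a hedged sketch, not part of the upstream patch, of how these
// renders surface on *gin.Context; payload values are placeholders:
//
//	c.JSON(http.StatusOK, gin.H{"status": "ok"})         // compact output
//	c.IndentedJSON(http.StatusOK, gin.H{"status": "ok"}) // pretty-printed, debug-friendly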
+func (r IndentedJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.MarshalIndent(r.Data, "", " ") + if err != nil { + return err + } + _, err = w.Write(jsonBytes) + return err +} + +// WriteContentType (IndentedJSON) writes JSON ContentType. +func (r IndentedJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// Render (SecureJSON) marshals the given interface object and writes it with custom ContentType. +func (r SecureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.Marshal(r.Data) + if err != nil { + return err + } + // if the jsonBytes is array values + if bytes.HasPrefix(jsonBytes, []byte("[")) && bytes.HasSuffix(jsonBytes, []byte("]")) { + _, err = w.Write([]byte(r.Prefix)) + if err != nil { + return err + } + } + _, err = w.Write(jsonBytes) + return err +} + +// WriteContentType (SecureJSON) writes JSON ContentType. +func (r SecureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// Render (JsonpJSON) marshals the given interface object and writes it and its callback with custom ContentType. +func (r JsonpJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + if r.Callback == "" { + _, err = w.Write(ret) + return err + } + + callback := template.JSEscapeString(r.Callback) + _, err = w.Write([]byte(callback)) + if err != nil { + return err + } + _, err = w.Write([]byte("(")) + if err != nil { + return err + } + _, err = w.Write(ret) + if err != nil { + return err + } + _, err = w.Write([]byte(")")) + if err != nil { + return err + } + + return nil +} + +// WriteContentType (JsonpJSON) writes Javascript ContentType. +func (r JsonpJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonpContentType) +} + +// Render (AsciiJSON) marshals the given interface object and writes it with custom ContentType. +func (r AsciiJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + var buffer bytes.Buffer + for _, r := range string(ret) { + cvt := string(r) + if r >= 128 { + cvt = fmt.Sprintf("\\u%04x", int64(r)) + } + buffer.WriteString(cvt) + } + + _, err = w.Write(buffer.Bytes()) + return err +} + +// WriteContentType (AsciiJSON) writes JSON ContentType. +func (r AsciiJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonAsciiContentType) +} + +// Render (PureJSON) writes custom ContentType and encodes the given interface object. +func (r PureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + encoder := json.NewEncoder(w) + encoder.SetEscapeHTML(false) + return encoder.Encode(r.Data) +} + +// WriteContentType (PureJSON) writes custom ContentType. +func (r PureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/msgpack.go b/backend/vendor/github.com/gin-gonic/gin/render/msgpack.go new file mode 100644 index 00000000..dc681fcf --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/msgpack.go @@ -0,0 +1,35 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
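// Editor's note: an illustrative sketch, not part of the upstream patch,
// covering the JSON variants defined in the previous file; the prefix mirrors
// the example used in gin's docs and the payloads are placeholders:
//
//	r.SecureJsonPrefix(")]}',\n")
//	c.SecureJSON(http.StatusOK, []string{"lena", "austin"}) // prefixed when the body is an array
//	c.PureJSON(http.StatusOK, gin.H{"html": "<b>bold</b>"}) // leaves HTML characters unescaped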
+ +package render + +import ( + "net/http" + + "github.com/ugorji/go/codec" +) + +// MsgPack contains the given interface object. +type MsgPack struct { + Data interface{} +} + +var msgpackContentType = []string{"application/msgpack; charset=utf-8"} + +// WriteContentType (MsgPack) writes MsgPack ContentType. +func (r MsgPack) WriteContentType(w http.ResponseWriter) { + writeContentType(w, msgpackContentType) +} + +// Render (MsgPack) encodes the given interface object and writes data with custom ContentType. +func (r MsgPack) Render(w http.ResponseWriter) error { + return WriteMsgPack(w, r.Data) +} + +// WriteMsgPack writes MsgPack ContentType and encodes the given interface object. +func WriteMsgPack(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, msgpackContentType) + var mh codec.MsgpackHandle + return codec.NewEncoder(w, &mh).Encode(obj) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/protobuf.go b/backend/vendor/github.com/gin-gonic/gin/render/protobuf.go new file mode 100644 index 00000000..15aca995 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/protobuf.go @@ -0,0 +1,36 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "github.com/golang/protobuf/proto" +) + +// ProtoBuf contains the given interface object. +type ProtoBuf struct { + Data interface{} +} + +var protobufContentType = []string{"application/x-protobuf"} + +// Render (ProtoBuf) marshals the given interface object and writes data with custom ContentType. +func (r ProtoBuf) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := proto.Marshal(r.Data.(proto.Message)) + if err != nil { + return err + } + + _, err = w.Write(bytes) + return err +} + +// WriteContentType (ProtoBuf) writes ProtoBuf ContentType. +func (r ProtoBuf) WriteContentType(w http.ResponseWriter) { + writeContentType(w, protobufContentType) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/reader.go b/backend/vendor/github.com/gin-gonic/gin/render/reader.go new file mode 100644 index 00000000..312af741 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/reader.go @@ -0,0 +1,43 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "io" + "net/http" + "strconv" +) + +// Reader contains the IO reader and its length, and custom ContentType and other headers. +type Reader struct { + ContentType string + ContentLength int64 + Reader io.Reader + Headers map[string]string +} + +// Render (Reader) writes data with custom ContentType and headers. +func (r Reader) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + r.Headers["Content-Length"] = strconv.FormatInt(r.ContentLength, 10) + r.writeHeaders(w, r.Headers) + _, err = io.Copy(w, r.Reader) + return +} + +// WriteContentType (Reader) writes custom ContentType. +func (r Reader) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} + +// writeHeaders writes custom Header. 
+func (r Reader) writeHeaders(w http.ResponseWriter, headers map[string]string) { + header := w.Header() + for k, v := range headers { + if header.Get(k) == "" { + header.Set(k, v) + } + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/redirect.go b/backend/vendor/github.com/gin-gonic/gin/render/redirect.go new file mode 100644 index 00000000..c006691c --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/redirect.go @@ -0,0 +1,29 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "fmt" + "net/http" +) + +// Redirect contains the http request reference and redirects status code and location. +type Redirect struct { + Code int + Request *http.Request + Location string +} + +// Render (Redirect) redirects the http request to new location and writes redirect response. +func (r Redirect) Render(w http.ResponseWriter) error { + if (r.Code < http.StatusMultipleChoices || r.Code > http.StatusPermanentRedirect) && r.Code != http.StatusCreated { + panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code)) + } + http.Redirect(w, r.Request, r.Location, r.Code) + return nil +} + +// WriteContentType (Redirect) don't write any ContentType. +func (r Redirect) WriteContentType(http.ResponseWriter) {} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/render.go b/backend/vendor/github.com/gin-gonic/gin/render/render.go new file mode 100644 index 00000000..abfc79fc --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/render.go @@ -0,0 +1,41 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import "net/http" + +// Render interface is to be implemented by JSON, XML, HTML, YAML and so on. +type Render interface { + // Render writes data with custom ContentType. + Render(http.ResponseWriter) error + // WriteContentType writes custom ContentType. + WriteContentType(w http.ResponseWriter) +} + +var ( + _ Render = JSON{} + _ Render = IndentedJSON{} + _ Render = SecureJSON{} + _ Render = JsonpJSON{} + _ Render = XML{} + _ Render = String{} + _ Render = Redirect{} + _ Render = Data{} + _ Render = HTML{} + _ HTMLRender = HTMLDebug{} + _ HTMLRender = HTMLProduction{} + _ Render = YAML{} + _ Render = MsgPack{} + _ Render = Reader{} + _ Render = AsciiJSON{} + _ Render = ProtoBuf{} +) + +func writeContentType(w http.ResponseWriter, value []string) { + header := w.Header() + if val := header["Content-Type"]; len(val) == 0 { + header["Content-Type"] = value + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/text.go b/backend/vendor/github.com/gin-gonic/gin/render/text.go new file mode 100644 index 00000000..4e52d4c5 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/text.go @@ -0,0 +1,40 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "fmt" + "io" + "net/http" +) + +// String contains the given interface object slice and its format. +type String struct { + Format string + Data []interface{} +} + +var plainContentType = []string{"text/plain; charset=utf-8"} + +// Render (String) writes data with custom ContentType. 
+func (r String) Render(w http.ResponseWriter) error { + return WriteString(w, r.Format, r.Data) +} + +// WriteContentType (String) writes Plain ContentType. +func (r String) WriteContentType(w http.ResponseWriter) { + writeContentType(w, plainContentType) +} + +// WriteString writes data according to its format and write custom ContentType. +func WriteString(w http.ResponseWriter, format string, data []interface{}) (err error) { + writeContentType(w, plainContentType) + if len(data) > 0 { + _, err = fmt.Fprintf(w, format, data...) + return + } + _, err = io.WriteString(w, format) + return +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/xml.go b/backend/vendor/github.com/gin-gonic/gin/render/xml.go new file mode 100644 index 00000000..cc5390a2 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/xml.go @@ -0,0 +1,28 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "encoding/xml" + "net/http" +) + +// XML contains the given interface object. +type XML struct { + Data interface{} +} + +var xmlContentType = []string{"application/xml; charset=utf-8"} + +// Render (XML) encodes the given interface object and writes data with custom ContentType. +func (r XML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return xml.NewEncoder(w).Encode(r.Data) +} + +// WriteContentType (XML) writes XML ContentType for response. +func (r XML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, xmlContentType) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/render/yaml.go b/backend/vendor/github.com/gin-gonic/gin/render/yaml.go new file mode 100644 index 00000000..0df78360 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/render/yaml.go @@ -0,0 +1,36 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "gopkg.in/yaml.v2" +) + +// YAML contains the given interface object. +type YAML struct { + Data interface{} +} + +var yamlContentType = []string{"application/x-yaml; charset=utf-8"} + +// Render (YAML) marshals the given interface object and writes data with custom ContentType. +func (r YAML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := yaml.Marshal(r.Data) + if err != nil { + return err + } + + _, err = w.Write(bytes) + return err +} + +// WriteContentType (YAML) writes YAML ContentType for response. +func (r YAML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, yamlContentType) +} diff --git a/backend/vendor/github.com/gin-gonic/gin/response_writer.go b/backend/vendor/github.com/gin-gonic/gin/response_writer.go new file mode 100644 index 00000000..26826689 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/response_writer.go @@ -0,0 +1,126 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + noWritten = -1 + defaultStatus = http.StatusOK +) + +// ResponseWriter ... +type ResponseWriter interface { + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier + + // Returns the HTTP response status code of the current request. 
+ Status() int + + // Returns the number of bytes already written into the response http body. + // See Written() + Size() int + + // Writes the string into the response body. + WriteString(string) (int, error) + + // Returns true if the response body was already written. + Written() bool + + // Forces to write the http header (status code + headers). + WriteHeaderNow() + + // get the http.Pusher for server push + Pusher() http.Pusher +} + +type responseWriter struct { + http.ResponseWriter + size int + status int +} + +var _ ResponseWriter = &responseWriter{} + +func (w *responseWriter) reset(writer http.ResponseWriter) { + w.ResponseWriter = writer + w.size = noWritten + w.status = defaultStatus +} + +func (w *responseWriter) WriteHeader(code int) { + if code > 0 && w.status != code { + if w.Written() { + debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code) + } + w.status = code + } +} + +func (w *responseWriter) WriteHeaderNow() { + if !w.Written() { + w.size = 0 + w.ResponseWriter.WriteHeader(w.status) + } +} + +func (w *responseWriter) Write(data []byte) (n int, err error) { + w.WriteHeaderNow() + n, err = w.ResponseWriter.Write(data) + w.size += n + return +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + w.WriteHeaderNow() + n, err = io.WriteString(w.ResponseWriter, s) + w.size += n + return +} + +func (w *responseWriter) Status() int { + return w.status +} + +func (w *responseWriter) Size() int { + return w.size +} + +func (w *responseWriter) Written() bool { + return w.size != noWritten +} + +// Hijack implements the http.Hijacker interface. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if w.size < 0 { + w.size = 0 + } + return w.ResponseWriter.(http.Hijacker).Hijack() +} + +// CloseNotify implements the http.CloseNotify interface. +func (w *responseWriter) CloseNotify() <-chan bool { + return w.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Flush implements the http.Flush interface. +func (w *responseWriter) Flush() { + w.WriteHeaderNow() + w.ResponseWriter.(http.Flusher).Flush() +} + +func (w *responseWriter) Pusher() (pusher http.Pusher) { + if pusher, ok := w.ResponseWriter.(http.Pusher); ok { + return pusher + } + return nil +} diff --git a/backend/vendor/github.com/gin-gonic/gin/routergroup.go b/backend/vendor/github.com/gin-gonic/gin/routergroup.go new file mode 100644 index 00000000..a1e6c928 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/routergroup.go @@ -0,0 +1,228 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" + "path" + "regexp" + "strings" +) + +// IRouter defines all router handle interface includes single and group router. +type IRouter interface { + IRoutes + Group(string, ...HandlerFunc) *RouterGroup +} + +// IRoutes defines all router handle interface. 
+type IRoutes interface { + Use(...HandlerFunc) IRoutes + + Handle(string, string, ...HandlerFunc) IRoutes + Any(string, ...HandlerFunc) IRoutes + GET(string, ...HandlerFunc) IRoutes + POST(string, ...HandlerFunc) IRoutes + DELETE(string, ...HandlerFunc) IRoutes + PATCH(string, ...HandlerFunc) IRoutes + PUT(string, ...HandlerFunc) IRoutes + OPTIONS(string, ...HandlerFunc) IRoutes + HEAD(string, ...HandlerFunc) IRoutes + + StaticFile(string, string) IRoutes + Static(string, string) IRoutes + StaticFS(string, http.FileSystem) IRoutes +} + +// RouterGroup is used internally to configure router, a RouterGroup is associated with +// a prefix and an array of handlers (middleware). +type RouterGroup struct { + Handlers HandlersChain + basePath string + engine *Engine + root bool +} + +var _ IRouter = &RouterGroup{} + +// Use adds middleware to the group, see example code in GitHub. +func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { + group.Handlers = append(group.Handlers, middleware...) + return group.returnObj() +} + +// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix. +// For example, all the routes that use a common middleware for authorization could be grouped. +func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { + return &RouterGroup{ + Handlers: group.combineHandlers(handlers), + basePath: group.calculateAbsolutePath(relativePath), + engine: group.engine, + } +} + +// BasePath returns the base path of router group. +// For example, if v := router.Group("/rest/n/v1/api"), v.BasePath() is "/rest/n/v1/api". +func (group *RouterGroup) BasePath() string { + return group.basePath +} + +func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes { + absolutePath := group.calculateAbsolutePath(relativePath) + handlers = group.combineHandlers(handlers) + group.engine.addRoute(httpMethod, absolutePath, handlers) + return group.returnObj() +} + +// Handle registers a new request handle and middleware with the given path and method. +// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. +// See the example code in GitHub. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). +func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { + if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil { + panic("http method " + httpMethod + " is not valid") + } + return group.handle(httpMethod, relativePath, handlers) +} + +// POST is a shortcut for router.Handle("POST", path, handle). +func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("POST", relativePath, handlers) +} + +// GET is a shortcut for router.Handle("GET", path, handle). +func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("GET", relativePath, handlers) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle). 
+func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("DELETE", relativePath, handlers) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle). +func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("PATCH", relativePath, handlers) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle). +func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("PUT", relativePath, handlers) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). +func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("OPTIONS", relativePath, handlers) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle). +func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle("HEAD", relativePath, handlers) +} + +// Any registers a route that matches all the HTTP methods. +// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE. +func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { + group.handle("GET", relativePath, handlers) + group.handle("POST", relativePath, handlers) + group.handle("PUT", relativePath, handlers) + group.handle("PATCH", relativePath, handlers) + group.handle("HEAD", relativePath, handlers) + group.handle("OPTIONS", relativePath, handlers) + group.handle("DELETE", relativePath, handlers) + group.handle("CONNECT", relativePath, handlers) + group.handle("TRACE", relativePath, handlers) + return group.returnObj() +} + +// StaticFile registers a single route in order to serve a single file of the local filesystem. +// router.StaticFile("favicon.ico", "./resources/favicon.ico") +func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static file") + } + handler := func(c *Context) { + c.File(filepath) + } + group.GET(relativePath, handler) + group.HEAD(relativePath, handler) + return group.returnObj() +} + +// Static serves files from the given file system root. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// To use the operating system's file system implementation, +// use : +// router.Static("/static", "/var/www") +func (group *RouterGroup) Static(relativePath, root string) IRoutes { + return group.StaticFS(relativePath, Dir(root, false)) +} + +// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead. 
+// Gin by default user: gin.Dir() +func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static folder") + } + handler := group.createStaticHandler(relativePath, fs) + urlPattern := path.Join(relativePath, "/*filepath") + + // Register GET and HEAD handlers + group.GET(urlPattern, handler) + group.HEAD(urlPattern, handler) + return group.returnObj() +} + +func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc { + absolutePath := group.calculateAbsolutePath(relativePath) + fileServer := http.StripPrefix(absolutePath, http.FileServer(fs)) + + return func(c *Context) { + if _, nolisting := fs.(*onlyfilesFS); nolisting { + c.Writer.WriteHeader(http.StatusNotFound) + } + + file := c.Param("filepath") + // Check if file exists and/or if we have permission to access it + if _, err := fs.Open(file); err != nil { + c.Writer.WriteHeader(http.StatusNotFound) + c.handlers = group.engine.noRoute + // Reset index + c.index = -1 + return + } + + fileServer.ServeHTTP(c.Writer, c.Request) + } +} + +func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain { + finalSize := len(group.Handlers) + len(handlers) + if finalSize >= int(abortIndex) { + panic("too many handlers") + } + mergedHandlers := make(HandlersChain, finalSize) + copy(mergedHandlers, group.Handlers) + copy(mergedHandlers[len(group.Handlers):], handlers) + return mergedHandlers +} + +func (group *RouterGroup) calculateAbsolutePath(relativePath string) string { + return joinPaths(group.basePath, relativePath) +} + +func (group *RouterGroup) returnObj() IRoutes { + if group.root { + return group.engine + } + return group +} diff --git a/backend/vendor/github.com/gin-gonic/gin/test_helpers.go b/backend/vendor/github.com/gin-gonic/gin/test_helpers.go new file mode 100644 index 00000000..3a7a5ddf --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/test_helpers.go @@ -0,0 +1,16 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import "net/http" + +// CreateTestContext returns a fresh engine and context for testing purposes +func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) { + r = New() + c = r.allocateContext() + c.reset() + c.writermem.reset(w) + return +} diff --git a/backend/vendor/github.com/gin-gonic/gin/tree.go b/backend/vendor/github.com/gin-gonic/gin/tree.go new file mode 100644 index 00000000..ada62ceb --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/tree.go @@ -0,0 +1,627 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE + +package gin + +import ( + "net/url" + "strings" + "unicode" +) + +// Param is a single URL parameter, consisting of a key and a value. +type Param struct { + Key string + Value string +} + +// Params is a Param-slice, as returned by the router. +// The slice is ordered, the first URL parameter is also the first slice value. +// It is therefore safe to read values by the index. +type Params []Param + +// Get returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. 
+func (ps Params) Get(name string) (string, bool) { + for _, entry := range ps { + if entry.Key == name { + return entry.Value, true + } + } + return "", false +} + +// ByName returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. +func (ps Params) ByName(name string) (va string) { + va, _ = ps.Get(name) + return +} + +type methodTree struct { + method string + root *node +} + +type methodTrees []methodTree + +func (trees methodTrees) get(method string) *node { + for _, tree := range trees { + if tree.method == method { + return tree.root + } + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func countParams(path string) uint8 { + var n uint + for i := 0; i < len(path); i++ { + if path[i] != ':' && path[i] != '*' { + continue + } + n++ + } + if n >= 255 { + return 255 + } + return uint8(n) +} + +type nodeType uint8 + +const ( + static nodeType = iota // default + root + param + catchAll +) + +type node struct { + path string + indices string + children []*node + handlers HandlersChain + priority uint32 + nType nodeType + maxParams uint8 + wildChild bool +} + +// increments priority of the given child and reorders if necessary. +func (n *node) incrementChildPrio(pos int) int { + n.children[pos].priority++ + prio := n.children[pos].priority + + // adjust position (move to front) + newPos := pos + for newPos > 0 && n.children[newPos-1].priority < prio { + // swap node positions + n.children[newPos-1], n.children[newPos] = n.children[newPos], n.children[newPos-1] + + newPos-- + } + + // build new index char string + if newPos != pos { + n.indices = n.indices[:newPos] + // unchanged prefix, might be empty + n.indices[pos:pos+1] + // the index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' + } + + return newPos +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handlers HandlersChain) { + fullPath := path + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + walk: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := 0 + max := min(len(path), len(n.path)) + for i < max && path[i] == n.path[i] { + i++ + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handlers: n.handlers, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = string([]byte{n.path[i]}) + n.path = path[:i] + n.handlers = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] { + // check for longer wildcard, e.g. 
:name and :names + if len(n.path) >= len(path) || path[len(n.path)] == '/' { + continue walk + } + } + + pathSeg := path + if n.nType != catchAll { + pathSeg = strings.SplitN(path, "/", 2)[0] + } + prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path + panic("'" + pathSeg + + "' in new path '" + fullPath + + "' conflicts with existing wildcard '" + n.path + + "' in existing prefix '" + prefix + + "'") + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + // []byte for proper unicode char conversion, see #65 + n.indices += string([]byte{c}) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, fullPath, handlers) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handlers != nil { + panic("handlers are already registered for path '" + fullPath + "'") + } + n.handlers = handlers + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, fullPath, handlers) + n.nType = root + } +} + +func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) { + var offset int // already handled bytes of the path + + // find prefix until first wildcard (beginning with ':' or '*') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + switch path[end] { + // the wildcard name must not contain ':' and '*' + case ':', '*': + panic("only one wildcard per path segment is allowed, has: '" + + path[i:] + "' in path '" + fullPath + "'") + default: + end++ + } + } + + // check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route '" + path[i:end] + + "' conflicts with existing children in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if end-i < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[offset:i] + + // 
first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = string(path[i]) + n = child + n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handlers: handlers, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handlers = handlers +} + +// getValue returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. +func (n *node) getValue(path string, po Params, unescape bool) (handlers HandlersChain, p Params, tsr bool) { + p = po +walk: // Outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i := 0; i < len(n.indices); i++ { + if c == n.indices[i] { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = path == "/" && n.handlers != nil + return + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // save param value + if cap(p) < int(n.maxParams) { + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[1:] + val := path[:end] + if unescape { + var err error + if p[i].Value, err = url.QueryUnescape(val); err != nil { + p[i].Value = val // fallback, in case of error + } + } else { + p[i].Value = val + } + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = len(path) == end+1 + return + } + + if handlers = n.handlers; handlers != nil { + return + } + if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = n.path == "/" && n.handlers != nil + } + + return + + case catchAll: + // save param value + if cap(p) < int(n.maxParams) { + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[2:] + if unescape { + var err error + if p[i].Value, err = url.QueryUnescape(path); err != nil { + p[i].Value = path // fallback, in case of error + } + } else { + p[i].Value = path + } + + handlers = n.handlers + return + + default: + panic("invalid node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handlers = n.handlers; handlers != nil { + return + } + + if path == "/" && n.wildChild && n.nType != root { + tsr = true + return + } + + // No handle found. 
Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + tsr = (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handlers != nil) + return + } +} + +// findCaseInsensitivePath makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { + ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory + + // Outer loop for walking the tree + for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { + path = path[len(n.path):] + ciPath = append(ciPath, n.path...) + + if len(path) > 0 { + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + r := unicode.ToLower(rune(path[0])) + for i, index := range n.indices { + // must use recursive approach since both index and + // ToLower(index) could exist. We must check both. + if r == unicode.ToLower(index) { + out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) + if found { + return append(ciPath, out...), true + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + found = fixTrailingSlash && path == "/" && n.handlers != nil + return + } + + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + path = path[k:] + n = n.children[0] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return + } + + if n.handlers != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handlers != nil { + return append(ciPath, '/'), true + } + } + return + + case catchAll: + return append(ciPath, path...), true + + default: + panic("invalid node type") + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handlers != nil { + return ciPath, true + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i := 0; i < len(n.indices); i++ { + if n.indices[i] == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) { + return append(ciPath, '/'), true + } + return + } + } + } + return + } + } + + // Nothing found. 
+ // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath, true + } + if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && + strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && + n.handlers != nil { + return append(ciPath, n.path...), true + } + } + return +} diff --git a/backend/vendor/github.com/gin-gonic/gin/utils.go b/backend/vendor/github.com/gin-gonic/gin/utils.go new file mode 100644 index 00000000..f4532d56 --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/utils.go @@ -0,0 +1,151 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "encoding/xml" + "net/http" + "os" + "path" + "reflect" + "runtime" + "strings" +) + +// BindKey indicates a default bind key. +const BindKey = "_gin-gonic/gin/bindkey" + +// Bind is a helper function for given interface object and returns a Gin middleware. +func Bind(val interface{}) HandlerFunc { + value := reflect.ValueOf(val) + if value.Kind() == reflect.Ptr { + panic(`Bind struct can not be a pointer. Example: + Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{}) +`) + } + typ := value.Type() + + return func(c *Context) { + obj := reflect.New(typ).Interface() + if c.Bind(obj) == nil { + c.Set(BindKey, obj) + } + } +} + +// WrapF is a helper function for wrapping http.HandlerFunc and returns a Gin middleware. +func WrapF(f http.HandlerFunc) HandlerFunc { + return func(c *Context) { + f(c.Writer, c.Request) + } +} + +// WrapH is a helper function for wrapping http.Handler and returns a Gin middleware. +func WrapH(h http.Handler) HandlerFunc { + return func(c *Context) { + h.ServeHTTP(c.Writer, c.Request) + } +} + +// H is a shortcut for map[string]interface{} +type H map[string]interface{} + +// MarshalXML allows type H to be used with xml.Marshal. 
+func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name = xml.Name{ + Space: "", + Local: "map", + } + if err := e.EncodeToken(start); err != nil { + return err + } + for key, value := range h { + elem := xml.StartElement{ + Name: xml.Name{Space: "", Local: key}, + Attr: []xml.Attr{}, + } + if err := e.EncodeElement(value, elem); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +func assert1(guard bool, text string) { + if !guard { + panic(text) + } +} + +func filterFlags(content string) string { + for i, char := range content { + if char == ' ' || char == ';' { + return content[:i] + } + } + return content +} + +func chooseData(custom, wildcard interface{}) interface{} { + if custom == nil { + if wildcard == nil { + panic("negotiation config is invalid") + } + return wildcard + } + return custom +} + +func parseAccept(acceptHeader string) []string { + parts := strings.Split(acceptHeader, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + if part = strings.TrimSpace(strings.Split(part, ";")[0]); part != "" { + out = append(out, part) + } + } + return out +} + +func lastChar(str string) uint8 { + if str == "" { + panic("The length of the string can't be 0") + } + return str[len(str)-1] +} + +func nameOfFunction(f interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() +} + +func joinPaths(absolutePath, relativePath string) string { + if relativePath == "" { + return absolutePath + } + + finalPath := path.Join(absolutePath, relativePath) + appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/' + if appendSlash { + return finalPath + "/" + } + return finalPath +} + +func resolveAddress(addr []string) string { + switch len(addr) { + case 0: + if port := os.Getenv("PORT"); port != "" { + debugPrint("Environment variable PORT=\"%s\"", port) + return ":" + port + } + debugPrint("Environment variable PORT is undefined. Using port :8080 by default") + return ":8080" + case 1: + return addr[0] + default: + panic("too much parameters") + } +} diff --git a/backend/vendor/github.com/gin-gonic/gin/version.go b/backend/vendor/github.com/gin-gonic/gin/version.go new file mode 100644 index 00000000..07e7859f --- /dev/null +++ b/backend/vendor/github.com/gin-gonic/gin/version.go @@ -0,0 +1,8 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +// Version is the current gin framework's version. 
+const Version = "v1.4.0" diff --git a/backend/vendor/github.com/globalsign/mgo/.gitignore b/backend/vendor/github.com/globalsign/mgo/.gitignore new file mode 100644 index 00000000..9a3120f6 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/.gitignore @@ -0,0 +1,2 @@ +_harness +.vscode \ No newline at end of file diff --git a/backend/vendor/github.com/globalsign/mgo/.travis.yml b/backend/vendor/github.com/globalsign/mgo/.travis.yml new file mode 100644 index 00000000..f1f89e96 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/.travis.yml @@ -0,0 +1,49 @@ +language: go + +go_import_path: github.com/globalsign/mgo + +go: + - 1.9.x + - 1.10.x + +env: + global: + - BUCKET=https://s3.eu-west-2.amazonaws.com/globalsign-mgo + - FASTDL=https://fastdl.mongodb.org/linux + matrix: + - MONGODB=x86_64-ubuntu1404-3.0.15 + - MONGODB=x86_64-ubuntu1404-3.2.17 + - MONGODB=x86_64-ubuntu1404-3.4.10 + - MONGODB=x86_64-ubuntu1404-3.6.0 + +install: + + - wget $FASTDL/mongodb-linux-$MONGODB.tgz + - tar xzvf mongodb-linux-$MONGODB.tgz + - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH + + - wget $BUCKET/daemontools.tar.gz + - tar xzvf daemontools.tar.gz + - export PATH=$PWD/daemontools:$PATH + + - go get gopkg.in/check.v1 + - go get gopkg.in/yaml.v2 + - go get gopkg.in/tomb.v2 + - go get golang.org/x/lint/golint + +before_script: + - golint ./... | grep -v 'ID' | cat + - go vet github.com/globalsign/mgo/bson github.com/globalsign/mgo/txn github.com/globalsign/mgo + - export NOIPV6=1 + - make startdb + +script: + - (cd bson && go test -check.v) + - go test -check.v -fast + - (cd txn && go test -check.v) + - make stopdb + +git: + depth: 3 + +# vim:sw=4:ts=4:et diff --git a/backend/vendor/github.com/globalsign/mgo/CONTRIBUTING.md b/backend/vendor/github.com/globalsign/mgo/CONTRIBUTING.md new file mode 100644 index 00000000..79539955 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/CONTRIBUTING.md @@ -0,0 +1,14 @@ +Contributing +------------------------- + +We really appreciate contributions, but they must meet the following requirements: + +* A PR should have a brief description of the problem/feature being proposed +* Pull requests should target the `development` branch +* Existing tests should pass and any new code should be covered with it's own test(s) (use [travis-ci](https://travis-ci.org)) +* New functions should be [documented](https://blog.golang.org/godoc-documenting-go-code) clearly +* Code should pass `golint`, `go vet` and `go fmt` + +We merge PRs into `development`, which is then tested in a sharded, replicated environment in our datacenter for regressions. Once everyone is happy, we merge to master - this is to maintain a bit of quality control past the usual PR process. + +**Thanks** for helping! diff --git a/backend/vendor/github.com/globalsign/mgo/LICENSE b/backend/vendor/github.com/globalsign/mgo/LICENSE new file mode 100644 index 00000000..770c7672 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/LICENSE @@ -0,0 +1,25 @@ +mgo - MongoDB driver for Go + +Copyright (c) 2010-2013 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. 
Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/vendor/github.com/globalsign/mgo/Makefile b/backend/vendor/github.com/globalsign/mgo/Makefile new file mode 100644 index 00000000..d1027d45 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/Makefile @@ -0,0 +1,5 @@ +startdb: + @harness/setup.sh start + +stopdb: + @harness/setup.sh stop diff --git a/backend/vendor/github.com/globalsign/mgo/README.md b/backend/vendor/github.com/globalsign/mgo/README.md new file mode 100644 index 00000000..76fd0554 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/README.md @@ -0,0 +1,105 @@ +[![Build Status](https://travis-ci.org/globalsign/mgo.svg?branch=master)](https://travis-ci.org/globalsign/mgo) [![GoDoc](https://godoc.org/github.com/globalsign/mgo?status.svg)](https://godoc.org/github.com/globalsign/mgo) + +The MongoDB driver for Go +------------------------- + +This fork has had a few improvements by ourselves as well as several PR's merged from the original mgo repo that are currently awaiting review. +Changes are mostly geared towards performance improvements and bug fixes, though a few new features have been added. + +Further PR's (with tests) are welcome, but please maintain backwards compatibility. + +Detailed documentation of the API is available at +[GoDoc](https://godoc.org/github.com/globalsign/mgo). + +A [sub-package](https://godoc.org/github.com/globalsign/mgo/bson) that implements the [BSON](http://bsonspec.org) specification is also included, and may be used independently of the driver. + +## Supported Versions + +`mgo` is known to work well on (and has integration tests against) MongoDB v3.0, 3.2, 3.4 and 3.6. + +MongoDB 4.0 is currently experimental - we would happily accept PRs to help improve support! 
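
A minimal usage sketch of the driver, assuming a MongoDB instance is reachable on localhost; the `test` database, `people` collection, and `Person` struct are illustrative placeholders rather than anything defined in this patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/globalsign/mgo"
	"github.com/globalsign/mgo/bson"
)

// Person is a placeholder document type for the sketch.
type Person struct {
	Name  string
	Phone string
}

func main() {
	// Dial connects to the server and returns a root session.
	session, err := mgo.Dial("localhost")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	c := session.DB("test").C("people")

	// Insert a document, then look it up by name.
	if err := c.Insert(&Person{Name: "Ada", Phone: "+55 53 8116 9639"}); err != nil {
		log.Fatal(err)
	}

	var result Person
	if err := c.Find(bson.M{"name": "Ada"}).One(&result); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Phone:", result.Phone)
}
```
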
+ +## Changes +* Fixes attempting to authenticate before every query ([details](https://github.com/go-mgo/mgo/issues/254)) +* Removes bulk update / delete batch size limitations ([details](https://github.com/go-mgo/mgo/issues/288)) +* Adds native support for `time.Duration` marshalling ([details](https://github.com/go-mgo/mgo/pull/373)) +* Reduce memory footprint / garbage collection pressure by reusing buffers ([details](https://github.com/go-mgo/mgo/pull/229), [more](https://github.com/globalsign/mgo/pull/56)) +* Support majority read concerns ([details](https://github.com/globalsign/mgo/pull/2)) +* Improved connection handling ([details](https://github.com/globalsign/mgo/pull/5)) +* Hides SASL warnings ([details](https://github.com/globalsign/mgo/pull/7)) +* Support for partial indexes ([details](https://github.com/domodwyer/mgo/commit/5efe8eccb028238d93c222828cae4806aeae9f51)) +* Fixes timezone handling ([details](https://github.com/go-mgo/mgo/pull/464)) +* Integration tests run against MongoDB 3.2 & 3.4 releases ([details](https://github.com/globalsign/mgo/pull/4), [more](https://github.com/globalsign/mgo/pull/24), [more](https://github.com/globalsign/mgo/pull/35)) +* Improved multi-document transaction performance ([details](https://github.com/globalsign/mgo/pull/10), [more](https://github.com/globalsign/mgo/pull/11), [more](https://github.com/globalsign/mgo/pull/16)) +* Fixes cursor timeouts ([details](https://jira.mongodb.org/browse/SERVER-24899)) +* Support index hints and timeouts for count queries ([details](https://github.com/globalsign/mgo/pull/17)) +* Don't panic when handling indexed `int64` fields ([details](https://github.com/go-mgo/mgo/issues/475)) +* Supports dropping all indexes on a collection ([details](https://github.com/globalsign/mgo/pull/25)) +* Annotates log entries/profiler output with optional appName on 3.4+ ([details](https://github.com/globalsign/mgo/pull/28)) +* Support for read-only [views](https://docs.mongodb.com/manual/core/views/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/33)) +* Support for [collations](https://docs.mongodb.com/manual/reference/collation/) in 3.4+ ([details](https://github.com/globalsign/mgo/pull/37), [more](https://github.com/globalsign/mgo/pull/166)) +* Provide BSON constants for convenience/sanity ([details](https://github.com/globalsign/mgo/pull/41)) +* Consistently unmarshal time.Time values as UTC ([details](https://github.com/globalsign/mgo/pull/42)) +* Enforces best practise coding guidelines ([details](https://github.com/globalsign/mgo/pull/44)) +* GetBSON correctly handles structs with both fields and pointers ([details](https://github.com/globalsign/mgo/pull/40)) +* Improved bson.Raw unmarshalling performance ([details](https://github.com/globalsign/mgo/pull/49)) +* Minimise socket connection timeouts due to excessive locking ([details](https://github.com/globalsign/mgo/pull/52)) +* Natively support X509 client authentication ([details](https://github.com/globalsign/mgo/pull/55)) +* Gracefully recover from a temporarily unreachable server ([details](https://github.com/globalsign/mgo/pull/69)) +* Use JSON tags when no explicit BSON are tags set ([details](https://github.com/globalsign/mgo/pull/91)) +* Support [$changeStream](https://docs.mongodb.com/manual/changeStreams/) tailing on 3.6+ ([details](https://github.com/globalsign/mgo/pull/97)) +* Fix deadlock in cluster synchronisation ([details](https://github.com/globalsign/mgo/issues/120)) +* Implement `maxIdleTimeout` for pooled connections 
([details](https://github.com/globalsign/mgo/pull/116)) +* Connection pool waiting improvements ([details](https://github.com/globalsign/mgo/pull/115)) +* Fixes BSON encoding for `$in` and friends ([details](https://github.com/globalsign/mgo/pull/128)) +* Add BSON stream encoders ([details](https://github.com/globalsign/mgo/pull/127)) +* Add integer map key support in the BSON encoder ([details](https://github.com/globalsign/mgo/pull/140)) +* Support aggregation [collations](https://docs.mongodb.com/manual/reference/collation/) ([details](https://github.com/globalsign/mgo/pull/144)) +* Support encoding of inline struct references ([details](https://github.com/globalsign/mgo/pull/146)) +* Improved windows test harness ([details](https://github.com/globalsign/mgo/pull/158)) +* Improved type and nil handling in the BSON codec ([details](https://github.com/globalsign/mgo/pull/147/files), [more](https://github.com/globalsign/mgo/pull/181)) +* Separated network read/write timeouts ([details](https://github.com/globalsign/mgo/pull/161)) +* Expanded dial string configuration options ([details](https://github.com/globalsign/mgo/pull/162)) +* Implement MongoTimestamp ([details](https://github.com/globalsign/mgo/pull/171)) +* Support setting `writeConcern` for `findAndModify` operations ([details](https://github.com/globalsign/mgo/pull/185)) +* Add `ssl` to the dial string options ([details](https://github.com/globalsign/mgo/pull/184)) + + +--- + +### Thanks to +* @aksentyev +* @bachue +* @bozaro +* @BenLubar +* @carldunham +* @carter2000 +* @cedric-cordenier +* @cezarsa +* @DaytonG +* @ddspog +* @drichelson +* @dvic +* @eaglerayp +* @feliixx +* @fmpwizard +* @gazoon +* @gedge +* @gnawux +* @idy +* @jameinel +* @jefferickson +* @johnlawsharrison +* @KJTsanaktsidis +* @larrycinnabar +* @mapete94 +* @maxnoel +* @mcspring +* @Mei-Zhao +* @peterdeka +* @Reenjii +* @roobre +* @smoya +* @steve-gray +* @tbruyelle +* @wgallagher diff --git a/backend/vendor/github.com/globalsign/mgo/auth.go b/backend/vendor/github.com/globalsign/mgo/auth.go new file mode 100644 index 00000000..75d2ebc3 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/auth.go @@ -0,0 +1,467 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "crypto/md5" + "crypto/sha1" + "encoding/hex" + "errors" + "fmt" + "sync" + + "github.com/globalsign/mgo/bson" + "github.com/globalsign/mgo/internal/scram" +) + +type authCmd struct { + Authenticate int + + Nonce string + User string + Key string +} + +type startSaslCmd struct { + StartSASL int `bson:"startSasl"` +} + +type authResult struct { + ErrMsg string + Ok bool +} + +type getNonceCmd struct { + GetNonce int +} + +type getNonceResult struct { + Nonce string + Err string `bson:"$err"` + Code int +} + +type logoutCmd struct { + Logout int +} + +type saslCmd struct { + Start int `bson:"saslStart,omitempty"` + Continue int `bson:"saslContinue,omitempty"` + ConversationId int `bson:"conversationId,omitempty"` + Mechanism string `bson:"mechanism,omitempty"` + Payload []byte +} + +type saslResult struct { + Ok bool `bson:"ok"` + NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?) + Done bool + + ConversationId int `bson:"conversationId"` + Payload []byte + ErrMsg string +} + +type saslStepper interface { + Step(serverData []byte) (clientData []byte, done bool, err error) + Close() +} + +func (socket *mongoSocket) getNonce() (nonce string, err error) { + socket.Lock() + for socket.cachedNonce == "" && socket.dead == nil { + debugf("Socket %p to %s: waiting for nonce", socket, socket.addr) + socket.gotNonce.Wait() + } + if socket.cachedNonce == "mongos" { + socket.Unlock() + return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth") + } + debugf("Socket %p to %s: got nonce", socket, socket.addr) + nonce, err = socket.cachedNonce, socket.dead + socket.cachedNonce = "" + socket.Unlock() + if err != nil { + nonce = "" + } + return +} + +func (socket *mongoSocket) resetNonce() { + debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr) + op := &queryOp{} + op.query = &getNonceCmd{GetNonce: 1} + op.collection = "admin.$cmd" + op.limit = -1 + op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + if err != nil { + socket.kill(errors.New("getNonce: "+err.Error()), true) + return + } + result := &getNonceResult{} + err = bson.Unmarshal(docData, &result) + if err != nil { + socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true) + return + } + debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result) + if result.Code == 13390 { + // mongos doesn't yet support auth (see http://j.mp/mongos-auth) + result.Nonce = "mongos" + } else if result.Nonce == "" { + var msg string + if result.Err != "" { + msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code) + } else { + msg = "Got an empty nonce" + } + socket.kill(errors.New(msg), true) + return + } + socket.Lock() + if socket.cachedNonce != "" { + socket.Unlock() + panic("resetNonce: nonce already cached") + } + socket.cachedNonce = result.Nonce + socket.gotNonce.Signal() + socket.Unlock() + } + err := socket.Query(op) + if err != nil { + 
socket.kill(errors.New("resetNonce: "+err.Error()), true) + } +} + +func (socket *mongoSocket) Login(cred Credential) error { + socket.Lock() + if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 { + cred.Mechanism = "SCRAM-SHA-1" + } + for _, sockCred := range socket.creds { + if sockCred == cred { + debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username) + socket.Unlock() + return nil + } + } + if socket.dropLogout(cred) { + debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + } + socket.Unlock() + + debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username) + + var err error + switch cred.Mechanism { + case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501. + err = socket.loginClassic(cred) + case "PLAIN": + err = socket.loginPlain(cred) + case "MONGODB-X509": + err = socket.loginX509(cred) + default: + // Try SASL for everything else, if it is available. + err = socket.loginSASL(cred) + } + + if err != nil { + debugf("Socket %p to %s: login error: %s", socket, socket.addr, err) + } else { + debugf("Socket %p to %s: login successful", socket, socket.addr) + } + return err +} + +func (socket *mongoSocket) loginClassic(cred Credential) error { + // Note that this only works properly because this function is + // synchronous, which means the nonce won't get reset while we're + // using it and any other login requests will block waiting for a + // new nonce provided in the defer call below. + nonce, err := socket.getNonce() + if err != nil { + return err + } + defer socket.resetNonce() + + psum := md5.New() + psum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) + + ksum := md5.New() + ksum.Write([]byte(nonce + cred.Username)) + ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil)))) + + key := hex.EncodeToString(ksum.Sum(nil)) + + cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + +type authX509Cmd struct { + Authenticate int + User string + Mechanism string +} + +func (socket *mongoSocket) loginX509(cred Credential) error { + cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + +func (socket *mongoSocket) loginPlain(cred Credential) error { + cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)} + res := authResult{} + return socket.loginRun(cred.Source, &cmd, &res, func() error { + if !res.Ok { + return errors.New(res.ErrMsg) + } + socket.Lock() + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + socket.Unlock() + return nil + }) +} + +func (socket *mongoSocket) loginSASL(cred Credential) error { + var sasl saslStepper + var err error + if cred.Mechanism == "SCRAM-SHA-1" { + // SCRAM is handled without external libraries. 
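From the caller's side, this SCRAM path is normally reached through mgo's public dial API rather than by calling loginSASL directly. A minimal, hedged sketch follows; the host, database, and credentials are placeholders, and only DialInfo fields from the vendored mgo API are used.

```go
package main

import (
	"log"
	"time"

	"github.com/globalsign/mgo"
)

func main() {
	info := &mgo.DialInfo{
		Addrs:     []string{"localhost:27017"}, // placeholder host
		Timeout:   10 * time.Second,
		Database:  "admin", // also used as the auth source when Source is unset
		Username:  "app",
		Password:  "secret",
		Mechanism: "SCRAM-SHA-1", // if left empty, selected automatically on wire version >= 3
	}
	session, err := mgo.DialWithInfo(info)
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
	log.Println("authenticated via", info.Mechanism)
}
```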
+ sasl = saslNewScram(cred) + } else if len(cred.ServiceHost) > 0 { + sasl, err = saslNew(cred, cred.ServiceHost) + } else { + sasl, err = saslNew(cred, socket.Server().Addr) + } + if err != nil { + return err + } + defer sasl.Close() + + // The goal of this logic is to carry a locked socket until the + // local SASL step confirms the auth is valid; the socket needs to be + // locked so that concurrent action doesn't leave the socket in an + // auth state that doesn't reflect the operations that took place. + // As a simple case, imagine inverting login=>logout to logout=>login. + // + // The logic below works because the lock func isn't called concurrently. + locked := false + lock := func(b bool) { + if locked != b { + locked = b + if b { + socket.Lock() + } else { + socket.Unlock() + } + } + } + + lock(true) + defer lock(false) + + start := 1 + cmd := saslCmd{} + res := saslResult{} + for { + payload, done, err := sasl.Step(res.Payload) + if err != nil { + return err + } + if done && res.Done { + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + break + } + lock(false) + + cmd = saslCmd{ + Start: start, + Continue: 1 - start, + ConversationId: res.ConversationId, + Mechanism: cred.Mechanism, + Payload: payload, + } + start = 0 + err = socket.loginRun(cred.Source, &cmd, &res, func() error { + // See the comment on lock for why this is necessary. + lock(true) + if !res.Ok || res.NotOk { + return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg) + } + return nil + }) + if err != nil { + return err + } + if done && res.Done { + socket.dropAuth(cred.Source) + socket.creds = append(socket.creds, cred) + break + } + } + + return nil +} + +func saslNewScram(cred Credential) *saslScram { + credsum := md5.New() + credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password)) + client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil))) + return &saslScram{cred: cred, client: client} +} + +type saslScram struct { + cred Credential + client *scram.Client +} + +func (s *saslScram) Close() {} + +func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) { + more := s.client.Step(serverData) + return s.client.Out(), !more, s.client.Err() +} + +func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error { + var mutex sync.Mutex + var replyErr error + mutex.Lock() + + op := queryOp{} + op.query = query + op.collection = db + ".$cmd" + op.limit = -1 + op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) { + defer mutex.Unlock() + + if err != nil { + replyErr = err + return + } + + err = bson.Unmarshal(docData, result) + if err != nil { + replyErr = err + } else { + // Must handle this within the read loop for the socket, so + // that concurrent login requests are properly ordered. + replyErr = f() + } + } + + err := socket.Query(&op) + if err != nil { + return err + } + mutex.Lock() // Wait. + return replyErr +} + +func (socket *mongoSocket) Logout(db string) { + socket.Lock() + cred, found := socket.dropAuth(db) + if found { + debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db) + socket.logout = append(socket.logout, cred) + } + socket.Unlock() +} + +func (socket *mongoSocket) LogoutAll() { + socket.Lock() + if l := len(socket.creds); l > 0 { + debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l) + socket.logout = append(socket.logout, socket.creds...) 
+ socket.creds = socket.creds[0:0] + } + socket.Unlock() +} + +func (socket *mongoSocket) flushLogout() (ops []interface{}) { + socket.Lock() + if l := len(socket.logout); l > 0 { + debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l) + for i := 0; i != l; i++ { + op := queryOp{} + op.query = &logoutCmd{1} + op.collection = socket.logout[i].Source + ".$cmd" + op.limit = -1 + ops = append(ops, &op) + } + socket.logout = socket.logout[0:0] + } + socket.Unlock() + return +} + +func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) { + for i, sockCred := range socket.creds { + if sockCred.Source == db { + copy(socket.creds[i:], socket.creds[i+1:]) + socket.creds = socket.creds[:len(socket.creds)-1] + return sockCred, true + } + } + return cred, false +} + +func (socket *mongoSocket) dropLogout(cred Credential) (found bool) { + for i, sockCred := range socket.logout { + if sockCred == cred { + copy(socket.logout[i:], socket.logout[i+1:]) + socket.logout = socket.logout[:len(socket.logout)-1] + return true + } + } + return false +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/LICENSE b/backend/vendor/github.com/globalsign/mgo/bson/LICENSE new file mode 100644 index 00000000..89032601 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/LICENSE @@ -0,0 +1,25 @@ +BSON library for Go + +Copyright (c) 2010-2012 - Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/vendor/github.com/globalsign/mgo/bson/README.md b/backend/vendor/github.com/globalsign/mgo/bson/README.md new file mode 100644 index 00000000..5c5819e6 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/README.md @@ -0,0 +1,12 @@ +[![GoDoc](https://godoc.org/github.com/globalsign/mgo/bson?status.svg)](https://godoc.org/github.com/globalsign/mgo/bson) + +An Implementation of BSON for Go +-------------------------------- + +Package bson is an implementation of the [BSON specification](http://bsonspec.org) for Go. + +While the BSON package implements the BSON spec as faithfully as possible, there +is some MongoDB specific behaviour (such as map keys `$in`, `$all`, etc) in the +`bson` package. 
The priority is for backwards compatibility for the `mgo` +driver, though fixes for obviously buggy behaviour is welcome (and features, etc +behind feature flags). diff --git a/backend/vendor/github.com/globalsign/mgo/bson/bson.go b/backend/vendor/github.com/globalsign/mgo/bson/bson.go new file mode 100644 index 00000000..eb87ef62 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/bson.go @@ -0,0 +1,836 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package bson is an implementation of the BSON specification for Go: +// +// http://bsonspec.org +// +// It was created as part of the mgo MongoDB driver for Go, but is standalone +// and may be used on its own without the driver. +package bson + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "os" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" +) + +//go:generate go run bson_corpus_spec_test_generator.go + +// -------------------------------------------------------------------------- +// The public API. + +// Element types constants from BSON specification. 
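As a hedged illustration of the element-kind constants declared just below: after marshalling, the kind byte of the first element sits right after the 4-byte document length prefix. The snippet is a sketch, not part of the vendored package.

```go
package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

func main() {
	data, err := bson.Marshal(bson.M{"n": int32(7)})
	if err != nil {
		panic(err)
	}
	// Wire layout: [int32 doc length][kind byte][cstring key][value]...[0x00]
	fmt.Println(data[4] == bson.ElementInt32) // true: int32 values are written as 0x10
}
```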
+const ( + ElementFloat64 byte = 0x01 + ElementString byte = 0x02 + ElementDocument byte = 0x03 + ElementArray byte = 0x04 + ElementBinary byte = 0x05 + Element06 byte = 0x06 + ElementObjectId byte = 0x07 + ElementBool byte = 0x08 + ElementDatetime byte = 0x09 + ElementNil byte = 0x0A + ElementRegEx byte = 0x0B + ElementDBPointer byte = 0x0C + ElementJavaScriptWithoutScope byte = 0x0D + ElementSymbol byte = 0x0E + ElementJavaScriptWithScope byte = 0x0F + ElementInt32 byte = 0x10 + ElementTimestamp byte = 0x11 + ElementInt64 byte = 0x12 + ElementDecimal128 byte = 0x13 + ElementMinKey byte = 0xFF + ElementMaxKey byte = 0x7F + + BinaryGeneric byte = 0x00 + BinaryFunction byte = 0x01 + BinaryBinaryOld byte = 0x02 + BinaryUUIDOld byte = 0x03 + BinaryUUID byte = 0x04 + BinaryMD5 byte = 0x05 + BinaryUserDefined byte = 0x80 +) + +// Getter interface: a value implementing the bson.Getter interface will have its GetBSON +// method called when the given value has to be marshalled, and the result +// of this method will be marshaled in place of the actual object. +// +// If GetBSON returns return a non-nil error, the marshalling procedure +// will stop and error out with the provided value. +type Getter interface { + GetBSON() (interface{}, error) +} + +// Setter interface: a value implementing the bson.Setter interface will receive the BSON +// value via the SetBSON method during unmarshaling, and the object +// itself will not be changed as usual. +// +// If setting the value works, the method should return nil or alternatively +// bson.ErrSetZero to set the respective field to its zero value (nil for +// pointer types). If SetBSON returns a value of type bson.TypeError, the +// BSON value will be omitted from a map or slice being decoded and the +// unmarshalling will continue. If it returns any other non-nil error, the +// unmarshalling procedure will stop and error out with the provided value. +// +// This interface is generally useful in pointer receivers, since the method +// will want to change the receiver. A type field that implements the Setter +// interface doesn't have to be a pointer, though. +// +// Unlike the usual behavior, unmarshalling onto a value that implements a +// Setter interface will NOT reset the value to its zero state. This allows +// the value to decide by itself how to be unmarshalled. +// +// For example: +// +// type MyString string +// +// func (s *MyString) SetBSON(raw bson.Raw) error { +// return raw.Unmarshal(s) +// } +// +type Setter interface { + SetBSON(raw Raw) error +} + +// ErrSetZero may be returned from a SetBSON method to have the value set to +// its respective zero value. When used in pointer values, this will set the +// field to nil rather than to the pre-allocated value. +var ErrSetZero = errors.New("set to zero") + +// M is a convenient alias for a map[string]interface{} map, useful for +// dealing with BSON in a native way. For instance: +// +// bson.M{"a": 1, "b": true} +// +// There's no special handling for this type in addition to what's done anyway +// for an equivalent map type. Elements in the map will be dumped in an +// undefined ordered. See also the bson.D type for an ordered alternative. +type M map[string]interface{} + +// D represents a BSON document containing ordered elements. For example: +// +// bson.D{{"a", 1}, {"b", true}} +// +// In some situations, such as when creating indexes for MongoDB, the order in +// which the elements are defined is important. 
If the order is not important, +// using a map is generally more comfortable. See bson.M and bson.RawD. +type D []DocElem + +// DocElem is an element of the bson.D document representation. +type DocElem struct { + Name string + Value interface{} +} + +// Map returns a map out of the ordered element name/value pairs in d. +func (d D) Map() (m M) { + m = make(M, len(d)) + for _, item := range d { + m[item.Name] = item.Value + } + return m +} + +// The Raw type represents raw unprocessed BSON documents and elements. +// Kind is the kind of element as defined per the BSON specification, and +// Data is the raw unprocessed data for the respective element. +// Using this type it is possible to unmarshal or marshal values partially. +// +// Relevant documentation: +// +// http://bsonspec.org/#/specification +// +type Raw struct { + Kind byte + Data []byte +} + +// RawD represents a BSON document containing raw unprocessed elements. +// This low-level representation may be useful when lazily processing +// documents of uncertain content, or when manipulating the raw content +// documents in general. +type RawD []RawDocElem + +// RawDocElem elements of RawD type. +type RawDocElem struct { + Name string + Value Raw +} + +// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes +// long. MongoDB objects by default have such a property set in their "_id" +// property. +// +// http://www.mongodb.org/display/DOCS/Object+Ids +type ObjectId string + +// ObjectIdHex returns an ObjectId from the provided hex representation. +// Calling this function with an invalid hex representation will +// cause a runtime panic. See the IsObjectIdHex function. +func ObjectIdHex(s string) ObjectId { + d, err := hex.DecodeString(s) + if err != nil || len(d) != 12 { + panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s)) + } + return ObjectId(d) +} + +// IsObjectIdHex returns whether s is a valid hex representation of +// an ObjectId. See the ObjectIdHex function. +func IsObjectIdHex(s string) bool { + if len(s) != 24 { + return false + } + _, err := hex.DecodeString(s) + return err == nil +} + +// objectIdCounter is atomically incremented when generating a new ObjectId +// using NewObjectId() function. It's used as a counter part of an id. +var objectIdCounter = readRandomUint32() + +// readRandomUint32 returns a random objectIdCounter. +func readRandomUint32() uint32 { + var b [4]byte + _, err := io.ReadFull(rand.Reader, b[:]) + if err != nil { + panic(fmt.Errorf("cannot read random object id: %v", err)) + } + return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)) +} + +// machineId stores machine id generated once and used in subsequent calls +// to NewObjectId function. +var machineId = readMachineId() +var processId = os.Getpid() + +// readMachineId generates and returns a machine id. +// If this function fails to get the hostname it will cause a runtime error. +func readMachineId() []byte { + var sum [3]byte + id := sum[:] + hostname, err1 := os.Hostname() + if err1 != nil { + _, err2 := io.ReadFull(rand.Reader, id) + if err2 != nil { + panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2)) + } + return id + } + hw := md5.New() + hw.Write([]byte(hostname)) + copy(id, hw.Sum(nil)) + return id +} + +// NewObjectId returns a new unique ObjectId. 
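A short, hedged sketch of the ObjectId helpers in this file (NewObjectId defined just below, plus ObjectIdHex, IsObjectIdHex, Hex, and Valid above):

```go
package main

import (
	"fmt"

	"github.com/globalsign/mgo/bson"
)

func main() {
	id := bson.NewObjectId() // 12 bytes: timestamp + machine + pid + counter
	hexForm := id.Hex()      // 24-character hex string
	fmt.Println(id.Valid())  // true

	// ObjectIdHex panics on malformed input, so guard with IsObjectIdHex.
	if bson.IsObjectIdHex(hexForm) {
		fmt.Println(bson.ObjectIdHex(hexForm) == id) // true: round-trips through hex
	}
}
```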
+func NewObjectId() ObjectId { + var b [12]byte + // Timestamp, 4 bytes, big endian + binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix())) + // Machine, first 3 bytes of md5(hostname) + b[4] = machineId[0] + b[5] = machineId[1] + b[6] = machineId[2] + // Pid, 2 bytes, specs don't specify endianness, but we use big endian. + b[7] = byte(processId >> 8) + b[8] = byte(processId) + // Increment, 3 bytes, big endian + i := atomic.AddUint32(&objectIdCounter, 1) + b[9] = byte(i >> 16) + b[10] = byte(i >> 8) + b[11] = byte(i) + return ObjectId(b[:]) +} + +// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled +// with the provided number of seconds from epoch UTC, and all other parts +// filled with zeroes. It's not safe to insert a document with an id generated +// by this method, it is useful only for queries to find documents with ids +// generated before or after the specified timestamp. +func NewObjectIdWithTime(t time.Time) ObjectId { + var b [12]byte + binary.BigEndian.PutUint32(b[:4], uint32(t.Unix())) + return ObjectId(string(b[:])) +} + +// String returns a hex string representation of the id. +// Example: ObjectIdHex("4d88e15b60f486e428412dc9"). +func (id ObjectId) String() string { + return fmt.Sprintf(`ObjectIdHex("%x")`, string(id)) +} + +// Hex returns a hex representation of the ObjectId. +func (id ObjectId) Hex() string { + return hex.EncodeToString([]byte(id)) +} + +// MarshalJSON turns a bson.ObjectId into a json.Marshaller. +func (id ObjectId) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%x"`, string(id))), nil +} + +var nullBytes = []byte("null") + +// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller. +func (id *ObjectId) UnmarshalJSON(data []byte) error { + if len(data) > 0 && (data[0] == '{' || data[0] == 'O') { + var v struct { + Id json.RawMessage `json:"$oid"` + Func struct { + Id json.RawMessage + } `json:"$oidFunc"` + } + err := jdec(data, &v) + if err == nil { + if len(v.Id) > 0 { + data = []byte(v.Id) + } else { + data = []byte(v.Func.Id) + } + } + } + if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) { + *id = "" + return nil + } + if len(data) != 26 || data[0] != '"' || data[25] != '"' { + return fmt.Errorf("invalid ObjectId in JSON: %s", string(data)) + } + var buf [12]byte + _, err := hex.Decode(buf[:], data[1:25]) + if err != nil { + return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err) + } + *id = ObjectId(string(buf[:])) + return nil +} + +// MarshalText turns bson.ObjectId into an encoding.TextMarshaler. +func (id ObjectId) MarshalText() ([]byte, error) { + return []byte(fmt.Sprintf("%x", string(id))), nil +} + +// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler. +func (id *ObjectId) UnmarshalText(data []byte) error { + if len(data) == 1 && data[0] == ' ' || len(data) == 0 { + *id = "" + return nil + } + if len(data) != 24 { + return fmt.Errorf("invalid ObjectId: %s", data) + } + var buf [12]byte + _, err := hex.Decode(buf[:], data[:]) + if err != nil { + return fmt.Errorf("invalid ObjectId: %s (%s)", data, err) + } + *id = ObjectId(string(buf[:])) + return nil +} + +// Valid returns true if id is valid. A valid id must contain exactly 12 bytes. +func (id ObjectId) Valid() bool { + return len(id) == 12 +} + +// byteSlice returns byte slice of id from start to end. +// Calling this function with an invalid id will cause a runtime panic. 
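Because every ObjectId embeds its creation time, NewObjectIdWithTime above pairs naturally with range queries on `_id`, and the accessors defined just below expose the packed parts. A hedged sketch with illustrative function names:

```go
package example

import (
	"time"

	"github.com/globalsign/mgo/bson"
)

// recentIDQuery builds an _id range selector for documents created in the
// last 24 hours. `since` is a dummy id: query with it, never insert it.
func recentIDQuery() bson.M {
	since := bson.NewObjectIdWithTime(time.Now().Add(-24 * time.Hour))
	return bson.M{"_id": bson.M{"$gte": since}}
}

// ageOf reports how long ago an ObjectId was generated, via the Time accessor.
func ageOf(id bson.ObjectId) time.Duration {
	return time.Since(id.Time())
}
```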
+func (id ObjectId) byteSlice(start, end int) []byte { + if len(id) != 12 { + panic(fmt.Sprintf("invalid ObjectId: %q", string(id))) + } + return []byte(string(id)[start:end]) +} + +// Time returns the timestamp part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Time() time.Time { + // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch. + secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4))) + return time.Unix(secs, 0) +} + +// Machine returns the 3-byte machine id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Machine() []byte { + return id.byteSlice(4, 7) +} + +// Pid returns the process id part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Pid() uint16 { + return binary.BigEndian.Uint16(id.byteSlice(7, 9)) +} + +// Counter returns the incrementing value part of the id. +// It's a runtime error to call this method with an invalid id. +func (id ObjectId) Counter() int32 { + b := id.byteSlice(9, 12) + // Counter is stored as big-endian 3-byte value + return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])) +} + +// The Symbol type is similar to a string and is used in languages with a +// distinct symbol type. +type Symbol string + +// Now returns the current time with millisecond precision. MongoDB stores +// timestamps with the same precision, so a Time returned from this method +// will not change after a roundtrip to the database. That's the only reason +// why this function exists. Using the time.Now function also works fine +// otherwise. +func Now() time.Time { + return time.Unix(0, time.Now().UnixNano()/1e6*1e6) +} + +// MongoTimestamp is a special internal type used by MongoDB that for some +// strange reason has its own datatype defined in BSON. +type MongoTimestamp int64 + +// Time returns the time part of ts which is stored with second precision. +func (ts MongoTimestamp) Time() time.Time { + return time.Unix(int64(uint64(ts)>>32), 0) +} + +// Counter returns the counter part of ts. +func (ts MongoTimestamp) Counter() uint32 { + return uint32(ts) +} + +// NewMongoTimestamp creates a timestamp using the given +// date `t` (with second precision) and counter `c` (unique for `t`). +// +// Returns an error if time `t` is not between 1970-01-01T00:00:00Z +// and 2106-02-07T06:28:15Z (inclusive). +// +// Note that two MongoTimestamps should never have the same (time, counter) combination: +// the caller must ensure the counter `c` is increased if creating multiple MongoTimestamp +// values for the same time `t` (ignoring fractions of seconds). +func NewMongoTimestamp(t time.Time, c uint32) (MongoTimestamp, error) { + u := t.Unix() + if u < 0 || u > math.MaxUint32 { + return -1, errors.New("invalid value for time") + } + + i := int64(u<<32 | int64(c)) + + return MongoTimestamp(i), nil +} + +type orderKey int64 + +// MaxKey is a special value that compares higher than all other possible BSON +// values in a MongoDB database. +var MaxKey = orderKey(1<<63 - 1) + +// MinKey is a special value that compares lower than all other possible BSON +// values in a MongoDB database. +var MinKey = orderKey(-1 << 63) + +type undefined struct{} + +// Undefined represents the undefined BSON value. +var Undefined undefined + +// Binary is a representation for non-standard binary values. Any kind should +// work, but the following are known as of this writing: +// +// 0x00 - Generic. 
This is decoded as []byte(data), not Binary{0x00, data}. +// 0x01 - Function (!?) +// 0x02 - Obsolete generic. +// 0x03 - UUID +// 0x05 - MD5 +// 0x80 - User defined. +// +type Binary struct { + Kind byte + Data []byte +} + +// RegEx represents a regular expression. The Options field may contain +// individual characters defining the way in which the pattern should be +// applied, and must be sorted. Valid options as of this writing are 'i' for +// case insensitive matching, 'm' for multi-line matching, 'x' for verbose +// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all +// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match +// unicode. The value of the Options parameter is not verified before being +// marshaled into the BSON format. +type RegEx struct { + Pattern string + Options string +} + +// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it +// will be marshaled as a mapping from identifiers to values that may be +// used when evaluating the provided Code. +type JavaScript struct { + Code string + Scope interface{} +} + +// DBPointer refers to a document id in a namespace. +// +// This type is deprecated in the BSON specification and should not be used +// except for backwards compatibility with ancient applications. +type DBPointer struct { + Namespace string + Id ObjectId +} + +const initialBufferSize = 64 + +func handleErr(err *error) { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } else if _, ok := r.(externalPanic); ok { + panic(r) + } else if s, ok := r.(string); ok { + *err = errors.New(s) + } else if e, ok := r.(error); ok { + *err = e + } else { + panic(r) + } + } +} + +// Marshal serializes the in value, which may be a map or a struct value. +// In the case of struct values, only exported fields will be serialized, +// and the order of serialized fields will match that of the struct itself. +// The lowercased field name is used as the key for each exported field, +// but this behavior may be changed using the respective field tag. +// The tag may also contain flags to tweak the marshalling behavior for +// the field. The tag formats accepted are: +// +// "[][,[,]]" +// +// `(...) bson:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// +// minsize Marshal an int64 value as an int32, if that's feasible +// while preserving the numeric value. +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the bson keys of other struct fields. +// +// Some examples: +// +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } +// +func Marshal(in interface{}) (out []byte, err error) { + return MarshalBuffer(in, make([]byte, 0, initialBufferSize)) +} + +// MarshalBuffer behaves the same way as Marshal, except that instead of +// allocating a new byte slice it tries to use the received byte slice and +// only allocates more memory if necessary to fit the marshaled value. 
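A small, hedged round-trip sketch of the tag flags documented above for Marshal; the type names are illustrative only.

```go
package example

import "github.com/globalsign/mgo/bson"

type Address struct {
	City string `bson:"city"`
}

type Person struct {
	Name    string  `bson:"name"`
	Nick    string  `bson:"nick,omitempty"` // omitted when empty
	Address Address `bson:",inline"`        // City is promoted into the outer document
}

func roundTrip() (bson.M, error) {
	data, err := bson.Marshal(Person{Name: "Ada", Address: Address{City: "London"}})
	if err != nil {
		return nil, err
	}
	var out bson.M
	err = bson.Unmarshal(data, &out)
	return out, err // bson.M{"name": "Ada", "city": "London"} -- no "nick" key
}
```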
+func MarshalBuffer(in interface{}, buf []byte) (out []byte, err error) { + defer handleErr(&err) + e := &encoder{buf} + e.addDoc(reflect.ValueOf(in)) + return e.out, nil +} + +// Unmarshal deserializes data from in into the out value. The out value +// must be a map, a pointer to a struct, or a pointer to a bson.D value. +// In the case of struct values, only exported fields will be deserialized. +// The lowercased field name is used as the key for each exported field, +// but this behavior may be changed using the respective field tag. +// The tag may also contain flags to tweak the marshalling behavior for +// the field. The tag formats accepted are: +// +// "[][,[,]]" +// +// `(...) bson:"[][,[,]]" (...)` +// +// The following flags are currently supported during unmarshal (see the +// Marshal method for other flags): +// +// inline Inline the field, which must be a struct or a map. +// Inlined structs are handled as if its fields were part +// of the outer struct. An inlined map causes keys that do +// not match any other struct field to be inserted in the +// map rather than being discarded as usual. +// +// The target field or element types of out may not necessarily match +// the BSON values of the provided data. The following conversions are +// made automatically: +// +// - Numeric types are converted if at least the integer part of the +// value would be preserved correctly +// - Bools are converted to numeric types as 1 or 0 +// - Numeric types are converted to bools as true if not 0 or false otherwise +// - Binary and string BSON data is converted to a string, array or byte slice +// +// If the value would not fit the type and cannot be converted, it's +// silently skipped. +// +// Pointer values are initialized when necessary. +func Unmarshal(in []byte, out interface{}) (err error) { + if raw, ok := out.(*Raw); ok { + raw.Kind = 3 + raw.Data = in + return nil + } + defer handleErr(&err) + v := reflect.ValueOf(out) + switch v.Kind() { + case reflect.Ptr: + fallthrough + case reflect.Map: + d := newDecoder(in) + d.readDocTo(v) + if d.i < len(d.in) { + return errors.New("document is corrupted") + } + case reflect.Struct: + return errors.New("unmarshal can't deal with struct values. Use a pointer") + default: + return errors.New("unmarshal needs a map or a pointer to a struct") + } + return nil +} + +// Unmarshal deserializes raw into the out value. If the out value type +// is not compatible with raw, a *bson.TypeError is returned. +// +// See the Unmarshal function documentation for more details on the +// unmarshalling process. +func (raw Raw) Unmarshal(out interface{}) (err error) { + defer handleErr(&err) + v := reflect.ValueOf(out) + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + fallthrough + case reflect.Map: + d := newDecoder(raw.Data) + good := d.readElemTo(v, raw.Kind) + if !good { + return &TypeError{v.Type(), raw.Kind} + } + case reflect.Struct: + return errors.New("raw Unmarshal can't deal with struct values. 
Use a pointer") + default: + return errors.New("raw Unmarshal needs a map or a valid pointer") + } + return nil +} + +// TypeError store details for type error occuring +// during unmarshaling +type TypeError struct { + Type reflect.Type + Kind byte +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String()) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + InlineMap int + Zero reflect.Value +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + MinSize bool + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var structMapMutex sync.RWMutex + +type externalPanic string + +func (e externalPanic) String() string { + return string(e) +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + structMapMutex.RLock() + sinfo, found := structMap[st] + structMapMutex.RUnlock() + if found { + return sinfo, nil + } + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("bson") + + // Fall-back to JSON struct tag, if feature flag is set. + if tag == "" && useJSONTagFallback { + tag = field.Tag.Get("json") + } + + // If there's no bson/json tag available. + if tag == "" { + // If there's no tag, and also no tag: value splits (i.e. no colon) + // then assume the entire tag is the value + if strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + } + + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "minsize": + info.MinSize = true + case "inline": + inline = true + default: + msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st) + panic(externalPanic(msg)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Ptr: + // allow only pointer to struct + if kind := field.Type.Elem().Kind(); kind != reflect.Struct { + return nil, errors.New("Option ,inline allows a pointer only to a struct, was given pointer to " + kind.String()) + } + + field.Type = field.Type.Elem() + fallthrough + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + panic("Option ,inline needs a struct value or a pointer to a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + sinfo = &structInfo{ + fieldsMap, + fieldsList, + inlineMap, + reflect.New(st).Elem(), + } + structMapMutex.Lock() + structMap[st] = sinfo + structMapMutex.Unlock() + return sinfo, nil +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go b/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go new file mode 100644 index 00000000..3525a004 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go @@ -0,0 +1,294 @@ +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/format" + "html/template" + "io/ioutil" + "log" + "path/filepath" + "strings" + + "github.com/globalsign/mgo/internal/json" +) + +func main() { + log.SetFlags(0) + log.SetPrefix(name + ": ") + + var g Generator + + fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name) + + src := g.generate() + + err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + bytes.Buffer // Accumulated output. +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. 
+ log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.Bytes() + } + return src +} + +// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS + +const name = "bson_corpus_spec_test_generator" + +func (g *Generator) generate() []byte { + + testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json") + if err != nil { + log.Fatalf("error reading bson-corpus files: %s", err) + } + + tests, err := g.loadTests(testFiles) + if err != nil { + log.Fatalf("error loading tests: %s", err) + } + + tmpl, err := g.getTemplate() + if err != nil { + log.Fatalf("error loading template: %s", err) + } + + tmpl.Execute(&g.Buffer, tests) + + return g.format() +} + +func (g *Generator) loadTests(filenames []string) ([]*testDef, error) { + var tests []*testDef + for _, filename := range filenames { + test, err := g.loadTest(filename) + if err != nil { + return nil, err + } + + tests = append(tests, test) + } + + return tests, nil +} + +func (g *Generator) loadTest(filename string) (*testDef, error) { + content, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + + var testDef testDef + err = json.Unmarshal(content, &testDef) + if err != nil { + return nil, err + } + + names := make(map[string]struct{}) + + for i := len(testDef.Valid) - 1; i >= 0; i-- { + if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" { + testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...) + continue + } + + name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description) + nameIdx := name + j := 1 + for { + if _, ok := names[nameIdx]; !ok { + break + } + + nameIdx = fmt.Sprintf("%s_%d", name, j) + } + + names[nameIdx] = struct{}{} + + testDef.Valid[i].TestDef = &testDef + testDef.Valid[i].Name = nameIdx + testDef.Valid[i].StructTest = testDef.TestKey != "" && + (testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) && + !testDef.Deprecated + } + + for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- { + if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") { + testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...) + continue + } + + name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description) + nameIdx := name + j := 1 + for { + if _, ok := names[nameIdx]; !ok { + break + } + + nameIdx = fmt.Sprintf("%s_%d", name, j) + } + names[nameIdx] = struct{}{} + + testDef.DecodeErrors[i].Name = nameIdx + } + + return &testDef, nil +} + +func (g *Generator) getTemplate() (*template.Template, error) { + content := `package bson_test + +import ( + "encoding/hex" + "time" + + . 
"gopkg.in/check.v1" + "github.com/globalsign/mgo/bson" +) + +func testValid(c *C, in []byte, expected []byte, result interface{}) { + err := bson.Unmarshal(in, result) + c.Assert(err, IsNil) + + out, err := bson.Marshal(result) + c.Assert(err, IsNil) + + c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out)) +} + +func testDecodeSkip(c *C, in []byte) { + err := bson.Unmarshal(in, &struct{}{}) + c.Assert(err, IsNil) +} + +func testDecodeError(c *C, in []byte, result interface{}) { + err := bson.Unmarshal(in, result) + c.Assert(err, Not(IsNil)) +} + +{{range .}} +{{range .Valid}} +func (s *S) Test{{.Name}}(c *C) { + b, err := hex.DecodeString("{{.Bson}}") + c.Assert(err, IsNil) + + {{if .CanonicalBson}} + cb, err := hex.DecodeString("{{.CanonicalBson}}") + c.Assert(err, IsNil) + {{else}} + cb := b + {{end}} + + var resultD bson.D + testValid(c, b, cb, &resultD) + {{if .StructTest}}var resultS struct { + Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + ` + } + testValid(c, b, cb, &resultS){{end}} + + testDecodeSkip(c, b) +} +{{end}} + +{{range .DecodeErrors}} +func (s *S) Test{{.Name}}(c *C) { + b, err := hex.DecodeString("{{.Bson}}") + c.Assert(err, IsNil) + + var resultD bson.D + testDecodeError(c, b, &resultD) +} +{{end}} +{{end}} +` + tmpl, err := template.New("").Parse(content) + if err != nil { + return nil, err + } + return tmpl, nil +} + +func cleanupFuncName(name string) string { + return strings.Map(func(r rune) rune { + if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) { + return r + } + return '_' + }, name) +} + +type testDef struct { + Description string `json:"description"` + BsonType string `json:"bson_type"` + TestKey string `json:"test_key"` + Valid []*valid `json:"valid"` + DecodeErrors []*decodeError `json:"decodeErrors"` + Deprecated bool `json:"deprecated"` +} + +func (t *testDef) GoType() string { + switch t.BsonType { + case "0x01": + return "float64" + case "0x02": + return "string" + case "0x03": + return "bson.D" + case "0x04": + return "[]interface{}" + case "0x05": + return "[]byte" + case "0x07": + return "bson.ObjectId" + case "0x08": + return "bool" + case "0x09": + return "time.Time" + case "0x0E": + return "string" + case "0x10": + return "int32" + case "0x12": + return "int64" + case "0x13": + return "bson.Decimal" + default: + return "interface{}" + } +} + +type valid struct { + Description string `json:"description"` + Bson string `json:"bson"` + CanonicalBson string `json:"canonical_bson"` + + Name string + StructTest bool + TestDef *testDef +} + +type decodeError struct { + Description string `json:"description"` + Bson string `json:"bson"` + + Name string +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/compatibility.go b/backend/vendor/github.com/globalsign/mgo/bson/compatibility.go new file mode 100644 index 00000000..66efd465 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/compatibility.go @@ -0,0 +1,29 @@ +package bson + +// Current state of the JSON tag fallback option. +var useJSONTagFallback = false +var useRespectNilValues = false + +// SetJSONTagFallback enables or disables the JSON-tag fallback for structure tagging. When this is enabled, structures +// without BSON tags on a field will fall-back to using the JSON tag (if present). 
+func SetJSONTagFallback(state bool) { + useJSONTagFallback = state +} + +// JSONTagFallbackState returns the current status of the JSON tag fallback compatability option. See SetJSONTagFallback +// for more information. +func JSONTagFallbackState() bool { + return useJSONTagFallback +} + +// SetRespectNilValues enables or disables serializing nil slices or maps to `null` values. +// In other words it enables `encoding/json` compatible behaviour. +func SetRespectNilValues(state bool) { + useRespectNilValues = state +} + +// RespectNilValuesState returns the current status of the JSON nil slices and maps fallback compatibility option. +// See SetRespectNilValues for more information. +func RespectNilValuesState() bool { + return useRespectNilValues +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/decimal.go b/backend/vendor/github.com/globalsign/mgo/bson/decimal.go new file mode 100644 index 00000000..672ba182 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/decimal.go @@ -0,0 +1,312 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bson + +import ( + "fmt" + "strconv" + "strings" +) + +// Decimal128 holds decimal128 BSON values. +type Decimal128 struct { + h, l uint64 +} + +func (d Decimal128) String() string { + var pos int // positive sign + var e int // exponent + var h, l uint64 // significand high/low + + if d.h>>63&1 == 0 { + pos = 1 + } + + switch d.h >> 58 & (1<<5 - 1) { + case 0x1F: + return "NaN" + case 0x1E: + return "-Inf"[pos:] + } + + l = d.l + if d.h>>61&3 == 3 { + // Bits: 1*sign 2*ignored 14*exponent 111*significand. + // Implicit 0b100 prefix in significand. + e = int(d.h>>47&(1<<14-1)) - 6176 + //h = 4<<47 | d.h&(1<<47-1) + // Spec says all of these values are out of range. + h, l = 0, 0 + } else { + // Bits: 1*sign 14*exponent 113*significand + e = int(d.h>>49&(1<<14-1)) - 6176 + h = d.h & (1<<49 - 1) + } + + // Would be handled by the logic below, but that's trivial and common. + if h == 0 && l == 0 && e == 0 { + return "-0"[pos:] + } + + var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero. 
+ var last = len(repr) + var i = len(repr) + var dot = len(repr) + e + var rem uint32 +Loop: + for d9 := 0; d9 < 5; d9++ { + h, l, rem = divmod(h, l, 1e9) + for d1 := 0; d1 < 9; d1++ { + // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc. + if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) { + e += len(repr) - i + i-- + repr[i] = '.' + last = i - 1 + dot = len(repr) // Unmark. + } + c := '0' + byte(rem%10) + rem /= 10 + i-- + repr[i] = c + // Handle "0E+3", "1E+3", etc. + if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) { + last = i + break Loop + } + if c != '0' { + last = i + } + // Break early. Works without it, but why. + if dot > i && l == 0 && h == 0 && rem == 0 { + break Loop + } + } + } + repr[last-1] = '-' + last-- + + if e > 0 { + return string(repr[last+pos:]) + "E+" + strconv.Itoa(e) + } + if e < 0 { + return string(repr[last+pos:]) + "E" + strconv.Itoa(e) + } + return string(repr[last+pos:]) +} + +func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { + div64 := uint64(div) + a := h >> 32 + aq := a / div64 + ar := a % div64 + b := ar<<32 + h&(1<<32-1) + bq := b / div64 + br := b % div64 + c := br<<32 + l>>32 + cq := c / div64 + cr := c % div64 + d := cr<<32 + l&(1<<32-1) + dq := d / div64 + dr := d % div64 + return (aq<<32 | bq), (cq<<32 | dq), uint32(dr) +} + +var dNaN = Decimal128{0x1F << 58, 0} +var dPosInf = Decimal128{0x1E << 58, 0} +var dNegInf = Decimal128{0x3E << 58, 0} + +func dErr(s string) (Decimal128, error) { + return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s) +} + +// ParseDecimal128 parse a string and return the corresponding value as +// a decimal128 +func ParseDecimal128(s string) (Decimal128, error) { + orig := s + if s == "" { + return dErr(orig) + } + neg := s[0] == '-' + if neg || s[0] == '+' { + s = s[1:] + } + + if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') { + if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") { + return dNaN, nil + } + if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { + if neg { + return dNegInf, nil + } + return dPosInf, nil + } + return dErr(orig) + } + + var h, l uint64 + var e int + + var add, ovr uint32 + var mul uint32 = 1 + var dot = -1 + var digits = 0 + var i = 0 + for i < len(s) { + c := s[i] + if mul == 1e9 { + h, l, ovr = muladd(h, l, mul, add) + mul, add = 1, 0 + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if c >= '0' && c <= '9' { + i++ + if c > '0' || digits > 0 { + digits++ + } + if digits > 34 { + if c == '0' { + // Exact rounding. + e++ + continue + } + return dErr(orig) + } + mul *= 10 + add *= 10 + add += uint32(c - '0') + continue + } + if c == '.' 
{ + i++ + if dot >= 0 || i == 1 && len(s) == 1 { + return dErr(orig) + } + if i == len(s) { + break + } + if s[i] < '0' || s[i] > '9' || e > 0 { + return dErr(orig) + } + dot = i + continue + } + break + } + if i == 0 { + return dErr(orig) + } + if mul > 1 { + h, l, ovr = muladd(h, l, mul, add) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if dot >= 0 { + e += dot - i + } + if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') { + i++ + eneg := s[i] == '-' + if eneg || s[i] == '+' { + i++ + if i == len(s) { + return dErr(orig) + } + } + n := 0 + for i < len(s) && n < 1e4 { + c := s[i] + i++ + if c < '0' || c > '9' { + return dErr(orig) + } + n *= 10 + n += int(c - '0') + } + if eneg { + n = -n + } + e += n + for e < -6176 { + // Subnormal. + var div uint32 = 1 + for div < 1e9 && e < -6176 { + div *= 10 + e++ + } + var rem uint32 + h, l, rem = divmod(h, l, div) + if rem > 0 { + return dErr(orig) + } + } + for e > 6111 { + // Clamped. + var mul uint32 = 1 + for mul < 1e9 && e > 6111 { + mul *= 10 + e-- + } + h, l, ovr = muladd(h, l, mul, 0) + if ovr > 0 || h&((1<<15-1)<<49) > 0 { + return dErr(orig) + } + } + if e < -6176 || e > 6111 { + return dErr(orig) + } + } + + if i < len(s) { + return dErr(orig) + } + + h |= uint64(e+6176) & uint64(1<<14-1) << 49 + if neg { + h |= 1 << 63 + } + return Decimal128{h, l}, nil +} + +func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) { + mul64 := uint64(mul) + a := mul64 * (l & (1<<32 - 1)) + b := a>>32 + mul64*(l>>32) + c := b>>32 + mul64*(h&(1<<32-1)) + d := c>>32 + mul64*(h>>32) + + a = a&(1<<32-1) + uint64(add) + b = b&(1<<32-1) + a>>32 + c = c&(1<<32-1) + b>>32 + d = d&(1<<32-1) + c>>32 + + return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32) +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/decode.go b/backend/vendor/github.com/globalsign/mgo/bson/decode.go new file mode 100644 index 00000000..658856ad --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/decode.go @@ -0,0 +1,1055 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// gobson - BSON library for Go. 
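The decoder in this file is also what backs lazy decoding through bson.Raw: a field can be kept as raw bytes and unmarshalled only once its shape is known. A hedged sketch with illustrative type names:

```go
package example

import "github.com/globalsign/mgo/bson"

type Point struct {
	X int `bson:"x"`
	Y int `bson:"y"`
}

type Envelope struct {
	Kind    string   `bson:"kind"`
	Payload bson.Raw `bson:"payload"` // kept as raw bytes until Kind is known
}

func decodePayload(data []byte) (interface{}, error) {
	var env Envelope
	if err := bson.Unmarshal(data, &env); err != nil {
		return nil, err
	}
	if env.Kind == "point" {
		var p Point
		err := env.Payload.Unmarshal(&p)
		return p, err
	}
	var m bson.M // generic fallback
	err := env.Payload.Unmarshal(&m)
	return m, err
}
```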
+ +package bson + +import ( + "errors" + "fmt" + "io" + "math" + "net/url" + "reflect" + "strconv" + "sync" + "time" +) + +type decoder struct { + in []byte + i int + docType reflect.Type +} + +var typeM = reflect.TypeOf(M{}) + +func newDecoder(in []byte) *decoder { + return &decoder{in, 0, typeM} +} + +// -------------------------------------------------------------------------- +// Some helper functions. + +func corrupted() { + panic("Document is corrupted") +} + +// -------------------------------------------------------------------------- +// Unmarshaling of documents. + +const ( + setterUnknown = iota + setterNone + setterType + setterAddr +) + +var setterStyles map[reflect.Type]int +var setterIface reflect.Type +var setterMutex sync.RWMutex + +func init() { + var iface Setter + setterIface = reflect.TypeOf(&iface).Elem() + setterStyles = make(map[reflect.Type]int) +} + +func setterStyle(outt reflect.Type) int { + setterMutex.RLock() + style := setterStyles[outt] + setterMutex.RUnlock() + if style != setterUnknown { + return style + } + + setterMutex.Lock() + defer setterMutex.Unlock() + if outt.Implements(setterIface) { + style = setterType + } else if reflect.PtrTo(outt).Implements(setterIface) { + style = setterAddr + } else { + style = setterNone + } + setterStyles[outt] = style + return style +} + +func getSetter(outt reflect.Type, out reflect.Value) Setter { + style := setterStyle(outt) + if style == setterNone { + return nil + } + if style == setterAddr { + if !out.CanAddr() { + return nil + } + out = out.Addr() + } else if outt.Kind() == reflect.Ptr && out.IsNil() { + out.Set(reflect.New(outt.Elem())) + } + return out.Interface().(Setter) +} + +func clearMap(m reflect.Value) { + var none reflect.Value + for _, k := range m.MapKeys() { + m.SetMapIndex(k, none) + } +} + +func (d *decoder) readDocTo(out reflect.Value) { + var elemType reflect.Type + outt := out.Type() + outk := outt.Kind() + + for { + if outk == reflect.Ptr && out.IsNil() { + out.Set(reflect.New(outt.Elem())) + } + if setter := getSetter(outt, out); setter != nil { + raw := d.readRaw(ElementDocument) + err := setter.SetBSON(raw) + if _, ok := err.(*TypeError); err != nil && !ok { + panic(err) + } + return + } + if outk == reflect.Ptr { + out = out.Elem() + outt = out.Type() + outk = out.Kind() + continue + } + break + } + + var fieldsMap map[string]fieldInfo + var inlineMap reflect.Value + if outt == typeRaw { + out.Set(reflect.ValueOf(d.readRaw(ElementDocument))) + return + } + + origout := out + if outk == reflect.Interface { + if d.docType.Kind() == reflect.Map { + mv := reflect.MakeMap(d.docType) + out.Set(mv) + out = mv + } else { + dv := reflect.New(d.docType).Elem() + out.Set(dv) + out = dv + } + outt = out.Type() + outk = outt.Kind() + } + + docType := d.docType + keyType := typeString + convertKey := false + switch outk { + case reflect.Map: + keyType = outt.Key() + if keyType != typeString { + convertKey = true + } + elemType = outt.Elem() + if elemType == typeIface { + d.docType = outt + } + if out.IsNil() { + out.Set(reflect.MakeMap(out.Type())) + } else if out.Len() > 0 { + clearMap(out) + } + case reflect.Struct: + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + fieldsMap = sinfo.FieldsMap + out.Set(sinfo.Zero) + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + if !inlineMap.IsNil() && inlineMap.Len() > 0 { + clearMap(inlineMap) + } + elemType = inlineMap.Type().Elem() + if elemType == typeIface { + d.docType = inlineMap.Type() + } + } + case 
reflect.Slice: + switch outt.Elem() { + case typeDocElem: + origout.Set(d.readDocElems(outt)) + return + case typeRawDocElem: + origout.Set(d.readRawDocElems(outt)) + return + } + fallthrough + default: + panic("Unsupported document type for unmarshalling: " + out.Type().String()) + } + + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + name := d.readCStr() + if d.i >= end { + corrupted() + } + + switch outk { + case reflect.Map: + e := reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + k := reflect.ValueOf(name) + if convertKey { + mapKeyType := out.Type().Key() + mapKeyKind := mapKeyType.Kind() + + switch mapKeyKind { + case reflect.Int: + fallthrough + case reflect.Int8: + fallthrough + case reflect.Int16: + fallthrough + case reflect.Int32: + fallthrough + case reflect.Int64: + fallthrough + case reflect.Uint: + fallthrough + case reflect.Uint8: + fallthrough + case reflect.Uint16: + fallthrough + case reflect.Uint32: + fallthrough + case reflect.Uint64: + fallthrough + case reflect.Float32: + fallthrough + case reflect.Float64: + parsed := d.parseMapKeyAsFloat(k, mapKeyKind) + k = reflect.ValueOf(parsed) + case reflect.String: + mapKeyType = keyType + default: + panic("BSON map must have string or decimal keys. Got: " + outt.String()) + } + + k = k.Convert(mapKeyType) + } + out.SetMapIndex(k, e) + } + case reflect.Struct: + if info, ok := fieldsMap[name]; ok { + if info.Inline == nil { + d.readElemTo(out.Field(info.Num), kind) + } else { + d.readElemTo(out.FieldByIndex(info.Inline), kind) + } + } else if inlineMap.IsValid() { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + e := reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + inlineMap.SetMapIndex(reflect.ValueOf(name), e) + } + } else { + d.dropElem(kind) + } + case reflect.Slice: + } + + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } + d.docType = docType +} + +func (decoder) parseMapKeyAsFloat(k reflect.Value, mapKeyKind reflect.Kind) float64 { + parsed, err := strconv.ParseFloat(k.String(), 64) + if err != nil { + panic("Map key is defined to be a decimal type (" + mapKeyKind.String() + ") but got error " + + err.Error()) + } + + return parsed +} + +func (d *decoder) readArrayDocTo(out reflect.Value) { + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + i := 0 + l := out.Len() + for d.in[d.i] != '\x00' { + if i >= l { + panic("Length mismatch on array field") + } + kind := d.readByte() + for d.i < end && d.in[d.i] != '\x00' { + d.i++ + } + if d.i >= end { + corrupted() + } + d.i++ + d.readElemTo(out.Index(i), kind) + if d.i >= end { + corrupted() + } + i++ + } + if i != l { + panic("Length mismatch on array field") + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } +} + +func (d *decoder) readSliceDoc(t reflect.Type) interface{} { + tmp := make([]reflect.Value, 0, 8) + elemType := t.Elem() + if elemType == typeRawDocElem { + d.dropElem(ElementArray) + return reflect.Zero(t).Interface() + } + if elemType == typeRaw { + return d.readSliceOfRaw() + } + + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + for d.i < end && d.in[d.i] != '\x00' { + d.i++ + } + if d.i >= end { + corrupted() + } + d.i++ + e := 
reflect.New(elemType).Elem() + if d.readElemTo(e, kind) { + tmp = append(tmp, e) + } + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } + + n := len(tmp) + slice := reflect.MakeSlice(t, n, n) + for i := 0; i != n; i++ { + slice.Index(i).Set(tmp[i]) + } + return slice.Interface() +} + +func BSONElementSize(kind byte, offset int, buffer []byte) (int, error) { + switch kind { + case ElementFloat64: // Float64 + return 8, nil + case ElementJavaScriptWithoutScope: // JavaScript without scope + fallthrough + case ElementSymbol: // Symbol + fallthrough + case ElementString: // UTF-8 string + size, err := getSize(offset, buffer) + if err != nil { + return 0, err + } + if size < 1 { + return 0, errors.New("String size can't be less then one byte") + } + size += 4 + if offset+size > len(buffer) { + return 0, io.ErrUnexpectedEOF + } + if buffer[offset+size-1] != 0 { + return 0, errors.New("Invalid string: non zero-terminated") + } + return size, nil + case ElementArray: // Array + fallthrough + case ElementDocument: // Document + size, err := getSize(offset, buffer) + if err != nil { + return 0, err + } + if size < 5 { + return 0, errors.New("Declared document size is too small") + } + return size, nil + case ElementBinary: // Binary + size, err := getSize(offset, buffer) + if err != nil { + return 0, err + } + if size < 0 { + return 0, errors.New("Binary data size can't be negative") + } + return size + 5, nil + case Element06: // Undefined (obsolete, but still seen in the wild) + return 0, nil + case ElementObjectId: // ObjectId + return 12, nil + case ElementBool: // Bool + return 1, nil + case ElementDatetime: // Timestamp + return 8, nil + case ElementNil: // Nil + return 0, nil + case ElementRegEx: // RegEx + end := offset + for i := 0; i < 2; i++ { + for end < len(buffer) && buffer[end] != '\x00' { + end++ + } + end++ + } + if end > len(buffer) { + return 0, io.ErrUnexpectedEOF + } + return end - offset, nil + case ElementDBPointer: // DBPointer + size, err := getSize(offset, buffer) + if err != nil { + return 0, err + } + if size < 1 { + return 0, errors.New("String size can't be less then one byte") + } + return size + 12 + 4, nil + case ElementJavaScriptWithScope: // JavaScript with scope + size, err := getSize(offset, buffer) + if err != nil { + return 0, err + } + if size < 4+5+5 { + return 0, errors.New("Declared document element is too small") + } + return size, nil + case ElementInt32: // Int32 + return 4, nil + case ElementTimestamp: // Mongo-specific timestamp + return 8, nil + case ElementInt64: // Int64 + return 8, nil + case ElementDecimal128: // Decimal128 + return 16, nil + case ElementMaxKey: // Max key + return 0, nil + case ElementMinKey: // Min key + return 0, nil + default: + return 0, errors.New(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) + } +} + +func (d *decoder) readRaw(kind byte) Raw { + size, err := BSONElementSize(kind, d.i, d.in) + if err != nil { + corrupted() + } + if d.i+size > len(d.in) { + corrupted() + } + d.i += size + return Raw{ + Kind: kind, + Data: d.in[d.i-size : d.i], + } +} + +func (d *decoder) readSliceOfRaw() interface{} { + tmp := make([]Raw, 0, 8) + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + for d.i < end && d.in[d.i] != '\x00' { + d.i++ + } + if d.i >= end { + corrupted() + } + d.i++ + e := d.readRaw(kind) + tmp = append(tmp, e) + if d.i >= end { + corrupted() + } + 
} + d.i++ // '\x00' + if d.i != end { + corrupted() + } + return tmp +} + +var typeSlice = reflect.TypeOf([]interface{}{}) +var typeIface = typeSlice.Elem() + +func (d *decoder) readDocElems(typ reflect.Type) reflect.Value { + docType := d.docType + d.docType = typ + slice := make([]DocElem, 0, 8) + d.readDocWith(func(kind byte, name string) { + e := DocElem{Name: name} + v := reflect.ValueOf(&e.Value) + if d.readElemTo(v.Elem(), kind) { + slice = append(slice, e) + } + }) + slicev := reflect.New(typ).Elem() + slicev.Set(reflect.ValueOf(slice)) + d.docType = docType + return slicev +} + +func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value { + docType := d.docType + d.docType = typ + slice := make([]RawDocElem, 0, 8) + d.readDocWith(func(kind byte, name string) { + e := RawDocElem{Name: name, Value: d.readRaw(kind)} + slice = append(slice, e) + }) + slicev := reflect.New(typ).Elem() + slicev.Set(reflect.ValueOf(slice)) + d.docType = docType + return slicev +} + +func (d *decoder) readDocWith(f func(kind byte, name string)) { + end := int(d.readInt32()) + end += d.i - 4 + if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' { + corrupted() + } + for d.in[d.i] != '\x00' { + kind := d.readByte() + name := d.readCStr() + if d.i >= end { + corrupted() + } + f(kind, name) + if d.i >= end { + corrupted() + } + } + d.i++ // '\x00' + if d.i != end { + corrupted() + } +} + +// -------------------------------------------------------------------------- +// Unmarshaling of individual elements within a document. +func (d *decoder) dropElem(kind byte) { + size, err := BSONElementSize(kind, d.i, d.in) + if err != nil { + corrupted() + } + if d.i+size > len(d.in) { + corrupted() + } + d.i += size +} + +// Attempt to decode an element from the document and put it into out. +// If the types are not compatible, the returned ok value will be +// false and out will be unchanged. +func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) { + outt := out.Type() + + if outt == typeRaw { + out.Set(reflect.ValueOf(d.readRaw(kind))) + return true + } + + if outt == typeRawPtr { + raw := d.readRaw(kind) + out.Set(reflect.ValueOf(&raw)) + return true + } + + if kind == ElementDocument { + // Delegate unmarshaling of documents. + outt := out.Type() + outk := out.Kind() + switch outk { + case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map: + d.readDocTo(out) + return true + } + if setterStyle(outt) != setterNone { + d.readDocTo(out) + return true + } + if outk == reflect.Slice { + switch outt.Elem() { + case typeDocElem: + out.Set(d.readDocElems(outt)) + case typeRawDocElem: + out.Set(d.readRawDocElems(outt)) + default: + d.dropElem(kind) + } + return true + } + d.dropElem(kind) + return true + } + + if setter := getSetter(outt, out); setter != nil { + err := setter.SetBSON(d.readRaw(kind)) + if err == ErrSetZero { + out.Set(reflect.Zero(outt)) + return true + } + if err == nil { + return true + } + if _, ok := err.(*TypeError); !ok { + panic(err) + } + return false + } + + var in interface{} + + switch kind { + case ElementFloat64: + in = d.readFloat64() + case ElementString: + in = d.readStr() + case ElementDocument: + panic("Can't happen. Handled above.") + case ElementArray: + outt := out.Type() + if setterStyle(outt) != setterNone { + // Skip the value so its data is handed to the setter below. 
+ d.dropElem(kind) + break + } + for outt.Kind() == reflect.Ptr { + outt = outt.Elem() + } + switch outt.Kind() { + case reflect.Array: + d.readArrayDocTo(out) + return true + case reflect.Slice: + in = d.readSliceDoc(outt) + default: + in = d.readSliceDoc(typeSlice) + } + case ElementBinary: + b := d.readBinary() + if b.Kind == BinaryGeneric || b.Kind == BinaryBinaryOld { + in = b.Data + } else { + in = b + } + case Element06: // Undefined (obsolete, but still seen in the wild) + in = Undefined + case ElementObjectId: + in = ObjectId(d.readBytes(12)) + case ElementBool: + in = d.readBool() + case ElementDatetime: // Timestamp + // MongoDB handles timestamps as milliseconds. + i := d.readInt64() + if i == -62135596800000 { + in = time.Time{} // In UTC for convenience. + } else { + in = time.Unix(i/1e3, i%1e3*1e6).UTC() + } + case ElementNil: + in = nil + case ElementRegEx: + in = d.readRegEx() + case ElementDBPointer: + in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))} + case ElementJavaScriptWithoutScope: + in = JavaScript{Code: d.readStr()} + case ElementSymbol: + in = Symbol(d.readStr()) + case ElementJavaScriptWithScope: + start := d.i + l := int(d.readInt32()) + js := JavaScript{d.readStr(), make(M)} + d.readDocTo(reflect.ValueOf(js.Scope)) + if d.i != start+l { + corrupted() + } + in = js + case ElementInt32: + in = int(d.readInt32()) + case ElementTimestamp: // Mongo-specific timestamp + in = MongoTimestamp(d.readInt64()) + case ElementInt64: + switch out.Type() { + case typeTimeDuration: + in = time.Duration(time.Duration(d.readInt64()) * time.Millisecond) + default: + in = d.readInt64() + } + case ElementDecimal128: + in = Decimal128{ + l: uint64(d.readInt64()), + h: uint64(d.readInt64()), + } + case ElementMaxKey: + in = MaxKey + case ElementMinKey: + in = MinKey + default: + panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind)) + } + + if in == nil { + out.Set(reflect.Zero(outt)) + return true + } + + outk := outt.Kind() + + // Dereference and initialize pointer if necessary. + first := true + for outk == reflect.Ptr { + if !out.IsNil() { + out = out.Elem() + } else { + elem := reflect.New(outt.Elem()) + if first { + // Only set if value is compatible. + first = false + defer func(out, elem reflect.Value) { + if good { + out.Set(elem) + } + }(out, elem) + } else { + out.Set(elem) + } + out = elem + } + outt = out.Type() + outk = outt.Kind() + } + + inv := reflect.ValueOf(in) + if outt == inv.Type() { + out.Set(inv) + return true + } + + switch outk { + case reflect.Interface: + out.Set(inv) + return true + case reflect.String: + switch inv.Kind() { + case reflect.String: + out.SetString(inv.String()) + return true + case reflect.Slice: + if b, ok := in.([]byte); ok { + out.SetString(string(b)) + return true + } + case reflect.Int, reflect.Int64: + if outt == typeJSONNumber { + out.SetString(strconv.FormatInt(inv.Int(), 10)) + return true + } + case reflect.Float64: + if outt == typeJSONNumber { + out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64)) + return true + } + } + case reflect.Slice, reflect.Array: + // Remember, array (0x04) slices are built with the correct + // element type. If we are here, must be a cross BSON kind + // conversion (e.g. 0x05 unmarshalling on string). 
+ if outt.Elem().Kind() != reflect.Uint8 { + break + } + switch inv.Kind() { + case reflect.String: + slice := []byte(inv.String()) + out.Set(reflect.ValueOf(slice)) + return true + case reflect.Slice: + switch outt.Kind() { + case reflect.Array: + reflect.Copy(out, inv) + case reflect.Slice: + out.SetBytes(inv.Bytes()) + } + return true + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch inv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetInt(inv.Int()) + return true + case reflect.Float32, reflect.Float64: + out.SetInt(int64(inv.Float())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetInt(1) + } else { + out.SetInt(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("can't happen: no uint types in BSON (!?)") + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch inv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetUint(uint64(inv.Int())) + return true + case reflect.Float32, reflect.Float64: + out.SetUint(uint64(inv.Float())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetUint(1) + } else { + out.SetUint(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON.") + } + case reflect.Float32, reflect.Float64: + switch inv.Kind() { + case reflect.Float32, reflect.Float64: + out.SetFloat(inv.Float()) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetFloat(float64(inv.Int())) + return true + case reflect.Bool: + if inv.Bool() { + out.SetFloat(1) + } else { + out.SetFloat(0) + } + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON?") + } + case reflect.Bool: + switch inv.Kind() { + case reflect.Bool: + out.SetBool(inv.Bool()) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + out.SetBool(inv.Int() != 0) + return true + case reflect.Float32, reflect.Float64: + out.SetBool(inv.Float() != 0) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + panic("Can't happen. No uint types in BSON?") + } + case reflect.Struct: + if outt == typeURL && inv.Kind() == reflect.String { + u, err := url.Parse(inv.String()) + if err != nil { + panic(err) + } + out.Set(reflect.ValueOf(u).Elem()) + return true + } + if outt == typeBinary { + if b, ok := in.([]byte); ok { + out.Set(reflect.ValueOf(Binary{Data: b})) + return true + } + } + } + + return false +} + +// -------------------------------------------------------------------------- +// Parsers of basic types. + +func (d *decoder) readRegEx() RegEx { + re := RegEx{} + re.Pattern = d.readCStr() + re.Options = d.readCStr() + return re +} + +func (d *decoder) readBinary() Binary { + l := d.readInt32() + b := Binary{} + b.Kind = d.readByte() + if b.Kind == BinaryBinaryOld && l > 4 { + // Weird obsolete format with redundant length. 
+ rl := d.readInt32() + if rl != l-4 { + corrupted() + } + l = rl + } + b.Data = d.readBytes(l) + return b +} + +func (d *decoder) readStr() string { + l := d.readInt32() + b := d.readBytes(l - 1) + if d.readByte() != '\x00' { + corrupted() + } + return string(b) +} + +func (d *decoder) readCStr() string { + start := d.i + end := start + l := len(d.in) + for ; end != l; end++ { + if d.in[end] == '\x00' { + break + } + } + d.i = end + 1 + if d.i > l { + corrupted() + } + return string(d.in[start:end]) +} + +func (d *decoder) readBool() bool { + b := d.readByte() + if b == 0 { + return false + } + if b == 1 { + return true + } + panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b)) +} + +func (d *decoder) readFloat64() float64 { + return math.Float64frombits(uint64(d.readInt64())) +} + +func (d *decoder) readInt32() int32 { + b := d.readBytes(4) + return int32((uint32(b[0]) << 0) | + (uint32(b[1]) << 8) | + (uint32(b[2]) << 16) | + (uint32(b[3]) << 24)) +} + +func getSize(offset int, b []byte) (int, error) { + if offset+4 > len(b) { + return 0, io.ErrUnexpectedEOF + } + return int((uint32(b[offset]) << 0) | + (uint32(b[offset+1]) << 8) | + (uint32(b[offset+2]) << 16) | + (uint32(b[offset+3]) << 24)), nil +} + +func (d *decoder) readInt64() int64 { + b := d.readBytes(8) + return int64((uint64(b[0]) << 0) | + (uint64(b[1]) << 8) | + (uint64(b[2]) << 16) | + (uint64(b[3]) << 24) | + (uint64(b[4]) << 32) | + (uint64(b[5]) << 40) | + (uint64(b[6]) << 48) | + (uint64(b[7]) << 56)) +} + +func (d *decoder) readByte() byte { + i := d.i + d.i++ + if d.i > len(d.in) { + corrupted() + } + return d.in[i] +} + +func (d *decoder) readBytes(length int32) []byte { + if length < 0 { + corrupted() + } + start := d.i + d.i += int(length) + if d.i < start || d.i > len(d.in) { + corrupted() + } + return d.in[start : start+int(length)] +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/encode.go b/backend/vendor/github.com/globalsign/mgo/bson/encode.go new file mode 100644 index 00000000..d0c6b2a8 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/encode.go @@ -0,0 +1,645 @@ +// BSON library for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
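Between the decoder above and the encoder that follows, the package's public entry points are bson.Marshal and bson.Unmarshal. A minimal round-trip sketch with a hypothetical Item struct (the type, field names, and tag values are illustrative assumptions, not part of the vendored sources):

package main

import (
	"fmt"
	"log"

	"github.com/globalsign/mgo/bson"
)

// Item is a hypothetical document type; the bson tags drive field naming.
type Item struct {
	ID   bson.ObjectId `bson:"_id,omitempty"`
	Name string        `bson:"name"`
	Tags []string      `bson:"tags,omitempty"`
}

func main() {
	in := Item{ID: bson.NewObjectId(), Name: "spider", Tags: []string{"go", "crawler"}}

	// Encode the struct to BSON bytes (encoder path).
	data, err := bson.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	// Decode the bytes back into a value (decoder path).
	var out Item
	if err := bson.Unmarshal(data, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}
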
+// gobson - BSON library for Go. + +package bson + +import ( + "encoding/json" + "fmt" + "math" + "net/url" + "reflect" + "sort" + "strconv" + "sync" + "time" +) + +// -------------------------------------------------------------------------- +// Some internal infrastructure. + +var ( + typeBinary = reflect.TypeOf(Binary{}) + typeObjectId = reflect.TypeOf(ObjectId("")) + typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")}) + typeSymbol = reflect.TypeOf(Symbol("")) + typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0)) + typeOrderKey = reflect.TypeOf(MinKey) + typeDocElem = reflect.TypeOf(DocElem{}) + typeRawDocElem = reflect.TypeOf(RawDocElem{}) + typeRaw = reflect.TypeOf(Raw{}) + typeRawPtr = reflect.PtrTo(reflect.TypeOf(Raw{})) + typeURL = reflect.TypeOf(url.URL{}) + typeTime = reflect.TypeOf(time.Time{}) + typeString = reflect.TypeOf("") + typeJSONNumber = reflect.TypeOf(json.Number("")) + typeTimeDuration = reflect.TypeOf(time.Duration(0)) +) + +var ( + // spec for []uint8 or []byte encoding + arrayOps = map[string]bool{ + "$in": true, + "$nin": true, + "$all": true, + } +) + +const itoaCacheSize = 32 + +const ( + getterUnknown = iota + getterNone + getterTypeVal + getterTypePtr + getterAddr +) + +var itoaCache []string + +var getterStyles map[reflect.Type]int +var getterIface reflect.Type +var getterMutex sync.RWMutex + +func init() { + itoaCache = make([]string, itoaCacheSize) + for i := 0; i != itoaCacheSize; i++ { + itoaCache[i] = strconv.Itoa(i) + } + var iface Getter + getterIface = reflect.TypeOf(&iface).Elem() + getterStyles = make(map[reflect.Type]int) +} + +func itoa(i int) string { + if i < itoaCacheSize { + return itoaCache[i] + } + return strconv.Itoa(i) +} + +func getterStyle(outt reflect.Type) int { + getterMutex.RLock() + style := getterStyles[outt] + getterMutex.RUnlock() + if style != getterUnknown { + return style + } + + getterMutex.Lock() + defer getterMutex.Unlock() + if outt.Implements(getterIface) { + vt := outt + for vt.Kind() == reflect.Ptr { + vt = vt.Elem() + } + if vt.Implements(getterIface) { + style = getterTypeVal + } else { + style = getterTypePtr + } + } else if reflect.PtrTo(outt).Implements(getterIface) { + style = getterAddr + } else { + style = getterNone + } + getterStyles[outt] = style + return style +} + +func getGetter(outt reflect.Type, out reflect.Value) Getter { + style := getterStyle(outt) + if style == getterNone { + return nil + } + if style == getterAddr { + if !out.CanAddr() { + return nil + } + return out.Addr().Interface().(Getter) + } + if style == getterTypeVal && out.Kind() == reflect.Ptr && out.IsNil() { + return nil + } + return out.Interface().(Getter) +} + +// -------------------------------------------------------------------------- +// Marshaling of the document value itself. + +type encoder struct { + out []byte +} + +func (e *encoder) addDoc(v reflect.Value) { + for { + if vi, ok := v.Interface().(Getter); ok { + getv, err := vi.GetBSON() + if err != nil { + panic(err) + } + v = reflect.ValueOf(getv) + continue + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + continue + } + break + } + + if v.Type() == typeRaw { + raw := v.Interface().(Raw) + if raw.Kind != 0x03 && raw.Kind != 0x00 { + panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document") + } + if len(raw.Data) == 0 { + panic("Attempted to marshal empty Raw document") + } + e.addBytes(raw.Data...) 
+ return + } + + start := e.reserveInt32() + + switch v.Kind() { + case reflect.Map: + e.addMap(v) + case reflect.Struct: + e.addStruct(v) + case reflect.Array, reflect.Slice: + e.addSlice(v) + default: + panic("Can't marshal " + v.Type().String() + " as a BSON document") + } + + e.addBytes(0) + e.setInt32(start, int32(len(e.out)-start)) +} + +func (e *encoder) addMap(v reflect.Value) { + for _, k := range v.MapKeys() { + e.addElem(fmt.Sprint(k), v.MapIndex(k), false) + } +} + +func (e *encoder) addStruct(v reflect.Value) { + sinfo, err := getStructInfo(v.Type()) + if err != nil { + panic(err) + } + var value reflect.Value + if sinfo.InlineMap >= 0 { + m := v.Field(sinfo.InlineMap) + if m.Len() > 0 { + for _, k := range m.MapKeys() { + ks := k.String() + if _, found := sinfo.FieldsMap[ks]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks)) + } + e.addElem(ks, m.MapIndex(k), false) + } + } + } + for _, info := range sinfo.FieldsList { + if info.Inline == nil { + value = v.Field(info.Num) + } else { + // as pointers to struct are allowed here, + // there is no guarantee that pointer won't be nil. + // + // It is expected allowed behaviour + // so info.Inline MAY consist index to a nil pointer + // and that is why we safely call v.FieldByIndex and just continue on panic + field, errField := safeFieldByIndex(v, info.Inline) + if errField != nil { + continue + } + + value = field + } + if info.OmitEmpty && isZero(value) { + continue + } + if useRespectNilValues && + (value.Kind() == reflect.Slice || value.Kind() == reflect.Map) && + value.IsNil() { + e.addElem(info.Key, reflect.ValueOf(nil), info.MinSize) + continue + } + e.addElem(info.Key, value, info.MinSize) + } +} + +func safeFieldByIndex(v reflect.Value, index []int) (result reflect.Value, err error) { + defer func() { + if recovered := recover(); recovered != nil { + switch r := recovered.(type) { + case string: + err = fmt.Errorf("%s", r) + case error: + err = r + } + } + }() + + result = v.FieldByIndex(index) + return +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.String: + return len(v.String()) == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + if vt == typeTime { + return v.Interface().(time.Time).IsZero() + } + for i := 0; i < v.NumField(); i++ { + if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +func (e *encoder) addSlice(v reflect.Value) { + vi := v.Interface() + if d, ok := vi.(D); ok { + for _, elem := range d { + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + if d, ok := vi.(RawD); ok { + for _, elem := range d { + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + l := v.Len() + et := v.Type().Elem() + if et == typeDocElem { + for i := 0; i < l; i++ { + elem := v.Index(i).Interface().(DocElem) + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + if et == typeRawDocElem { + for i := 0; 
i < l; i++ { + elem := v.Index(i).Interface().(RawDocElem) + e.addElem(elem.Name, reflect.ValueOf(elem.Value), false) + } + return + } + for i := 0; i < l; i++ { + e.addElem(itoa(i), v.Index(i), false) + } +} + +// -------------------------------------------------------------------------- +// Marshaling of elements in a document. + +func (e *encoder) addElemName(kind byte, name string) { + e.addBytes(kind) + e.addBytes([]byte(name)...) + e.addBytes(0) +} + +func (e *encoder) addElem(name string, v reflect.Value, minSize bool) { + + if !v.IsValid() { + e.addElemName(0x0A, name) + return + } + + if getter := getGetter(v.Type(), v); getter != nil { + getv, err := getter.GetBSON() + if err != nil { + panic(err) + } + e.addElem(name, reflect.ValueOf(getv), minSize) + return + } + + switch v.Kind() { + + case reflect.Interface: + e.addElem(name, v.Elem(), minSize) + + case reflect.Ptr: + e.addElem(name, v.Elem(), minSize) + + case reflect.String: + s := v.String() + switch v.Type() { + case typeObjectId: + if len(s) != 12 { + panic("ObjectIDs must be exactly 12 bytes long (got " + + strconv.Itoa(len(s)) + ")") + } + e.addElemName(0x07, name) + e.addBytes([]byte(s)...) + case typeSymbol: + e.addElemName(0x0E, name) + e.addStr(s) + case typeJSONNumber: + n := v.Interface().(json.Number) + if i, err := n.Int64(); err == nil { + e.addElemName(0x12, name) + e.addInt64(i) + } else if f, err := n.Float64(); err == nil { + e.addElemName(0x01, name) + e.addFloat64(f) + } else { + panic("failed to convert json.Number to a number: " + s) + } + default: + e.addElemName(0x02, name) + e.addStr(s) + } + + case reflect.Float32, reflect.Float64: + e.addElemName(0x01, name) + e.addFloat64(v.Float()) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + u := v.Uint() + if int64(u) < 0 { + panic("BSON has no uint64 type, and value is too large to fit correctly in an int64") + } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) { + e.addElemName(0x10, name) + e.addInt32(int32(u)) + } else { + e.addElemName(0x12, name) + e.addInt64(int64(u)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Type() { + case typeMongoTimestamp: + e.addElemName(0x11, name) + e.addInt64(v.Int()) + + case typeOrderKey: + if v.Int() == int64(MaxKey) { + e.addElemName(0x7F, name) + } else { + e.addElemName(0xFF, name) + } + case typeTimeDuration: + // Stored as int64 + e.addElemName(0x12, name) + + e.addInt64(int64(v.Int() / 1e6)) + default: + i := v.Int() + if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 { + // It fits into an int32, encode as such. 
+ e.addElemName(0x10, name) + e.addInt32(int32(i)) + } else { + e.addElemName(0x12, name) + e.addInt64(i) + } + } + + case reflect.Bool: + e.addElemName(0x08, name) + if v.Bool() { + e.addBytes(1) + } else { + e.addBytes(0) + } + + case reflect.Map: + e.addElemName(0x03, name) + e.addDoc(v) + + case reflect.Slice: + vt := v.Type() + et := vt.Elem() + if et.Kind() == reflect.Uint8 { + if arrayOps[name] { + e.addElemName(0x04, name) + e.addDoc(v) + } else { + e.addElemName(0x05, name) + e.addBinary(0x00, v.Bytes()) + } + } else if et == typeDocElem || et == typeRawDocElem { + e.addElemName(0x03, name) + e.addDoc(v) + } else { + e.addElemName(0x04, name) + e.addDoc(v) + } + + case reflect.Array: + et := v.Type().Elem() + if et.Kind() == reflect.Uint8 { + if arrayOps[name] { + e.addElemName(0x04, name) + e.addDoc(v) + } else { + e.addElemName(0x05, name) + if v.CanAddr() { + e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte)) + } else { + n := v.Len() + e.addInt32(int32(n)) + e.addBytes(0x00) + for i := 0; i < n; i++ { + el := v.Index(i) + e.addBytes(byte(el.Uint())) + } + } + } + } else { + e.addElemName(0x04, name) + e.addDoc(v) + } + + case reflect.Struct: + switch s := v.Interface().(type) { + + case Raw: + kind := s.Kind + if kind == 0x00 { + kind = 0x03 + } + if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F { + panic("Attempted to marshal empty Raw document") + } + e.addElemName(kind, name) + e.addBytes(s.Data...) + + case Binary: + e.addElemName(0x05, name) + e.addBinary(s.Kind, s.Data) + + case Decimal128: + e.addElemName(0x13, name) + e.addInt64(int64(s.l)) + e.addInt64(int64(s.h)) + + case DBPointer: + e.addElemName(0x0C, name) + e.addStr(s.Namespace) + if len(s.Id) != 12 { + panic("ObjectIDs must be exactly 12 bytes long (got " + + strconv.Itoa(len(s.Id)) + ")") + } + e.addBytes([]byte(s.Id)...) + + case RegEx: + e.addElemName(0x0B, name) + e.addCStr(s.Pattern) + options := runes(s.Options) + sort.Sort(options) + e.addCStr(string(options)) + + case JavaScript: + if s.Scope == nil { + e.addElemName(0x0D, name) + e.addStr(s.Code) + } else { + e.addElemName(0x0F, name) + start := e.reserveInt32() + e.addStr(s.Code) + e.addDoc(reflect.ValueOf(s.Scope)) + e.setInt32(start, int32(len(e.out)-start)) + } + + case time.Time: + // MongoDB handles timestamps as milliseconds. + e.addElemName(0x09, name) + e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6)) + + case url.URL: + e.addElemName(0x02, name) + e.addStr(s.String()) + + case undefined: + e.addElemName(0x06, name) + + default: + e.addElemName(0x03, name) + e.addDoc(v) + } + + default: + panic("Can't marshal " + v.Type().String() + " in a BSON document") + } +} + +// ------------- +// Helper method for sorting regex options +type runes []rune + +func (a runes) Len() int { return len(a) } +func (a runes) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a runes) Less(i, j int) bool { return a[i] < a[j] } + +// -------------------------------------------------------------------------- +// Marshaling of base types. + +func (e *encoder) addBinary(subtype byte, v []byte) { + if subtype == 0x02 { + // Wonder how that brilliant idea came to life. Obsolete, luckily. + e.addInt32(int32(len(v) + 4)) + e.addBytes(subtype) + e.addInt32(int32(len(v))) + } else { + e.addInt32(int32(len(v))) + e.addBytes(subtype) + } + e.addBytes(v...) +} + +func (e *encoder) addStr(v string) { + e.addInt32(int32(len(v) + 1)) + e.addCStr(v) +} + +func (e *encoder) addCStr(v string) { + e.addBytes([]byte(v)...) 
+ e.addBytes(0) +} + +func (e *encoder) reserveInt32() (pos int) { + pos = len(e.out) + e.addBytes(0, 0, 0, 0) + return pos +} + +func (e *encoder) setInt32(pos int, v int32) { + e.out[pos+0] = byte(v) + e.out[pos+1] = byte(v >> 8) + e.out[pos+2] = byte(v >> 16) + e.out[pos+3] = byte(v >> 24) +} + +func (e *encoder) addInt32(v int32) { + u := uint32(v) + e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) +} + +func (e *encoder) addInt64(v int64) { + u := uint64(v) + e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24), + byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56)) +} + +func (e *encoder) addFloat64(v float64) { + e.addInt64(int64(math.Float64bits(v))) +} + +func (e *encoder) addBytes(v ...byte) { + e.out = append(e.out, v...) +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/json.go b/backend/vendor/github.com/globalsign/mgo/bson/json.go new file mode 100644 index 00000000..045c7130 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/json.go @@ -0,0 +1,384 @@ +package bson + +import ( + "bytes" + "encoding/base64" + "fmt" + "strconv" + "strings" + "time" + + "github.com/globalsign/mgo/internal/json" +) + +// UnmarshalJSON unmarshals a JSON value that may hold non-standard +// syntax as defined in BSON's extended JSON specification. +func UnmarshalJSON(data []byte, value interface{}) error { + d := json.NewDecoder(bytes.NewBuffer(data)) + d.Extend(&jsonExt) + return d.Decode(value) +} + +// MarshalJSON marshals a JSON value that may hold non-standard +// syntax as defined in BSON's extended JSON specification. +func MarshalJSON(value interface{}) ([]byte, error) { + var buf bytes.Buffer + e := json.NewEncoder(&buf) + e.Extend(&jsonExt) + err := e.Encode(value) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// jdec is used internally by the JSON decoding functions +// so they may unmarshal functions without getting into endless +// recursion due to keyed objects. 
+func jdec(data []byte, value interface{}) error { + d := json.NewDecoder(bytes.NewBuffer(data)) + d.Extend(&funcExt) + return d.Decode(value) +} + +var jsonExt json.Extension +var funcExt json.Extension + +// TODO +// - Shell regular expressions ("/regexp/opts") + +func init() { + jsonExt.DecodeUnquotedKeys(true) + jsonExt.DecodeTrailingCommas(true) + + funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary") + jsonExt.DecodeKeyed("$binary", jdecBinary) + jsonExt.DecodeKeyed("$binaryFunc", jdecBinary) + jsonExt.EncodeType([]byte(nil), jencBinarySlice) + jsonExt.EncodeType(Binary{}, jencBinaryType) + + funcExt.DecodeFunc("ISODate", "$dateFunc", "S") + funcExt.DecodeFunc("new Date", "$dateFunc", "S") + jsonExt.DecodeKeyed("$date", jdecDate) + jsonExt.DecodeKeyed("$dateFunc", jdecDate) + jsonExt.EncodeType(time.Time{}, jencDate) + + funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i") + jsonExt.DecodeKeyed("$timestamp", jdecTimestamp) + jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp) + + funcExt.DecodeConst("undefined", Undefined) + + jsonExt.DecodeKeyed("$regex", jdecRegEx) + jsonExt.EncodeType(RegEx{}, jencRegEx) + + funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id") + jsonExt.DecodeKeyed("$oid", jdecObjectId) + jsonExt.DecodeKeyed("$oidFunc", jdecObjectId) + jsonExt.EncodeType(ObjectId(""), jencObjectId) + + funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id") + jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef) + + funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N") + jsonExt.DecodeKeyed("$numberLong", jdecNumberLong) + jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong) + jsonExt.EncodeType(int64(0), jencNumberLong) + jsonExt.EncodeType(int(0), jencInt) + + funcExt.DecodeConst("MinKey", MinKey) + funcExt.DecodeConst("MaxKey", MaxKey) + jsonExt.DecodeKeyed("$minKey", jdecMinKey) + jsonExt.DecodeKeyed("$maxKey", jdecMaxKey) + jsonExt.EncodeType(orderKey(0), jencMinMaxKey) + + jsonExt.DecodeKeyed("$undefined", jdecUndefined) + jsonExt.EncodeType(Undefined, jencUndefined) + + jsonExt.Extend(&funcExt) +} + +func fbytes(format string, args ...interface{}) []byte { + var buf bytes.Buffer + fmt.Fprintf(&buf, format, args...) 
+ return buf.Bytes() +} + +func jdecBinary(data []byte) (interface{}, error) { + var v struct { + Binary []byte `json:"$binary"` + Type string `json:"$type"` + Func struct { + Binary []byte `json:"$binary"` + Type int64 `json:"$type"` + } `json:"$binaryFunc"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + + var binData []byte + var binKind int64 + if v.Type == "" && v.Binary == nil { + binData = v.Func.Binary + binKind = v.Func.Type + } else if v.Type == "" { + return v.Binary, nil + } else { + binData = v.Binary + binKind, err = strconv.ParseInt(v.Type, 0, 64) + if err != nil { + binKind = -1 + } + } + + if binKind == 0 { + return binData, nil + } + if binKind < 0 || binKind > 255 { + return nil, fmt.Errorf("invalid type in binary object: %s", data) + } + + return Binary{Kind: byte(binKind), Data: binData}, nil +} + +func jencBinarySlice(v interface{}) ([]byte, error) { + in := v.([]byte) + out := make([]byte, base64.StdEncoding.EncodedLen(len(in))) + base64.StdEncoding.Encode(out, in) + return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil +} + +func jencBinaryType(v interface{}) ([]byte, error) { + in := v.(Binary) + out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data))) + base64.StdEncoding.Encode(out, in.Data) + return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil +} + +const jdateFormat = "2006-01-02T15:04:05.999Z07:00" + +func jdecDate(data []byte) (interface{}, error) { + var v struct { + S string `json:"$date"` + Func struct { + S string + } `json:"$dateFunc"` + } + _ = jdec(data, &v) + if v.S == "" { + v.S = v.Func.S + } + if v.S != "" { + var errs []string + for _, format := range []string{jdateFormat, "2006-01-02"} { + t, err := time.Parse(format, v.S) + if err == nil { + return t, nil + } + errs = append(errs, err.Error()) + } + return nil, fmt.Errorf("cannot parse date: %q [%s]", v.S, strings.Join(errs, ", ")) + } + + var vn struct { + Date struct { + N int64 `json:"$numberLong,string"` + } `json:"$date"` + Func struct { + S int64 + } `json:"$dateFunc"` + } + err := jdec(data, &vn) + if err != nil { + return nil, fmt.Errorf("cannot parse date: %q", data) + } + n := vn.Date.N + if n == 0 { + n = vn.Func.S + } + return time.Unix(n/1000, n%1000*1e6).UTC(), nil +} + +func jencDate(v interface{}) ([]byte, error) { + t := v.(time.Time) + return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil +} + +func jdecTimestamp(data []byte) (interface{}, error) { + var v struct { + Func struct { + T int32 `json:"t"` + I int32 `json:"i"` + } `json:"$timestamp"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil +} + +func jencTimestamp(v interface{}) ([]byte, error) { + ts := uint64(v.(MongoTimestamp)) + return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil +} + +func jdecRegEx(data []byte) (interface{}, error) { + var v struct { + Regex string `json:"$regex"` + Options string `json:"$options"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + return RegEx{v.Regex, v.Options}, nil +} + +func jencRegEx(v interface{}) ([]byte, error) { + re := v.(RegEx) + type regex struct { + Regex string `json:"$regex"` + Options string `json:"$options"` + } + return json.Marshal(regex{re.Pattern, re.Options}) +} + +func jdecObjectId(data []byte) (interface{}, error) { + var v struct { + Id string `json:"$oid"` + Func struct { + Id string + } `json:"$oidFunc"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + 
} + if v.Id == "" { + v.Id = v.Func.Id + } + return ObjectIdHex(v.Id), nil +} + +func jencObjectId(v interface{}) ([]byte, error) { + return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil +} + +func jdecDBRef(data []byte) (interface{}, error) { + // TODO Support unmarshaling $ref and $id into the input value. + var v struct { + Obj map[string]interface{} `json:"$dbrefFunc"` + } + // TODO Fix this. Must not be required. + v.Obj = make(map[string]interface{}) + err := jdec(data, &v) + if err != nil { + return nil, err + } + return v.Obj, nil +} + +func jdecNumberLong(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$numberLong,string"` + Func struct { + N int64 `json:",string"` + } `json:"$numberLongFunc"` + } + var vn struct { + N int64 `json:"$numberLong"` + Func struct { + N int64 + } `json:"$numberLongFunc"` + } + err := jdec(data, &v) + if err != nil { + err = jdec(data, &vn) + v.N = vn.N + v.Func.N = vn.Func.N + } + if err != nil { + return nil, err + } + if v.N != 0 { + return v.N, nil + } + return v.Func.N, nil +} + +func jencNumberLong(v interface{}) ([]byte, error) { + n := v.(int64) + f := `{"$numberLong":"%d"}` + if n <= 1<<53 { + f = `{"$numberLong":%d}` + } + return fbytes(f, n), nil +} + +func jencInt(v interface{}) ([]byte, error) { + n := v.(int) + f := `{"$numberLong":"%d"}` + if int64(n) <= 1<<53 { + f = `%d` + } + return fbytes(f, n), nil +} + +func jdecMinKey(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$minKey"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if v.N != 1 { + return nil, fmt.Errorf("invalid $minKey object: %s", data) + } + return MinKey, nil +} + +func jdecMaxKey(data []byte) (interface{}, error) { + var v struct { + N int64 `json:"$maxKey"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if v.N != 1 { + return nil, fmt.Errorf("invalid $maxKey object: %s", data) + } + return MaxKey, nil +} + +func jencMinMaxKey(v interface{}) ([]byte, error) { + switch v.(orderKey) { + case MinKey: + return []byte(`{"$minKey":1}`), nil + case MaxKey: + return []byte(`{"$maxKey":1}`), nil + } + panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v)) +} + +func jdecUndefined(data []byte) (interface{}, error) { + var v struct { + B bool `json:"$undefined"` + } + err := jdec(data, &v) + if err != nil { + return nil, err + } + if !v.B { + return nil, fmt.Errorf("invalid $undefined object: %s", data) + } + return Undefined, nil +} + +func jencUndefined(v interface{}) ([]byte, error) { + return []byte(`{"$undefined":true}`), nil +} diff --git a/backend/vendor/github.com/globalsign/mgo/bson/stream.go b/backend/vendor/github.com/globalsign/mgo/bson/stream.go new file mode 100644 index 00000000..46652845 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bson/stream.go @@ -0,0 +1,90 @@ +package bson + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" +) + +const ( + // MinDocumentSize is the size of the smallest possible valid BSON document: + // an int32 size header + 0x00 (end of document). + MinDocumentSize = 5 + + // MaxDocumentSize is the largest possible size for a BSON document allowed by MongoDB, + // that is, 16 MiB (see https://docs.mongodb.com/manual/reference/limits/). + MaxDocumentSize = 16777216 +) + +// ErrInvalidDocumentSize is an error returned when a BSON document's header +// contains a size smaller than MinDocumentSize or greater than MaxDocumentSize. 
+type ErrInvalidDocumentSize struct { + DocumentSize int32 +} + +func (e ErrInvalidDocumentSize) Error() string { + return fmt.Sprintf("invalid document size %d", e.DocumentSize) +} + +// A Decoder reads and decodes BSON values from an input stream. +type Decoder struct { + source io.Reader +} + +// NewDecoder returns a new Decoder that reads from source. +// It does not add any extra buffering, and may not read data from source beyond the BSON values requested. +func NewDecoder(source io.Reader) *Decoder { + return &Decoder{source: source} +} + +// Decode reads the next BSON-encoded value from its input and stores it in the value pointed to by v. +// See the documentation for Unmarshal for details about the conversion of BSON into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + // BSON documents start with their size as a *signed* int32. + var docSize int32 + if err = binary.Read(dec.source, binary.LittleEndian, &docSize); err != nil { + return + } + + if docSize < MinDocumentSize || docSize > MaxDocumentSize { + return ErrInvalidDocumentSize{DocumentSize: docSize} + } + + docBuffer := bytes.NewBuffer(make([]byte, 0, docSize)) + if err = binary.Write(docBuffer, binary.LittleEndian, docSize); err != nil { + return + } + + // docSize is the *full* document's size (including the 4-byte size header, + // which has already been read). + if _, err = io.CopyN(docBuffer, dec.source, int64(docSize-4)); err != nil { + return + } + + // Let Unmarshal handle the rest. + defer handleErr(&err) + return Unmarshal(docBuffer.Bytes(), v) +} + +// An Encoder encodes and writes BSON values to an output stream. +type Encoder struct { + target io.Writer +} + +// NewEncoder returns a new Encoder that writes to target. +func NewEncoder(target io.Writer) *Encoder { + return &Encoder{target: target} +} + +// Encode encodes v to BSON, and if successful writes it to the Encoder's output stream. +// See the documentation for Marshal for details about the conversion of Go values to BSON. +func (enc *Encoder) Encode(v interface{}) error { + data, err := Marshal(v) + if err != nil { + return err + } + + _, err = enc.target.Write(data) + return err +} diff --git a/backend/vendor/github.com/globalsign/mgo/bulk.go b/backend/vendor/github.com/globalsign/mgo/bulk.go new file mode 100644 index 00000000..c234fcce --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/bulk.go @@ -0,0 +1,366 @@ +package mgo + +import ( + "bytes" + "sort" + "sync" + + "github.com/globalsign/mgo/bson" +) + +// Bulk represents an operation that can be prepared with several +// orthogonal changes before being delivered to the server. +// +// MongoDB servers older than version 2.6 do not have proper support for bulk +// operations, so the driver attempts to map its API as much as possible into +// the functionality that works. In particular, in those releases updates and +// removals are sent individually, and inserts are sent in bulk but have +// suboptimal error reporting compared to more recent versions of the server. +// See the documentation of BulkErrorCase for details on that. 
+// +// Relevant documentation: +// +// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api +// +type Bulk struct { + c *Collection + opcount int + actions []bulkAction + ordered bool +} + +type bulkOp int + +const ( + bulkInsert bulkOp = iota + 1 + bulkUpdate + bulkUpdateAll + bulkRemove +) + +type bulkAction struct { + op bulkOp + docs []interface{} + idxs []int +} + +type bulkUpdateOp []interface{} +type bulkDeleteOp []interface{} + +// BulkResult holds the results for a bulk operation. +type BulkResult struct { + Matched int + Modified int // Available only for MongoDB 2.6+ + + // Be conservative while we understand exactly how to report these + // results in a useful and convenient way, and also how to emulate + // them with prior servers. + private bool +} + +// BulkError holds an error returned from running a Bulk operation. +// Individual errors may be obtained and inspected via the Cases method. +type BulkError struct { + ecases []BulkErrorCase +} + +func (e *BulkError) Error() string { + if len(e.ecases) == 0 { + return "invalid BulkError instance: no errors" + } + if len(e.ecases) == 1 { + return e.ecases[0].Err.Error() + } + msgs := make([]string, 0, len(e.ecases)) + seen := make(map[string]bool) + for _, ecase := range e.ecases { + msg := ecase.Err.Error() + if !seen[msg] { + seen[msg] = true + msgs = append(msgs, msg) + } + } + if len(msgs) == 1 { + return msgs[0] + } + var buf bytes.Buffer + buf.WriteString("multiple errors in bulk operation:\n") + for _, msg := range msgs { + buf.WriteString(" - ") + buf.WriteString(msg) + buf.WriteByte('\n') + } + return buf.String() +} + +type bulkErrorCases []BulkErrorCase + +func (slice bulkErrorCases) Len() int { return len(slice) } +func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index } +func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } + +// BulkErrorCase holds an individual error found while attempting a single change +// within a bulk operation, and the position in which it was enqueued. +// +// MongoDB servers older than version 2.6 do not have proper support for bulk +// operations, so the driver attempts to map its API as much as possible into +// the functionality that works. In particular, only the last error is reported +// for bulk inserts and without any positional information, so the Index +// field is set to -1 in these cases. +type BulkErrorCase struct { + Index int // Position of operation that failed, or -1 if unknown. + Err error +} + +// Cases returns all individual errors found while attempting the requested changes. +// +// See the documentation of BulkErrorCase for limitations in older MongoDB releases. +func (e *BulkError) Cases() []BulkErrorCase { + return e.ecases +} + +var actionPool = sync.Pool{ + New: func() interface{} { + return &bulkAction{ + docs: make([]interface{}, 0), + idxs: make([]int, 0), + } + }, +} + +// Bulk returns a value to prepare the execution of a bulk operation. +func (c *Collection) Bulk() *Bulk { + return &Bulk{c: c, ordered: true} +} + +// Unordered puts the bulk operation in unordered mode. +// +// In unordered mode the indvidual operations may be sent +// out of order, which means latter operations may proceed +// even if prior ones have failed. 
+func (b *Bulk) Unordered() { + b.ordered = false +} + +func (b *Bulk) action(op bulkOp, opcount int) *bulkAction { + var action *bulkAction + if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op { + action = &b.actions[len(b.actions)-1] + } else if !b.ordered { + for i := range b.actions { + if b.actions[i].op == op { + action = &b.actions[i] + break + } + } + } + if action == nil { + a := actionPool.Get().(*bulkAction) + a.op = op + b.actions = append(b.actions, *a) + action = &b.actions[len(b.actions)-1] + } + for i := 0; i < opcount; i++ { + action.idxs = append(action.idxs, b.opcount) + b.opcount++ + } + return action +} + +// Insert queues up the provided documents for insertion. +func (b *Bulk) Insert(docs ...interface{}) { + action := b.action(bulkInsert, len(docs)) + action.docs = append(action.docs, docs...) +} + +// Remove queues up the provided selectors for removing matching documents. +// Each selector will remove only a single matching document. +func (b *Bulk) Remove(selectors ...interface{}) { + action := b.action(bulkRemove, len(selectors)) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 1, + Limit: 1, + }) + } +} + +// RemoveAll queues up the provided selectors for removing all matching documents. +// Each selector will remove all matching documents. +func (b *Bulk) RemoveAll(selectors ...interface{}) { + action := b.action(bulkRemove, len(selectors)) + for _, selector := range selectors { + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &deleteOp{ + Collection: b.c.FullName, + Selector: selector, + Flags: 0, + Limit: 0, + }) + } +} + +// Update queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. +func (b *Bulk) Update(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate, len(pairs)/2) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + }) + } +} + +// UpdateAll queues up the provided pairs of updating instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair updates all documents matching the selector. +func (b *Bulk) UpdateAll(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.UpdateAll requires an even number of parameters") + } + action := b.action(bulkUpdate, len(pairs)/2) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 2, + Multi: true, + }) + } +} + +// Upsert queues up the provided pairs of upserting instructions. +// The first element of each pair selects which documents must be +// updated, and the second element defines how to update it. +// Each pair matches exactly one document for updating at most. 
+func (b *Bulk) Upsert(pairs ...interface{}) { + if len(pairs)%2 != 0 { + panic("Bulk.Update requires an even number of parameters") + } + action := b.action(bulkUpdate, len(pairs)/2) + for i := 0; i < len(pairs); i += 2 { + selector := pairs[i] + if selector == nil { + selector = bson.D{} + } + action.docs = append(action.docs, &updateOp{ + Collection: b.c.FullName, + Selector: selector, + Update: pairs[i+1], + Flags: 1, + Upsert: true, + }) + } +} + +// Run runs all the operations queued up. +// +// If an error is reported on an unordered bulk operation, the error value may +// be an aggregation of all issues observed. As an exception to that, Insert +// operations running on MongoDB versions prior to 2.6 will report the last +// error only due to a limitation in the wire protocol. +func (b *Bulk) Run() (*BulkResult, error) { + var result BulkResult + var berr BulkError + var failed bool + for i := range b.actions { + action := &b.actions[i] + var ok bool + switch action.op { + case bulkInsert: + ok = b.runInsert(action, &result, &berr) + case bulkUpdate: + ok = b.runUpdate(action, &result, &berr) + case bulkRemove: + ok = b.runRemove(action, &result, &berr) + default: + panic("unknown bulk operation") + } + action.idxs = action.idxs[0:0] + action.docs = action.docs[0:0] + actionPool.Put(action) + if !ok { + failed = true + if b.ordered { + break + } + } + } + if failed { + sort.Sort(bulkErrorCases(berr.ecases)) + return nil, &berr + } + return &result, nil +} + +func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool { + op := &insertOp{b.c.FullName, action.docs, 0} + if !b.ordered { + op.flags = 1 // ContinueOnError + } + lerr, err := b.c.writeOp(op, b.ordered) + return b.checkSuccess(action, berr, lerr, err) +} + +func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool { + lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered) + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } + return b.checkSuccess(action, berr, lerr, err) +} + +func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool { + lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered) + if lerr != nil { + result.Matched += lerr.N + result.Modified += lerr.modified + } + return b.checkSuccess(action, berr, lerr, err) +} + +func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool { + if lerr != nil && len(lerr.ecases) > 0 { + for i := 0; i < len(lerr.ecases); i++ { + // Map back from the local error index into the visible one. 
+ ecase := lerr.ecases[i] + idx := ecase.Index + if idx >= 0 { + idx = action.idxs[idx] + } + berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err}) + } + return false + } else if err != nil { + for i := 0; i < len(action.idxs); i++ { + berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err}) + } + return false + } + return true +} diff --git a/backend/vendor/github.com/globalsign/mgo/changestreams.go b/backend/vendor/github.com/globalsign/mgo/changestreams.go new file mode 100644 index 00000000..5c2279c6 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/changestreams.go @@ -0,0 +1,357 @@ +package mgo + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + "github.com/globalsign/mgo/bson" +) + +type FullDocument string + +const ( + Default = "default" + UpdateLookup = "updateLookup" +) + +type ChangeStream struct { + iter *Iter + isClosed bool + options ChangeStreamOptions + pipeline interface{} + resumeToken *bson.Raw + collection *Collection + readPreference *ReadPreference + err error + m sync.Mutex + sessionCopied bool +} + +type ChangeStreamOptions struct { + + // FullDocument controls the amount of data that the server will return when + // returning a changes document. + FullDocument FullDocument + + // ResumeAfter specifies the logical starting point for the new change stream. + ResumeAfter *bson.Raw + + // MaxAwaitTimeMS specifies the maximum amount of time for the server to wait + // on new documents to satisfy a change stream query. + MaxAwaitTimeMS time.Duration + + // BatchSize specifies the number of documents to return per batch. + BatchSize int + + // Collation specifies the way the server should collate returned data. + //TODO Collation *Collation +} + +var errMissingResumeToken = errors.New("resume token missing from result") + +// Watch constructs a new ChangeStream capable of receiving continuing data +// from the database. +func (coll *Collection) Watch(pipeline interface{}, + options ChangeStreamOptions) (*ChangeStream, error) { + + if pipeline == nil { + pipeline = []bson.M{} + } + + csPipe := constructChangeStreamPipeline(pipeline, options) + pipe := coll.Pipe(&csPipe) + if options.MaxAwaitTimeMS > 0 { + pipe.SetMaxTime(options.MaxAwaitTimeMS) + } + if options.BatchSize > 0 { + pipe.Batch(options.BatchSize) + } + pIter := pipe.Iter() + + // check that there was no issue creating the iterator. + // this will fail immediately with an error from the server if running against + // a standalone. + if err := pIter.Err(); err != nil { + return nil, err + } + + pIter.isChangeStream = true + return &ChangeStream{ + iter: pIter, + collection: coll, + resumeToken: nil, + options: options, + pipeline: pipeline, + }, nil +} + +// Next retrieves the next document from the change stream, blocking if necessary. +// Next returns true if a document was successfully unmarshalled into result, +// and false if an error occured. When Next returns false, the Err method should +// be called to check what error occurred during iteration. If there were no events +// available (ErrNotFound), the Err method returns nil so the user can retry the invocaton. 
+// +// For example: +// +// pipeline := []bson.M{} +// +// changeStream := collection.Watch(pipeline, ChangeStreamOptions{}) +// for changeStream.Next(&changeDoc) { +// fmt.Printf("Change: %v\n", changeDoc) +// } +// +// if err := changeStream.Close(); err != nil { +// return err +// } +// +// If the pipeline used removes the _id field from the result, Next will error +// because the _id field is needed to resume iteration when an error occurs. +// +func (changeStream *ChangeStream) Next(result interface{}) bool { + // the err field is being constantly overwritten and we don't want the user to + // attempt to read it at this point so we lock. + changeStream.m.Lock() + + defer changeStream.m.Unlock() + + // if we are in a state of error, then don't continue. + if changeStream.err != nil { + return false + } + + if changeStream.isClosed { + changeStream.err = fmt.Errorf("illegal use of a closed ChangeStream") + return false + } + + var err error + + // attempt to fetch the change stream result. + err = changeStream.fetchResultSet(result) + if err == nil { + return true + } + + // if we get no results we return false with no errors so the user can call Next + // again, resuming is not needed as the iterator is simply timed out as no events happened. + // The user will call Timeout in order to understand if this was the case. + if err == ErrNotFound { + return false + } + + // check if the error is resumable + if !isResumableError(err) { + // error is not resumable, give up and return it to the user. + changeStream.err = err + return false + } + + // try to resume. + err = changeStream.resume() + if err != nil { + // we've not been able to successfully resume and should only try once, + // so we give up. + changeStream.err = err + return false + } + + // we've successfully resumed the changestream. + // try to fetch the next result. + err = changeStream.fetchResultSet(result) + if err != nil { + changeStream.err = err + return false + } + + return true +} + +// Err returns nil if no errors happened during iteration, or the actual +// error otherwise. +func (changeStream *ChangeStream) Err() error { + changeStream.m.Lock() + defer changeStream.m.Unlock() + return changeStream.err +} + +// Close kills the server cursor used by the iterator, if any, and returns +// nil if no errors happened during iteration, or the actual error otherwise. +func (changeStream *ChangeStream) Close() error { + changeStream.m.Lock() + defer changeStream.m.Unlock() + changeStream.isClosed = true + err := changeStream.iter.Close() + if err != nil { + changeStream.err = err + } + if changeStream.sessionCopied { + changeStream.iter.session.Close() + changeStream.sessionCopied = false + } + return err +} + +// ResumeToken returns a copy of the current resume token held by the change stream. +// This token should be treated as an opaque token that can be provided to instantiate +// a new change stream. +func (changeStream *ChangeStream) ResumeToken() *bson.Raw { + changeStream.m.Lock() + defer changeStream.m.Unlock() + if changeStream.resumeToken == nil { + return nil + } + var tokenCopy = *changeStream.resumeToken + return &tokenCopy +} + +// Timeout returns true if the last call of Next returned false because of an iterator timeout. +func (changeStream *ChangeStream) Timeout() bool { + return changeStream.iter.Timeout() +} + +func constructChangeStreamPipeline(pipeline interface{}, + options ChangeStreamOptions) interface{} { + pipelinev := reflect.ValueOf(pipeline) + + // ensure that the pipeline passed in is a slice. 
+ if pipelinev.Kind() != reflect.Slice { + panic("pipeline argument must be a slice") + } + + // construct the options to be used by the change notification + // pipeline stage. + changeStreamStageOptions := bson.M{} + + if options.FullDocument != "" { + changeStreamStageOptions["fullDocument"] = options.FullDocument + } + if options.ResumeAfter != nil { + changeStreamStageOptions["resumeAfter"] = options.ResumeAfter + } + + changeStreamStage := bson.M{"$changeStream": changeStreamStageOptions} + + pipeOfInterfaces := make([]interface{}, pipelinev.Len()+1) + + // insert the change notification pipeline stage at the beginning of the + // aggregation. + pipeOfInterfaces[0] = changeStreamStage + + // convert the passed in slice to a slice of interfaces. + for i := 0; i < pipelinev.Len(); i++ { + pipeOfInterfaces[1+i] = pipelinev.Index(i).Addr().Interface() + } + var pipelineAsInterface interface{} = pipeOfInterfaces + return pipelineAsInterface +} + +func (changeStream *ChangeStream) resume() error { + // copy the information for the new socket. + + // Thanks to Copy() future uses will acquire a new socket against the newly selected DB. + newSession := changeStream.iter.session.Copy() + + // fetch the cursor from the iterator and use it to run a killCursors + // on the connection. + cursorId := changeStream.iter.op.cursorId + err := runKillCursorsOnSession(newSession, cursorId) + if err != nil { + return err + } + + // change out the old connection to the database with the new connection. + if changeStream.sessionCopied { + changeStream.collection.Database.Session.Close() + } + changeStream.collection.Database.Session = newSession + changeStream.sessionCopied = true + + opts := changeStream.options + if changeStream.resumeToken != nil { + opts.ResumeAfter = changeStream.resumeToken + } + // make a new pipeline containing the resume token. + changeStreamPipeline := constructChangeStreamPipeline(changeStream.pipeline, opts) + + // generate the new iterator with the new connection. + newPipe := changeStream.collection.Pipe(changeStreamPipeline) + changeStream.iter = newPipe.Iter() + if err := changeStream.iter.Err(); err != nil { + return err + } + changeStream.iter.isChangeStream = true + return nil +} + +// fetchResumeToken unmarshals the _id field from the document, setting an error +// on the changeStream if it is unable to. +func (changeStream *ChangeStream) fetchResumeToken(rawResult *bson.Raw) error { + changeStreamResult := struct { + ResumeToken *bson.Raw `bson:"_id,omitempty"` + }{} + + err := rawResult.Unmarshal(&changeStreamResult) + if err != nil { + return err + } + + if changeStreamResult.ResumeToken == nil { + return errMissingResumeToken + } + + changeStream.resumeToken = changeStreamResult.ResumeToken + return nil +} + +func (changeStream *ChangeStream) fetchResultSet(result interface{}) error { + rawResult := bson.Raw{} + + // fetch the next set of documents from the cursor. + gotNext := changeStream.iter.Next(&rawResult) + err := changeStream.iter.Err() + if err != nil { + return err + } + + if !gotNext && err == nil { + // If the iter.Err() method returns nil despite us not getting a next batch, + // it is becuase iter.Err() silences this case. + return ErrNotFound + } + + // grab the resumeToken from the results + if err := changeStream.fetchResumeToken(&rawResult); err != nil { + return err + } + + // put the raw results into the data structure the user provided. 
+ if err := rawResult.Unmarshal(result); err != nil { + return err + } + return nil +} + +func isResumableError(err error) bool { + _, isQueryError := err.(*QueryError) + // if it is not a database error OR it is a database error, + // but the error is a notMaster error + //and is not a missingResumeToken error (caused by the user provided pipeline) + return (!isQueryError || isNotMasterError(err)) && (err != errMissingResumeToken) +} + +func runKillCursorsOnSession(session *Session, cursorId int64) error { + socket, err := session.acquireSocket(true) + if err != nil { + return err + } + err = socket.Query(&killCursorsOp{[]int64{cursorId}}) + if err != nil { + return err + } + socket.Release() + + return nil +} diff --git a/backend/vendor/github.com/globalsign/mgo/cluster.go b/backend/vendor/github.com/globalsign/mgo/cluster.go new file mode 100644 index 00000000..ff431cac --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/cluster.go @@ -0,0 +1,704 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "errors" + "fmt" + "net" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/globalsign/mgo/bson" +) + +// --------------------------------------------------------------------------- +// Mongo cluster encapsulation. +// +// A cluster enables the communication with one or more servers participating +// in a mongo cluster. This works with individual servers, a replica set, +// a replica pair, one or multiple mongos routers, etc. 
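+//
+// From the application's point of view this machinery is reached through the
+// public session API. A minimal sketch (addresses and set name are illustrative):
+//
+//     info := &mgo.DialInfo{
+//         Addrs:          []string{"db1.example.com:27017", "db2.example.com:27017"},
+//         ReplicaSetName: "rs0",
+//         Timeout:        10 * time.Second,
+//     }
+//     session, err := mgo.DialWithInfo(info)
+//     if err != nil {
+//         panic(err)
+//     }
+//     defer session.Close()
+//     session.SetMode(mgo.SecondaryPreferred, true)
+//     fmt.Println(session.LiveServers())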
+ +type mongoCluster struct { + sync.RWMutex + serverSynced sync.Cond + userSeeds []string + dynaSeeds []string + servers mongoServers + masters mongoServers + references int + syncing bool + syncCount uint + cachedIndex map[string]bool + sync chan bool + dial dialer + dialInfo *DialInfo +} + +func newCluster(userSeeds []string, info *DialInfo) *mongoCluster { + cluster := &mongoCluster{ + userSeeds: userSeeds, + references: 1, + dial: dialer{info.Dial, info.DialServer}, + dialInfo: info, + } + cluster.serverSynced.L = cluster.RWMutex.RLocker() + cluster.sync = make(chan bool, 1) + stats.cluster(+1) + go cluster.syncServersLoop() + return cluster +} + +// Acquire increases the reference count for the cluster. +func (cluster *mongoCluster) Acquire() { + cluster.Lock() + cluster.references++ + debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references) + cluster.Unlock() +} + +// Release decreases the reference count for the cluster. Once +// it reaches zero, all servers will be closed. +func (cluster *mongoCluster) Release() { + cluster.Lock() + if cluster.references == 0 { + panic("cluster.Release() with references == 0") + } + cluster.references-- + debugf("Cluster %p released (refs=%d)", cluster, cluster.references) + if cluster.references == 0 { + for _, server := range cluster.servers.Slice() { + server.Close() + } + // Wake up the sync loop so it can die. + cluster.syncServers() + stats.cluster(-1) + } + cluster.Unlock() +} + +func (cluster *mongoCluster) LiveServers() (servers []string) { + cluster.RLock() + for _, serv := range cluster.servers.Slice() { + servers = append(servers, serv.Addr) + } + cluster.RUnlock() + return servers +} + +func (cluster *mongoCluster) removeServer(server *mongoServer) { + cluster.Lock() + cluster.masters.Remove(server) + other := cluster.servers.Remove(server) + cluster.Unlock() + if other != nil { + other.CloseIdle() + log("Removed server ", server.Addr, " from cluster.") + } + server.CloseIdle() +} + +type isMasterResult struct { + IsMaster bool + Secondary bool + Primary string + Hosts []string + Passives []string + Tags bson.D + Msg string + SetName string `bson:"setName"` + MaxWireVersion int `bson:"maxWireVersion"` +} + +func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error { + // Monotonic let's it talk to a slave and still hold the socket. + session := newSession(Monotonic, cluster, cluster.dialInfo) + session.setSocket(socket) + + var cmd = bson.D{{Name: "isMaster", Value: 1}} + + // Send client metadata to the server to identify this socket if this is + // the first isMaster call only. 
+ // + // isMaster commands issued after the initial connection handshake MUST NOT contain handshake arguments + // https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.rst#connection-handshake + // + socket.sendMeta.Do(func() { + var meta = bson.M{ + "driver": bson.M{ + "name": "mgo", + "version": "globalsign", + }, + "os": bson.M{ + "type": runtime.GOOS, + "architecture": runtime.GOARCH, + }, + } + + // Include the application name if set + if cluster.dialInfo.AppName != "" { + meta["application"] = bson.M{"name": cluster.dialInfo.AppName} + } + + cmd = append(cmd, bson.DocElem{ + Name: "client", + Value: meta, + }) + }) + + err := session.runOnSocket(socket, cmd, result) + session.Close() + return err +} + +type possibleTimeout interface { + Timeout() bool +} + +func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) { + addr := server.Addr + log("SYNC Processing ", addr, "...") + + // Retry a few times to avoid knocking a server down for a hiccup. + var result isMasterResult + var tryerr error + for retry := 0; ; retry++ { + if retry == 3 || retry == 1 && cluster.dialInfo.FailFast { + return nil, nil, tryerr + } + if retry > 0 { + // Don't abuse the server needlessly if there's something actually wrong. + if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() { + // Give a chance for waiters to timeout as well. + cluster.serverSynced.Broadcast() + } + time.Sleep(syncShortDelay) + } + + // Don't ever hit the pool limit for syncing + config := cluster.dialInfo.Copy() + config.PoolLimit = 0 + + socket, _, err := server.AcquireSocket(config) + if err != nil { + tryerr = err + logf("SYNC Failed to get socket to %s: %v", addr, err) + continue + } + err = cluster.isMaster(socket, &result) + + // Restore the correct dial config before returning it to the pool + socket.dialInfo = cluster.dialInfo + socket.Release() + + if err != nil { + tryerr = err + logf("SYNC Command 'ismaster' to %s failed: %v", addr, err) + continue + } + debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result) + break + } + + if cluster.dialInfo.ReplicaSetName != "" && result.SetName != cluster.dialInfo.ReplicaSetName { + logf("SYNC Server %s is not a member of replica set %q", addr, cluster.dialInfo.ReplicaSetName) + return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.dialInfo.ReplicaSetName) + } + + if result.IsMaster { + debugf("SYNC %s is a master.", addr) + if !server.info.Master { + // Made an incorrect assumption above, so fix stats. + stats.conn(-1, false) + stats.conn(+1, true) + } + } else if result.Secondary { + debugf("SYNC %s is a slave.", addr) + } else if cluster.dialInfo.Direct { + logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr) + } else { + logf("SYNC %s is neither a master nor a slave.", addr) + // Let stats track it as whatever was known before. + return nil, nil, errors.New(addr + " is not a master nor slave") + } + + info = &mongoServerInfo{ + Master: result.IsMaster, + Mongos: result.Msg == "isdbgrid", + Tags: result.Tags, + SetName: result.SetName, + MaxWireVersion: result.MaxWireVersion, + } + + hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives)) + if result.Primary != "" { + // First in the list to speed up master discovery. + hosts = append(hosts, result.Primary) + } + hosts = append(hosts, result.Hosts...) + hosts = append(hosts, result.Passives...) 
+ + debugf("SYNC %s knows about the following peers: %#v", addr, hosts) + return info, hosts, nil +} + +type syncKind bool + +const ( + completeSync syncKind = true + partialSync syncKind = false +) + +func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) { + cluster.Lock() + current := cluster.servers.Search(server.ResolvedAddr) + if current == nil { + if syncKind == partialSync { + cluster.Unlock() + server.Close() + log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.") + return + } + cluster.servers.Add(server) + if info.Master { + cluster.masters.Add(server) + log("SYNC Adding ", server.Addr, " to cluster as a master.") + } else { + log("SYNC Adding ", server.Addr, " to cluster as a slave.") + } + } else { + if server != current { + panic("addServer attempting to add duplicated server") + } + if server.Info().Master != info.Master { + if info.Master { + log("SYNC Server ", server.Addr, " is now a master.") + cluster.masters.Add(server) + } else { + log("SYNC Server ", server.Addr, " is now a slave.") + cluster.masters.Remove(server) + } + } + } + server.SetInfo(info) + debugf("SYNC Broadcasting availability of server %s", server.Addr) + cluster.serverSynced.Broadcast() + cluster.Unlock() +} + +func (cluster *mongoCluster) getKnownAddrs() []string { + cluster.RLock() + max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len() + seen := make(map[string]bool, max) + known := make([]string, 0, max) + + add := func(addr string) { + if _, found := seen[addr]; !found { + seen[addr] = true + known = append(known, addr) + } + } + + for _, addr := range cluster.userSeeds { + add(addr) + } + for _, addr := range cluster.dynaSeeds { + add(addr) + } + for _, serv := range cluster.servers.Slice() { + add(serv.Addr) + } + cluster.RUnlock() + + return known +} + +// syncServers injects a value into the cluster.sync channel to force +// an iteration of the syncServersLoop function. +func (cluster *mongoCluster) syncServers() { + select { + case cluster.sync <- true: + default: + } +} + +// How long to wait for a checkup of the cluster topology if nothing +// else kicks a synchronization before that. +const syncServersDelay = 30 * time.Second +const syncShortDelay = 500 * time.Millisecond + +// syncServersLoop loops while the cluster is alive to keep its idea of +// the server topology up-to-date. It must be called just once from +// newCluster. The loop iterates once syncServersDelay has passed, or +// if somebody injects a value into the cluster.sync channel to force a +// synchronization. A loop iteration will contact all servers in +// parallel, ask them about known peers and their own role within the +// cluster, and then attempt to do the same with all the peers +// retrieved. +func (cluster *mongoCluster) syncServersLoop() { + for { + debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster) + + cluster.Lock() + if cluster.references == 0 { + cluster.Unlock() + break + } + cluster.references++ // Keep alive while syncing. + direct := cluster.dialInfo.Direct + cluster.Unlock() + + cluster.syncServersIteration(direct) + + // We just synchronized, so consume any outstanding requests. + select { + case <-cluster.sync: + default: + } + + cluster.Release() + + // Hold off before allowing another sync. No point in + // burning CPU looking for down servers. 
+ if !cluster.dialInfo.FailFast { + time.Sleep(syncShortDelay) + } + + cluster.Lock() + if cluster.references == 0 { + cluster.Unlock() + break + } + cluster.syncCount++ + // Poke all waiters so they have a chance to timeout or + // restart syncing if they wish to. + cluster.serverSynced.Broadcast() + // Check if we have to restart immediately either way. + restart := !direct && cluster.masters.Empty() || cluster.servers.Empty() + cluster.Unlock() + + if restart { + log("SYNC No masters found. Will synchronize again.") + time.Sleep(syncShortDelay) + continue + } + + debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster) + + // Hold off until somebody explicitly requests a synchronization + // or it's time to check for a cluster topology change again. + select { + case <-cluster.sync: + case <-time.After(syncServersDelay): + } + } + debugf("SYNC Cluster %p is stopping its sync loop.", cluster) +} + +func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer { + cluster.RLock() + server := cluster.servers.Search(tcpaddr.String()) + cluster.RUnlock() + if server != nil { + return server + } + return newServer(addr, tcpaddr, cluster.sync, cluster.dial, cluster.dialInfo) +} + +func resolveAddr(addr string) (*net.TCPAddr, error) { + // Simple cases that do not need actual resolution. Works with IPv4 and v6. + if host, port, err := net.SplitHostPort(addr); err == nil { + if port, _ := strconv.Atoi(port); port > 0 { + zone := "" + if i := strings.LastIndex(host, "%"); i >= 0 { + zone = host[i+1:] + host = host[:i] + } + ip := net.ParseIP(host) + if ip != nil { + return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil + } + } + } + + // Attempt to resolve IPv4 and v6 concurrently. + addrChan := make(chan *net.TCPAddr, 2) + for _, network := range []string{"udp4", "udp6"} { + network := network + go func() { + // The unfortunate UDP dialing hack allows having a timeout on address resolution. + conn, err := net.DialTimeout(network, addr, 10*time.Second) + if err != nil { + addrChan <- nil + } else { + addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr)) + conn.Close() + } + }() + } + + // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available. + tcpaddr := <-addrChan + if tcpaddr == nil || len(tcpaddr.IP) != 4 { + var timeout <-chan time.Time + if tcpaddr != nil { + // Don't wait too long if an IPv6 address is known. + timeout = time.After(50 * time.Millisecond) + } + select { + case <-timeout: + case tcpaddr2 := <-addrChan: + if tcpaddr == nil || tcpaddr2 != nil { + // It's an IPv4 address or the only known address. Use it. 
+ tcpaddr = tcpaddr2 + } + } + } + + if tcpaddr == nil { + log("SYNC Failed to resolve server address: ", addr) + return nil, errors.New("failed to resolve server address: " + addr) + } + if tcpaddr.String() != addr { + debug("SYNC Address ", addr, " resolved as ", tcpaddr.String()) + } + return tcpaddr, nil +} + +type pendingAdd struct { + server *mongoServer + info *mongoServerInfo +} + +func (cluster *mongoCluster) syncServersIteration(direct bool) { + log("SYNC Starting full topology synchronization...") + + var wg sync.WaitGroup + var m sync.Mutex + notYetAdded := make(map[string]pendingAdd) + addIfFound := make(map[string]bool) + seen := make(map[string]bool) + syncKind := partialSync + + var spawnSync func(addr string, byMaster bool) + spawnSync = func(addr string, byMaster bool) { + wg.Add(1) + go func() { + defer wg.Done() + + tcpaddr, err := resolveAddr(addr) + if err != nil { + log("SYNC Failed to start sync of ", addr, ": ", err.Error()) + return + } + resolvedAddr := tcpaddr.String() + + m.Lock() + if byMaster { + if pending, ok := notYetAdded[resolvedAddr]; ok { + delete(notYetAdded, resolvedAddr) + m.Unlock() + cluster.addServer(pending.server, pending.info, completeSync) + return + } + addIfFound[resolvedAddr] = true + } + if seen[resolvedAddr] { + m.Unlock() + return + } + seen[resolvedAddr] = true + m.Unlock() + + server := cluster.server(addr, tcpaddr) + info, hosts, err := cluster.syncServer(server) + if err != nil { + cluster.removeServer(server) + return + } + + m.Lock() + add := direct || info.Master || addIfFound[resolvedAddr] + if add { + syncKind = completeSync + } else { + notYetAdded[resolvedAddr] = pendingAdd{server, info} + } + m.Unlock() + if add { + cluster.addServer(server, info, completeSync) + } + if !direct { + for _, addr := range hosts { + spawnSync(addr, info.Master) + } + } + }() + } + + knownAddrs := cluster.getKnownAddrs() + for _, addr := range knownAddrs { + spawnSync(addr, false) + } + wg.Wait() + + if syncKind == completeSync { + logf("SYNC Synchronization was complete (got data from primary).") + for _, pending := range notYetAdded { + cluster.removeServer(pending.server) + } + } else { + logf("SYNC Synchronization was partial (cannot talk to primary).") + for _, pending := range notYetAdded { + cluster.addServer(pending.server, pending.info, partialSync) + } + } + + cluster.Lock() + mastersLen := cluster.masters.Len() + logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen) + + // Update dynamic seeds, but only if we have any good servers. Otherwise, + // leave them alone for better chances of a successful sync in the future. + if syncKind == completeSync { + dynaSeeds := make([]string, cluster.servers.Len()) + for i, server := range cluster.servers.Slice() { + dynaSeeds[i] = server.Addr + } + cluster.dynaSeeds = dynaSeeds + debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds) + } + cluster.Unlock() +} + +// AcquireSocketWithPoolTimeout returns a socket to a server in the cluster. If slaveOk is +// true, it will attempt to return a socket to a slave server. If it is +// false, the socket will necessarily be to a master server. 
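+//
+// A sketch of a typical internal call, with illustrative tag and timeout values:
+//
+//     tags := []bson.D{{{Name: "dc", Value: "east"}}}
+//     sock, err := cluster.AcquireSocketWithPoolTimeout(Secondary, true, time.Minute, tags, info)
+//     if err == nil {
+//         defer sock.Release()
+//     }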
+func (cluster *mongoCluster) AcquireSocketWithPoolTimeout(mode Mode, slaveOk bool, syncTimeout time.Duration, serverTags []bson.D, info *DialInfo) (s *mongoSocket, err error) { + var started time.Time + var syncCount uint + for { + cluster.RLock() + for { + mastersLen := cluster.masters.Len() + slavesLen := cluster.servers.Len() - mastersLen + debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen) + if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk { + break + } + if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() { + break + } + if started.IsZero() { + // Initialize after fast path above. + started = time.Now() + syncCount = cluster.syncCount + } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.dialInfo.FailFast && cluster.syncCount != syncCount { + cluster.RUnlock() + return nil, errors.New("no reachable servers") + } + log("Waiting for servers to synchronize...") + cluster.syncServers() + + // Remember: this will release and reacquire the lock. + cluster.serverSynced.Wait() + } + + var server *mongoServer + if slaveOk { + server = cluster.servers.BestFit(mode, serverTags) + } else { + server = cluster.masters.BestFit(mode, nil) + } + cluster.RUnlock() + + if server == nil { + // Must have failed the requested tags. Sleep to avoid spinning. + time.Sleep(1e8) + continue + } + + s, abended, err := server.AcquireSocketWithBlocking(info) + if err == errPoolTimeout { + // No need to remove servers from the topology if acquiring a socket fails for this reason. + return nil, err + } + if err != nil { + cluster.removeServer(server) + cluster.syncServers() + continue + } + if abended && !slaveOk { + var result isMasterResult + err := cluster.isMaster(s, &result) + if err != nil || !result.IsMaster { + logf("Cannot confirm server %s as master (%v)", server.Addr, err) + s.Release() + cluster.syncServers() + time.Sleep(100 * time.Millisecond) + continue + } else { + // We've managed to successfully reconnect to the master, we are no longer abnormaly ended + server.Lock() + server.abended = false + server.Unlock() + } + } + return s, nil + } +} + +func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) { + cluster.Lock() + if cluster.cachedIndex == nil { + cluster.cachedIndex = make(map[string]bool) + } + if exists { + cluster.cachedIndex[cacheKey] = true + } else { + delete(cluster.cachedIndex, cacheKey) + } + cluster.Unlock() +} + +func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) { + cluster.RLock() + if cluster.cachedIndex != nil { + result = cluster.cachedIndex[cacheKey] + } + cluster.RUnlock() + return +} + +func (cluster *mongoCluster) ResetIndexCache() { + cluster.Lock() + cluster.cachedIndex = make(map[string]bool) + cluster.Unlock() +} diff --git a/backend/vendor/github.com/globalsign/mgo/coarse_time.go b/backend/vendor/github.com/globalsign/mgo/coarse_time.go new file mode 100644 index 00000000..e54dd17c --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/coarse_time.go @@ -0,0 +1,62 @@ +package mgo + +import ( + "sync" + "sync/atomic" + "time" +) + +// coarseTimeProvider provides a periodically updated (approximate) time value to +// amortise the cost of frequent calls to time.Now. +// +// A read throughput increase of ~6% was measured when using coarseTimeProvider with the +// high-precision event timer (HPET) on FreeBSD 11.1 and Go 1.10.1 after merging +// #116. 
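+//
+// A sketch of how a provider is created and torn down (the granularity shown
+// is illustrative):
+//
+//     ct := newcoarseTimeProvider(25 * time.Millisecond)
+//     defer ct.Close()
+//     approx := ct.Now() // refreshed roughly every 25ms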
+// +// Calling Now returns a time.Time that is updated at the configured interval, +// however due to scheduling the value may be marginally older than expected. +// +// coarseTimeProvider is safe for concurrent use. +type coarseTimeProvider struct { + once sync.Once + stop chan struct{} + last atomic.Value +} + +// Now returns the most recently acquired time.Time value. +func (t *coarseTimeProvider) Now() time.Time { + return t.last.Load().(time.Time) +} + +// Close stops the periodic update of t. +// +// Any subsequent calls to Now will return the same value forever. +func (t *coarseTimeProvider) Close() { + t.once.Do(func() { + close(t.stop) + }) +} + +// newcoarseTimeProvider returns a coarseTimeProvider configured to update at granularity. +func newcoarseTimeProvider(granularity time.Duration) *coarseTimeProvider { + t := &coarseTimeProvider{ + stop: make(chan struct{}), + } + + t.last.Store(time.Now()) + + go func() { + ticker := time.NewTicker(granularity) + for { + select { + case <-t.stop: + ticker.Stop() + return + case <-ticker.C: + t.last.Store(time.Now()) + } + } + }() + + return t +} diff --git a/backend/vendor/github.com/globalsign/mgo/doc.go b/backend/vendor/github.com/globalsign/mgo/doc.go new file mode 100644 index 00000000..f3f373bf --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/doc.go @@ -0,0 +1,35 @@ +// Package mgo (pronounced as "mango") offers a rich MongoDB driver for Go. +// +// Detailed documentation of the API is available at GoDoc: +// +// https://godoc.org/github.com/globalsign/mgo +// +// Usage of the driver revolves around the concept of sessions. To +// get started, obtain a session using the Dial function: +// +// session, err := mgo.Dial(url) +// +// This will establish one or more connections with the cluster of +// servers defined by the url parameter. From then on, the cluster +// may be queried with multiple consistency rules (see SetMode) and +// documents retrieved with statements such as: +// +// c := session.DB(database).C(collection) +// err := c.Find(query).One(&result) +// +// New sessions are typically created by calling session.Copy on the +// initial session obtained at dial time. These new sessions will share +// the same cluster information and connection pool, and may be easily +// handed into other methods and functions for organizing logic. +// Every session created must have its Close method called at the end +// of its life time, so its resources may be put back in the pool or +// collected, depending on the case. +// +// There is a sub-package that provides support for BSON, which can be +// used by itself as well: +// +// https://godoc.org/github.com/globalsign/mgo/bson +// +// For more details, see the documentation for the types and methods. +// +package mgo diff --git a/backend/vendor/github.com/globalsign/mgo/gridfs.go b/backend/vendor/github.com/globalsign/mgo/gridfs.go new file mode 100644 index 00000000..0954b166 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/gridfs.go @@ -0,0 +1,782 @@ +// mgo - MongoDB driver for Go +// +// Copyright (c) 2010-2012 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package mgo + +import ( + "crypto/md5" + "encoding/hex" + "errors" + "hash" + "io" + "os" + "sync" + "time" + + "github.com/globalsign/mgo/bson" +) + +// GridFS stores files in two collections: +// +// - chunks stores the binary chunks. For details, see the chunks Collection. +// - files stores the file’s metadata. For details, see the files Collection. +// +// GridFS places the collections in a common bucket by prefixing each with the bucket name. +// By default, GridFS uses two collections with a bucket named fs: +// +// - fs.files +// - fs.chunks +// +// You can choose a different bucket name, as well as create multiple buckets in a single database. +// The full collection name, which includes the bucket name, is subject to the namespace length limit. +// +// Relevant documentation: +// +// https://docs.mongodb.com/manual/core/gridfs/ +// https://docs.mongodb.com/manual/core/gridfs/#gridfs-chunks-collection +// https://docs.mongodb.com/manual/core/gridfs/#gridfs-files-collection +// +type GridFS struct { + Files *Collection + Chunks *Collection +} + +type gfsFileMode int + +const ( + gfsClosed gfsFileMode = 0 + gfsReading gfsFileMode = 1 + gfsWriting gfsFileMode = 2 +) + +// GridFile document in files collection +type GridFile struct { + m sync.Mutex + c sync.Cond + gfs *GridFS + mode gfsFileMode + err error + + chunk int + offset int64 + + wpending int + wbuf []byte + wsum hash.Hash + + rbuf []byte + rcache *gfsCachedChunk + + doc gfsFile +} + +type gfsFile struct { + Id interface{} `bson:"_id"` + ChunkSize int `bson:"chunkSize"` + UploadDate time.Time `bson:"uploadDate"` + Length int64 `bson:",minsize"` + MD5 string + Filename string `bson:",omitempty"` + ContentType string `bson:"contentType,omitempty"` + Metadata *bson.Raw `bson:",omitempty"` +} + +type gfsChunk struct { + Id interface{} `bson:"_id"` + FilesId interface{} `bson:"files_id"` + N int + Data []byte +} + +type gfsCachedChunk struct { + wait sync.Mutex + n int + data []byte + err error +} + +func newGridFS(db *Database, prefix string) *GridFS { + return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")} +} + +func (gfs *GridFS) newFile() *GridFile { + file := &GridFile{gfs: gfs} + file.c.L = &file.m + //runtime.SetFinalizer(file, finalizeFile) + return file +} + +func finalizeFile(file *GridFile) { + file.Close() +} + +// Create creates a new file with the provided name in the GridFS. 
If the file +// name already exists, a new version will be inserted with an up-to-date +// uploadDate that will cause it to be atomically visible to the Open and +// OpenId methods. If the file name is not important, an empty name may be +// provided and the file Id used instead. +// +// It's important to Close files whether they are being written to +// or read from, and to check the err result to ensure the operation +// completed successfully. +// +// A simple example inserting a new file: +// +// func check(err error) { +// if err != nil { +// panic(err.String()) +// } +// } +// file, err := db.GridFS("fs").Create("myfile.txt") +// check(err) +// n, err := file.Write([]byte("Hello world!")) +// check(err) +// err = file.Close() +// check(err) +// fmt.Printf("%d bytes written\n", n) +// +// The io.Writer interface is implemented by *GridFile and may be used to +// help on the file creation. For example: +// +// file, err := db.GridFS("fs").Create("myfile.txt") +// check(err) +// messages, err := os.Open("/var/log/messages") +// check(err) +// defer messages.Close() +// err = io.Copy(file, messages) +// check(err) +// err = file.Close() +// check(err) +// +func (gfs *GridFS) Create(name string) (file *GridFile, err error) { + file = gfs.newFile() + file.mode = gfsWriting + file.wsum = md5.New() + file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name} + return +} + +// OpenId returns the file with the provided id, for reading. +// If the file isn't found, err will be set to mgo.ErrNotFound. +// +// It's important to Close files whether they are being written to +// or read from, and to check the err result to ensure the operation +// completed successfully. +// +// The following example will print the first 8192 bytes from the file: +// +// func check(err error) { +// if err != nil { +// panic(err.String()) +// } +// } +// file, err := db.GridFS("fs").OpenId(objid) +// check(err) +// b := make([]byte, 8192) +// n, err := file.Read(b) +// check(err) +// fmt.Println(string(b)) +// check(err) +// err = file.Close() +// check(err) +// fmt.Printf("%d bytes read\n", n) +// +// The io.Reader interface is implemented by *GridFile and may be used to +// deal with it. As an example, the following snippet will dump the whole +// file into the standard output: +// +// file, err := db.GridFS("fs").OpenId(objid) +// check(err) +// err = io.Copy(os.Stdout, file) +// check(err) +// err = file.Close() +// check(err) +// +func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) { + var doc gfsFile + err = gfs.Files.Find(bson.M{"_id": id}).One(&doc) + if err != nil { + return + } + file = gfs.newFile() + file.mode = gfsReading + file.doc = doc + return +} + +// Open returns the most recently uploaded file with the provided +// name, for reading. If the file isn't found, err will be set +// to mgo.ErrNotFound. +// +// It's important to Close files whether they are being written to +// or read from, and to check the err result to ensure the operation +// completed successfully. +// +// The following example will print the first 8192 bytes from the file: +// +// file, err := db.GridFS("fs").Open("myfile.txt") +// check(err) +// b := make([]byte, 8192) +// n, err := file.Read(b) +// check(err) +// fmt.Println(string(b)) +// check(err) +// err = file.Close() +// check(err) +// fmt.Printf("%d bytes read\n", n) +// +// The io.Reader interface is implemented by *GridFile and may be used to +// deal with it. 
As an example, the following snippet will dump the whole +// file into the standard output: +// +// file, err := db.GridFS("fs").Open("myfile.txt") +// check(err) +// err = io.Copy(os.Stdout, file) +// check(err) +// err = file.Close() +// check(err) +// +func (gfs *GridFS) Open(name string) (file *GridFile, err error) { + var doc gfsFile + err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc) + if err != nil { + return + } + file = gfs.newFile() + file.mode = gfsReading + file.doc = doc + return +} + +// OpenNext opens the next file from iter for reading, sets *file to it, +// and returns true on the success case. If no more documents are available +// on iter or an error occurred, *file is set to nil and the result is false. +// Errors will be available via iter.Err(). +// +// The iter parameter must be an iterator on the GridFS files collection. +// Using the GridFS.Find method is an easy way to obtain such an iterator, +// but any iterator on the collection will work. +// +// If the provided *file is non-nil, OpenNext will close it before attempting +// to iterate to the next element. This means that in a loop one only +// has to worry about closing files when breaking out of the loop early +// (break, return, or panic). +// +// For example: +// +// gfs := db.GridFS("fs") +// query := gfs.Find(nil).Sort("filename") +// iter := query.Iter() +// var f *mgo.GridFile +// for gfs.OpenNext(iter, &f) { +// fmt.Printf("Filename: %s\n", f.Name()) +// } +// if iter.Close() != nil { +// panic(iter.Close()) +// } +// +func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool { + if *file != nil { + // Ignoring the error here shouldn't be a big deal + // as we're reading the file and the loop iteration + // for this file is finished. + _ = (*file).Close() + } + var doc gfsFile + if !iter.Next(&doc) { + *file = nil + return false + } + f := gfs.newFile() + f.mode = gfsReading + f.doc = doc + *file = f + return true +} + +// Find runs query on GridFS's files collection and returns +// the resulting Query. +// +// This logic: +// +// gfs := db.GridFS("fs") +// iter := gfs.Find(nil).Iter() +// +// Is equivalent to: +// +// files := db.C("fs" + ".files") +// iter := files.Find(nil).Iter() +// +func (gfs *GridFS) Find(query interface{}) *Query { + return gfs.Files.Find(query) +} + +// RemoveId deletes the file with the provided id from the GridFS. +func (gfs *GridFS) RemoveId(id interface{}) error { + err := gfs.Files.Remove(bson.M{"_id": id}) + if err != nil { + return err + } + _, err = gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: id}}) + return err +} + +type gfsDocId struct { + Id interface{} `bson:"_id"` +} + +// Remove deletes all files with the provided name from the GridFS. +func (gfs *GridFS) Remove(name string) (err error) { + iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter() + var doc gfsDocId + for iter.Next(&doc) { + if e := gfs.RemoveId(doc.Id); e != nil { + err = e + } + } + if err == nil { + err = iter.Close() + } + return err +} + +func (file *GridFile) assertMode(mode gfsFileMode) { + switch file.mode { + case mode: + return + case gfsWriting: + panic("GridFile is open for writing") + case gfsReading: + panic("GridFile is open for reading") + case gfsClosed: + panic("GridFile is closed") + default: + panic("internal error: missing GridFile mode") + } +} + +// SetChunkSize sets size of saved chunks. 
Once the file is written to, it +// will be split in blocks of that size and each block saved into an +// independent chunk document. The default chunk size is 255kb. +// +// It is a runtime error to call this function once the file has started +// being written to. +func (file *GridFile) SetChunkSize(bytes int) { + file.assertMode(gfsWriting) + debugf("GridFile %p: setting chunk size to %d", file, bytes) + file.m.Lock() + file.doc.ChunkSize = bytes + file.m.Unlock() +} + +// Id returns the current file Id. +func (file *GridFile) Id() interface{} { + return file.doc.Id +} + +// SetId changes the current file Id. +// +// It is a runtime error to call this function once the file has started +// being written to, or when the file is not open for writing. +func (file *GridFile) SetId(id interface{}) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.Id = id + file.m.Unlock() +} + +// Name returns the optional file name. An empty string will be returned +// in case it is unset. +func (file *GridFile) Name() string { + return file.doc.Filename +} + +// SetName changes the optional file name. An empty string may be used to +// unset it. +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetName(name string) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.Filename = name + file.m.Unlock() +} + +// ContentType returns the optional file content type. An empty string will be +// returned in case it is unset. +func (file *GridFile) ContentType() string { + return file.doc.ContentType +} + +// SetContentType changes the optional file content type. An empty string may be +// used to unset it. +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetContentType(ctype string) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.ContentType = ctype + file.m.Unlock() +} + +// GetMeta unmarshals the optional "metadata" field associated with the +// file into the result parameter. The meaning of keys under that field +// is user-defined. For example: +// +// result := struct{ INode int }{} +// err = file.GetMeta(&result) +// if err != nil { +// panic(err.String()) +// } +// fmt.Printf("inode: %d\n", result.INode) +// +func (file *GridFile) GetMeta(result interface{}) (err error) { + file.m.Lock() + if file.doc.Metadata != nil { + err = bson.Unmarshal(file.doc.Metadata.Data, result) + } + file.m.Unlock() + return +} + +// SetMeta changes the optional "metadata" field associated with the +// file. The meaning of keys under that field is user-defined. +// For example: +// +// file.SetMeta(bson.M{"inode": inode}) +// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetMeta(metadata interface{}) { + file.assertMode(gfsWriting) + data, err := bson.Marshal(metadata) + file.m.Lock() + if err != nil && file.err == nil { + file.err = err + } else { + file.doc.Metadata = &bson.Raw{Data: data} + } + file.m.Unlock() +} + +// Size returns the file size in bytes. +func (file *GridFile) Size() (bytes int64) { + file.m.Lock() + bytes = file.doc.Length + file.m.Unlock() + return +} + +// MD5 returns the file MD5 as a hex-encoded string. +func (file *GridFile) MD5() (md5 string) { + return file.doc.MD5 +} + +// UploadDate returns the file upload time. +func (file *GridFile) UploadDate() time.Time { + return file.doc.UploadDate +} + +// SetUploadDate changes the file upload time. 
+// +// It is a runtime error to call this function when the file is not open +// for writing. +func (file *GridFile) SetUploadDate(t time.Time) { + file.assertMode(gfsWriting) + file.m.Lock() + file.doc.UploadDate = t + file.m.Unlock() +} + +// Close flushes any pending changes in case the file is being written +// to, waits for any background operations to finish, and closes the file. +// +// It's important to Close files whether they are being written to +// or read from, and to check the err result to ensure the operation +// completed successfully. +func (file *GridFile) Close() (err error) { + file.m.Lock() + defer file.m.Unlock() + if file.mode == gfsWriting { + if len(file.wbuf) > 0 && file.err == nil { + file.insertChunk(file.wbuf) + file.wbuf = file.wbuf[0:0] + } + file.completeWrite() + } else if file.mode == gfsReading && file.rcache != nil { + file.rcache.wait.Lock() + file.rcache = nil + } + file.mode = gfsClosed + debugf("GridFile %p: closed", file) + return file.err +} + +func (file *GridFile) completeWrite() { + for file.wpending > 0 { + debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending) + file.c.Wait() + } + if file.err == nil { + hexsum := hex.EncodeToString(file.wsum.Sum(nil)) + if file.doc.UploadDate.IsZero() { + file.doc.UploadDate = bson.Now() + } + file.doc.MD5 = hexsum + file.err = file.gfs.Files.Insert(file.doc) + } + if file.err != nil { + file.gfs.Chunks.RemoveAll(bson.D{{Name: "files_id", Value: file.doc.Id}}) + } + if file.err == nil { + index := Index{ + Key: []string{"files_id", "n"}, + Unique: true, + } + file.err = file.gfs.Chunks.EnsureIndex(index) + } +} + +// Abort cancels an in-progress write, preventing the file from being +// automically created and ensuring previously written chunks are +// removed when the file is closed. +// +// It is a runtime error to call Abort when the file was not opened +// for writing. +func (file *GridFile) Abort() { + if file.mode != gfsWriting { + panic("file.Abort must be called on file opened for writing") + } + file.err = errors.New("write aborted") +} + +// Write writes the provided data to the file and returns the +// number of bytes written and an error in case something +// wrong happened. +// +// The file will internally cache the data so that all but the last +// chunk sent to the database have the size defined by SetChunkSize. +// This also means that errors may be deferred until a future call +// to Write or Close. +// +// The parameters and behavior of this function turn the file +// into an io.Writer. +func (file *GridFile) Write(data []byte) (n int, err error) { + file.assertMode(gfsWriting) + file.m.Lock() + debugf("GridFile %p: writing %d bytes", file, len(data)) + defer file.m.Unlock() + + if file.err != nil { + return 0, file.err + } + + n = len(data) + file.doc.Length += int64(n) + chunkSize := file.doc.ChunkSize + + if len(file.wbuf)+len(data) < chunkSize { + file.wbuf = append(file.wbuf, data...) + return + } + + // First, flush file.wbuf complementing with data. + if len(file.wbuf) > 0 { + missing := chunkSize - len(file.wbuf) + if missing > len(data) { + missing = len(data) + } + file.wbuf = append(file.wbuf, data[:missing]...) + data = data[missing:] + file.insertChunk(file.wbuf) + file.wbuf = file.wbuf[0:0] + } + + // Then, flush all chunks from data without copying. 
+ for len(data) > chunkSize { + size := chunkSize + if size > len(data) { + size = len(data) + } + file.insertChunk(data[:size]) + data = data[size:] + } + + // And append the rest for a future call. + file.wbuf = append(file.wbuf, data...) + + return n, file.err +} + +func (file *GridFile) insertChunk(data []byte) { + n := file.chunk + file.chunk++ + debugf("GridFile %p: adding to checksum: %q", file, string(data)) + file.wsum.Write(data) + + for file.doc.ChunkSize*file.wpending >= 1024*1024 { + // Hold on.. we got a MB pending. + file.c.Wait() + if file.err != nil { + return + } + } + + file.wpending++ + + debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data)) + + // We may not own the memory of data, so rather than + // simply copying it, we'll marshal the document ahead of time. + data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data}) + if err != nil { + file.err = err + return + } + + go func() { + err := file.gfs.Chunks.Insert(bson.Raw{Data: data}) + file.m.Lock() + file.wpending-- + if err != nil && file.err == nil { + file.err = err + } + file.c.Broadcast() + file.m.Unlock() + }() +} + +// Seek sets the offset for the next Read or Write on file to +// offset, interpreted according to whence: 0 means relative to +// the origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. It returns the new offset and +// an error, if any. +func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) { + file.m.Lock() + debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence) + defer file.m.Unlock() + switch whence { + case os.SEEK_SET: + case os.SEEK_CUR: + offset += file.offset + case os.SEEK_END: + offset += file.doc.Length + default: + panic("unsupported whence value") + } + if offset > file.doc.Length { + return file.offset, errors.New("seek past end of file") + } + if offset == file.doc.Length { + // If we're seeking to the end of the file, + // no need to read anything. This enables + // a client to find the size of the file using only the + // io.ReadSeeker interface with low overhead. + file.offset = offset + return file.offset, nil + } + chunk := int(offset / int64(file.doc.ChunkSize)) + if chunk+1 == file.chunk && offset >= file.offset { + file.rbuf = file.rbuf[int(offset-file.offset):] + file.offset = offset + return file.offset, nil + } + file.offset = offset + file.chunk = chunk + file.rbuf = nil + file.rbuf, err = file.getChunk() + if err == nil { + file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):] + } + return file.offset, err +} + +// Read reads into b the next available data from the file and +// returns the number of bytes written and an error in case +// something wrong happened. At the end of the file, n will +// be zero and err will be set to io.EOF. +// +// The parameters and behavior of this function turn the file +// into an io.Reader. 
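+//
+// Combined with Seek, data can be read from an arbitrary offset. For example,
+// reading the trailing 16 bytes of a sufficiently large file (a sketch reusing
+// the check helper from the examples above):
+//
+//     _, err := file.Seek(-16, os.SEEK_END)
+//     check(err)
+//     tail := make([]byte, 16)
+//     n, err := file.Read(tail)
+//     check(err)
+//     fmt.Printf("trailing bytes: %q\n", tail[:n])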
+func (file *GridFile) Read(b []byte) (n int, err error) { + file.assertMode(gfsReading) + file.m.Lock() + debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b)) + defer file.m.Unlock() + if file.offset == file.doc.Length { + return 0, io.EOF + } + for err == nil { + i := copy(b, file.rbuf) + n += i + file.offset += int64(i) + file.rbuf = file.rbuf[i:] + if i == len(b) || file.offset == file.doc.Length { + break + } + b = b[i:] + file.rbuf, err = file.getChunk() + } + return n, err +} + +func (file *GridFile) getChunk() (data []byte, err error) { + cache := file.rcache + file.rcache = nil + if cache != nil && cache.n == file.chunk { + debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk) + cache.wait.Lock() + data, err = cache.data, cache.err + } else { + debugf("GridFile %p: Fetching chunk %d", file, file.chunk) + var doc gfsChunk + err = file.gfs.Chunks.Find(bson.D{{Name: "files_id", Value: file.doc.Id}, {Name: "n", Value: file.chunk}}).One(&doc) + data = doc.Data + } + file.chunk++ + if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length { + // Read the next one in background. + cache = &gfsCachedChunk{n: file.chunk} + cache.wait.Lock() + debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk) + // Clone the session to avoid having it closed in between. + chunks := file.gfs.Chunks + session := chunks.Database.Session.Clone() + go func(id interface{}, n int) { + defer session.Close() + chunks = chunks.With(session) + var doc gfsChunk + cache.err = chunks.Find(bson.D{{Name: "files_id", Value: id}, {Name: "n", Value: n}}).One(&doc) + cache.data = doc.Data + cache.wait.Unlock() + }(file.doc.Id, file.chunk) + file.rcache = cache + } + debugf("Returning err: %#v", err) + return +} diff --git a/backend/vendor/github.com/globalsign/mgo/internal/json/LICENSE b/backend/vendor/github.com/globalsign/mgo/internal/json/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/internal/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/vendor/github.com/globalsign/mgo/internal/json/decode.go b/backend/vendor/github.com/globalsign/mgo/internal/json/decode.go new file mode 100644 index 00000000..d5ca1f9a --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/internal/json/decode.go @@ -0,0 +1,1685 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use, If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores key- +// value pairs from the JSON object into the map. The map's key type must +// either be a string or implement encoding.TextUnmarshaler. 
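+//
+// A short sketch of decoding into a map (illustrative only; the literal
+// input and variable names are assumptions added for this example):
+//
+//	var counts map[string]int
+//	if err := Unmarshal([]byte(`{"a": 1, "b": 2}`), &counts); err != nil {
+//		// handle the syntax or type error
+//	}
+//	// counts now holds {"a": 1, "b": 2}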
+// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. 
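+//
+// A short illustrative sketch of working with a Number (the values below
+// are assumptions for this example, not part of the upstream source):
+//
+//	n := Number("42.5")
+//	s := n.String()       // "42.5"
+//	f, err := n.Float64() // 42.5, <nil>
+//	_, _, _ = s, f, err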
+func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool + ext Extension +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. + if c == '{' { + d.scan.step(&d.scan, '}') + } else if c == '[' { + d.scan.step(&d.scan, ']') + } else { + // Was inside a function name. Get out of it. + d.scan.step(&d.scan, '(') + d.scan.step(&d.scan, ')') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. 
+func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + + case scanBeginName: + d.name(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. +func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginName: + switch v := d.nameInterface().(type) { + case nil, string: + return v + } + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, v + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, v + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if d.storeKeyed(pv) { + return + } + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. 
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: + // struct or + // map[string]T or map[encoding.TextUnmarshaler]T + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind or be an encoding.TextUnmarshaler. + t := v.Type() + if t.Key().Kind() != reflect.String && + !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + + empty := true + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + if !empty && !d.ext.trailingCommas { + d.syntaxError("beginning of object key string") + } + break + } + empty = false + if op == scanBeginName { + if !d.ext.unquotedKeys { + d.syntaxError("beginning of object key string") + } + } else if op != scanBeginLiteral { + d.error(errPhase) + } + unquotedKey := op == scanBeginName + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + var key []byte + if unquotedKey { + key = item + // TODO Fix code below to quote item when necessary. + } else { + var ok bool + key, ok = unquoteBytes(item) + if !ok { + d.error(errPhase) + } + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(v.Type().Key()) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(item, kv, true) + kv = kv.Elem() + default: + panic("json: Unexpected key type") // should never occur + } + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. 
+ op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// isNull returns whether there's a null literal at the provided offset. +func (d *decodeState) isNull(off int) bool { + if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' { + return false + } + d.nextscan.reset() + for i, c := range d.data[off:] { + if i > 4 { + return false + } + switch d.nextscan.step(&d.nextscan, c) { + case scanContinue, scanBeginName: + continue + } + break + } + return true +} + +// name consumes a const or function from d.data[d.off-1:], decoding into the value v. +// the first byte of the function name has been read already. +func (d *decodeState) name(v reflect.Value) { + if d.isNull(d.off - 1) { + d.literal(v) + return + } + + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if d.storeKeyed(pv) { + return + } + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over function in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + out := d.nameInterface() + if out == nil { + v.Set(reflect.Zero(v.Type())) + } else { + v.Set(reflect.ValueOf(out)) + } + return + } + + nameStart := d.off - 1 + + op := d.scanWhile(scanContinue) + + name := d.data[nameStart : d.off-1] + if op != scanParam { + // Back up so the byte just read is consumed next. + d.off-- + d.scan.undo(op) + if l, ok := d.convertLiteral(name); ok { + d.storeValue(v, l) + return + } + d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) + } + + funcName := string(name) + funcData := d.ext.funcs[funcName] + if funcData.key == "" { + d.error(fmt.Errorf("json: unknown function %q", funcName)) + } + + // Check type of target: + // struct or + // map[string]T or map[encoding.TextUnmarshaler]T + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind or be an encoding.TextUnmarshaler. + t := v.Type() + if t.Key().Kind() != reflect.String && + !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + // TODO Fix case of func field as map. + //topv := v + + // Figure out field corresponding to function. + key := []byte(funcData.key) + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + v = reflect.New(elemType).Elem() + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + for _, i := range f.index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(i) + } + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + } + + // Check for unmarshaler on func field itself. 
+ u, _, _ = d.indirect(v, false) + if u != nil { + d.off = nameStart + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + + var mapElem reflect.Value + + // Parse function arguments. + for i := 0; ; i++ { + // closing ) - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + if i >= len(funcData.args) { + d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) + } + key := []byte(funcData.args[i]) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, key) { + f = ff + break + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := v.Type().Key() + var kv reflect.Value + switch { + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(v.Type().Key()) + case reflect.PtrTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(v.Type().Key()) + d.literalStore(key, kv, true) + kv = kv.Elem() + default: + panic("json: Unexpected key type") // should never occur + } + v.SetMapIndex(kv, subv) + } + + // Next token must be , or ). + op = d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + if op != scanParam { + d.error(errPhase) + } + } +} + +// keyed attempts to decode an object or function using a keyed doc extension, +// and returns the value and true on success, or nil and false otherwise. +func (d *decodeState) keyed() (interface{}, bool) { + if len(d.ext.keyed) == 0 { + return nil, false + } + + unquote := false + + // Look-ahead first key to check for a keyed document extension. 
+ d.nextscan.reset() + var start, end int + for i, c := range d.data[d.off-1:] { + switch op := d.nextscan.step(&d.nextscan, c); op { + case scanSkipSpace, scanContinue, scanBeginObject: + continue + case scanBeginLiteral, scanBeginName: + unquote = op == scanBeginLiteral + start = i + continue + } + end = i + break + } + + name := bytes.Trim(d.data[d.off-1+start:d.off-1+end], " \n\t") + + var key []byte + var ok bool + if unquote { + key, ok = unquoteBytes(name) + if !ok { + d.error(errPhase) + } + } else { + funcData, ok := d.ext.funcs[string(name)] + if !ok { + return nil, false + } + key = []byte(funcData.key) + } + + decode, ok := d.ext.keyed[string(key)] + if !ok { + return nil, false + } + + d.off-- + out, err := decode(d.next()) + if err != nil { + d.error(err) + } + return out, true +} + +func (d *decodeState) storeKeyed(v reflect.Value) bool { + keyed, ok := d.keyed() + if !ok { + return false + } + d.storeValue(v, keyed) + return true +} + +var ( + trueBytes = []byte("true") + falseBytes = []byte("false") + nullBytes = []byte("null") +) + +func (d *decodeState) storeValue(v reflect.Value, from interface{}) { + switch from { + case nil: + d.literalStore(nullBytes, v, false) + return + case true: + d.literalStore(trueBytes, v, false) + return + case false: + d.literalStore(falseBytes, v, false) + return + } + fromv := reflect.ValueOf(from) + for fromv.Kind() == reflect.Ptr && !fromv.IsNil() { + fromv = fromv.Elem() + } + fromt := fromv.Type() + for v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + vt := v.Type() + if fromt.AssignableTo(vt) { + v.Set(fromv) + } else if fromt.ConvertibleTo(vt) { + v.Set(fromv.Convert(vt)) + } else { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + } +} + +func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) { + if len(name) == 0 { + return nil, false + } + switch name[0] { + case 't': + if bytes.Equal(name, trueBytes) { + return true, true + } + case 'f': + if bytes.Equal(name, falseBytes) { + return false, true + } + case 'n': + if bytes.Equal(name, nullBytes) { + return nil, true + } + } + if l, ok := d.ext.consts[string(name)]; ok { + return l, true + } + return nil, false +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). +func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, 
reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + case scanBeginName: + return d.nameInterface() + } +} + +func (d *decodeState) syntaxError(expected string) { + msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected) + d.error(&SyntaxError{msg, int64(d.off)}) +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + if len(v) > 0 && !d.ext.trailingCommas { + d.syntaxError("beginning of value") + } + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() interface{} { + v, ok := d.keyed() + if ok { + return v + } + + m := make(map[string]interface{}) + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + if len(m) > 0 && !d.ext.trailingCommas { + d.syntaxError("beginning of object key string") + } + break + } + if op == scanBeginName { + if !d.ext.unquotedKeys { + d.syntaxError("beginning of object key string") + } + } else if op != scanBeginLiteral { + d.error(errPhase) + } + unquotedKey := op == scanBeginName + + // Read string key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + var key string + if unquotedKey { + key = string(item) + } else { + var ok bool + key, ok = unquote(item) + if !ok { + d.error(errPhase) + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. 
+func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// nameInterface is like function but returns map[string]interface{}. +func (d *decodeState) nameInterface() interface{} { + v, ok := d.keyed() + if ok { + return v + } + + nameStart := d.off - 1 + + op := d.scanWhile(scanContinue) + + name := d.data[nameStart : d.off-1] + if op != scanParam { + // Back up so the byte just read is consumed next. + d.off-- + d.scan.undo(op) + if l, ok := d.convertLiteral(name); ok { + return l + } + d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)}) + } + + funcName := string(name) + funcData := d.ext.funcs[funcName] + if funcData.key == "" { + d.error(fmt.Errorf("json: unknown function %q", funcName)) + } + + m := make(map[string]interface{}) + for i := 0; ; i++ { + // Look ahead for ) - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + if i >= len(funcData.args) { + d.error(fmt.Errorf("json: too many arguments for function %s", funcName)) + } + m[funcData.args[i]] = d.valueInterface() + + // Next token must be , or ). + op = d.scanWhile(scanSkipSpace) + if op == scanEndParams { + break + } + if op != scanParam { + d.error(errPhase) + } + } + return map[string]interface{}{funcData.key: m} +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. 
+ if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. + default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/backend/vendor/github.com/globalsign/mgo/internal/json/encode.go b/backend/vendor/github.com/globalsign/mgo/internal/json/encode.go new file mode 100644 index 00000000..e4b8f864 --- /dev/null +++ b/backend/vendor/github.com/globalsign/mgo/internal/json/encode.go @@ -0,0 +1,1260 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 4627. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// This escaping can be disabled using an Encoder with DisableHTMLEscaping. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON value. +// +// Struct values encode as JSON objects. 
Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. +// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. The map's key type must either be a string +// or implement encoding.TextMarshaler. The map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON value. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON value. +// +// Channel, complex, and function values cannot be encoded in JSON. 
+// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. +// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v, encOpts{escapeHTML: true}) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML