diff --git a/backend/conf/config.yml b/backend/conf/config.yml index 3805762a..a5e0b23b 100644 --- a/backend/conf/config.yml +++ b/backend/conf/config.yml @@ -20,7 +20,7 @@ log: server: host: 0.0.0.0 port: 8000 - master: "N" + master: "Y" secret: "crawlab" register: # mac地址 或者 ip地址,如果是ip,则需要手动指定IP diff --git a/backend/config/config.go b/backend/config/config.go index 4d83c0f7..e4c4616c 100644 --- a/backend/config/config.go +++ b/backend/config/config.go @@ -28,7 +28,7 @@ func (c *Config) Init() error { } viper.SetConfigType("yaml") // 设置配置文件格式为YAML viper.AutomaticEnv() // 读取匹配的环境变量 - viper.SetEnvPrefix("CRAWLAB") // 读取环境变量的前缀为APISERVER + viper.SetEnvPrefix("CRAWLAB") // 读取环境变量的前缀为CRAWLAB replacer := strings.NewReplacer(".", "_") viper.SetEnvKeyReplacer(replacer) if err := viper.ReadInConfig(); err != nil { // viper解析配置文件 diff --git a/backend/constants/anchor.go b/backend/constants/anchor.go new file mode 100644 index 00000000..f462135f --- /dev/null +++ b/backend/constants/anchor.go @@ -0,0 +1,8 @@ +package constants + +const ( + AnchorStartStage = "START_STAGE" + AnchorStartUrl = "START_URL" + AnchorItems = "ITEMS" + AnchorParsers = "PARSERS" +) diff --git a/backend/constants/config_spider.go b/backend/constants/config_spider.go new file mode 100644 index 00000000..c29624dc --- /dev/null +++ b/backend/constants/config_spider.go @@ -0,0 +1,6 @@ +package constants + +const ( + EngineScrapy = "scrapy" + EngineColly = "colly" +) diff --git a/backend/constants/scrapy.go b/backend/constants/scrapy.go new file mode 100644 index 00000000..bc82508f --- /dev/null +++ b/backend/constants/scrapy.go @@ -0,0 +1,5 @@ +package constants + +const ScrapyProtectedStageNames = "" + +const ScrapyProtectedFieldNames = "_id,task_id,ts" diff --git a/backend/constants/spider.go b/backend/constants/spider.go index b4b7f65e..5119aa67 100644 --- a/backend/constants/spider.go +++ b/backend/constants/spider.go @@ -3,4 +3,5 @@ package constants const ( Customized = "customized" Configurable = "configurable" + Plugin = "plugin" ) diff --git a/backend/database/redis.go b/backend/database/redis.go index 348a74bb..bffc40be 100644 --- a/backend/database/redis.go +++ b/backend/database/redis.go @@ -102,7 +102,7 @@ func NewRedisPool() *redis.Pool { return redis.DialURL(url, redis.DialConnectTimeout(time.Second*10), redis.DialReadTimeout(time.Second*10), - redis.DialWriteTimeout(time.Second*10), + redis.DialWriteTimeout(time.Second*15), ) }, TestOnBorrow: func(c redis.Conn, t time.Time) error { diff --git a/backend/entity/common.go b/backend/entity/common.go index 332cc494..c46ae4f9 100644 --- a/backend/entity/common.go +++ b/backend/entity/common.go @@ -3,15 +3,15 @@ package entity import "strconv" type Page struct { - Skip int - Limit int - PageNum int + Skip int + Limit int + PageNum int PageSize int } -func (p *Page)GetPage(pageNum string, pageSize string) { +func (p *Page) GetPage(pageNum string, pageSize string) { p.PageNum, _ = strconv.Atoi(pageNum) p.PageSize, _ = strconv.Atoi(pageSize) p.Skip = p.PageSize * (p.PageNum - 1) p.Limit = p.PageSize -} \ No newline at end of file +} diff --git a/backend/entity/config_spider.go b/backend/entity/config_spider.go new file mode 100644 index 00000000..3fe28bc9 --- /dev/null +++ b/backend/entity/config_spider.go @@ -0,0 +1,30 @@ +package entity + +type ConfigSpiderData struct { + Version string `yaml:"version" json:"version"` + Engine string `yaml:"engine" json:"engine"` + StartUrl string `yaml:"start_url" json:"start_url"` + StartStage string `yaml:"start_stage" 
json:"start_stage"` + Stages map[string]Stage `yaml:"stages" json:"stages"` + Settings map[string]string `yaml:"settings" json:"settings"` +} + +type Stage struct { + Name string `yaml:"name" json:"name"` + IsList bool `yaml:"is_list" json:"is_list"` + ListCss string `yaml:"list_css" json:"list_css"` + ListXpath string `yaml:"list_xpath" json:"list_xpath"` + PageCss string `yaml:"page_css" json:"page_css"` + PageXpath string `yaml:"page_xpath" json:"page_xpath"` + PageAttr string `yaml:"page_attr" json:"page_attr"` + Fields []Field `yaml:"fields" json:"fields"` +} + +type Field struct { + Name string `yaml:"name" json:"name"` + Css string `yaml:"css" json:"css"` + Xpath string `yaml:"xpath" json:"xpath"` + Attr string `yaml:"attr" json:"attr"` + NextStage string `yaml:"next_stage" json:"next_stage"` + Remark string `yaml:"remark" json:"remark"` +} diff --git a/backend/go.mod b/backend/go.mod index 428c2fd3..d59b6d41 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -17,4 +17,5 @@ require ( github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 github.com/spf13/viper v1.4.0 gopkg.in/go-playground/validator.v9 v9.29.1 + gopkg.in/yaml.v2 v2.2.2 ) diff --git a/backend/main.go b/backend/main.go index 2c92ab37..b49efae7 100644 --- a/backend/main.go +++ b/backend/main.go @@ -110,7 +110,6 @@ func main() { if model.IsMaster() { // 中间件 app.Use(middlewares.CORSMiddleware()) - //app.Use(middlewares.AuthorizationMiddleware()) anonymousGroup := app.Group("/") { anonymousGroup.POST("/login", routes.Login) // 用户登录 @@ -130,7 +129,7 @@ func main() { // 爬虫 authGroup.GET("/spiders", routes.GetSpiderList) // 爬虫列表 authGroup.GET("/spiders/:id", routes.GetSpider) // 爬虫详情 - authGroup.POST("/spiders", routes.PutSpider) // 上传爬虫 + authGroup.POST("/spiders", routes.PutSpider) // 上传爬虫 TODO: 名称不对 authGroup.POST("/spiders/:id", routes.PostSpider) // 修改爬虫 authGroup.POST("/spiders/:id/publish", routes.PublishSpider) // 发布爬虫 authGroup.DELETE("/spiders/:id", routes.DeleteSpider) // 删除爬虫 @@ -140,6 +139,14 @@ func main() { authGroup.GET("/spiders/:id/dir", routes.GetSpiderDir) // 爬虫目录 authGroup.GET("/spiders/:id/stats", routes.GetSpiderStats) // 爬虫统计数据 authGroup.GET("/spider/types", routes.GetSpiderTypes) // 爬虫类型 + // 可配置爬虫 + authGroup.GET("/config_spiders/:id/config", routes.GetConfigSpiderConfig) // 获取可配置爬虫配置 + authGroup.POST("/config_spiders/:id/config", routes.PostConfigSpiderConfig) // 更改可配置爬虫配置 + authGroup.PUT("/config_spiders", routes.PutConfigSpider) // 添加可配置爬虫 + authGroup.POST("/config_spiders/:id", routes.PostConfigSpider) // 修改可配置爬虫 + authGroup.POST("/config_spiders/:id/upload", routes.UploadConfigSpider) // 上传可配置爬虫 + authGroup.POST("/config_spiders/:id/spiderfile", routes.PostConfigSpiderSpiderfile) // 上传可配置爬虫 + authGroup.GET("/config_spiders_templates", routes.GetConfigSpiderTemplateList) // 获取可配置爬虫模版列表 // 任务 authGroup.GET("/tasks", routes.GetTaskList) // 任务列表 authGroup.GET("/tasks/:id", routes.GetTask) // 任务详情 diff --git a/backend/mock/node_test.go b/backend/mock/node_test.go index 669cafc5..abd568c2 100644 --- a/backend/mock/node_test.go +++ b/backend/mock/node_test.go @@ -42,12 +42,12 @@ func init() { app.DELETE("/tasks/:id", DeleteTask) // 删除任务 app.GET("/tasks/:id/results", GetTaskResults) // 任务结果 app.GET("/tasks/:id/results/download", DownloadTaskResultsCsv) // 下载任务结果 - app.GET("/spiders", GetSpiderList) // 爬虫列表 - app.GET("/spiders/:id", GetSpider) // 爬虫详情 - app.POST("/spiders/:id", PostSpider) // 修改爬虫 - app.DELETE("/spiders/:id",DeleteSpider) // 删除爬虫 - 
app.GET("/spiders/:id/tasks",GetSpiderTasks) // 爬虫任务列表 - app.GET("/spiders/:id/dir",GetSpiderDir) // 爬虫目录 + app.GET("/spiders", GetSpiderList) // 爬虫列表 + app.GET("/spiders/:id", GetSpider) // 爬虫详情 + app.POST("/spiders/:id", PostSpider) // 修改爬虫 + app.DELETE("/spiders/:id", DeleteSpider) // 删除爬虫 + app.GET("/spiders/:id/tasks", GetSpiderTasks) // 爬虫任务列表 + app.GET("/spiders/:id/dir", GetSpiderDir) // 爬虫目录 } //mock test, test data in ./mock diff --git a/backend/mock/stats.go b/backend/mock/stats.go index db2348c6..f0227da9 100644 --- a/backend/mock/stats.go +++ b/backend/mock/stats.go @@ -6,8 +6,6 @@ import ( "net/http" ) - - var taskDailyItems = []model.TaskDailyItem{ { Date: "2019/08/19", diff --git a/backend/mock/system.go b/backend/mock/system.go index c4807247..f33e02ba 100644 --- a/backend/mock/system.go +++ b/backend/mock/system.go @@ -1 +1 @@ -package mock \ No newline at end of file +package mock diff --git a/backend/mock/user.go b/backend/mock/user.go index c4807247..f33e02ba 100644 --- a/backend/mock/user.go +++ b/backend/mock/user.go @@ -1 +1 @@ -package mock \ No newline at end of file +package mock diff --git a/backend/model/config_spider/common.go b/backend/model/config_spider/common.go new file mode 100644 index 00000000..c803755a --- /dev/null +++ b/backend/model/config_spider/common.go @@ -0,0 +1,30 @@ +package config_spider + +import "crawlab/entity" + +func GetAllFields(data entity.ConfigSpiderData) []entity.Field { + var fields []entity.Field + for _, stage := range data.Stages { + for _, field := range stage.Fields { + fields = append(fields, field) + } + } + return fields +} + +func GetStartStageName(data entity.ConfigSpiderData) string { + // 如果 start_stage 设置了且在 stages 里,则返回 + if data.StartStage != "" { + for stageName := range data.Stages { + if stageName == data.StartStage { + return data.StartStage + } + } + } + + // 否则返回第一个 stage + for stageName := range data.Stages { + return stageName + } + return "" +} diff --git a/backend/model/config_spider/scrapy.go b/backend/model/config_spider/scrapy.go new file mode 100644 index 00000000..6fcb77f0 --- /dev/null +++ b/backend/model/config_spider/scrapy.go @@ -0,0 +1,258 @@ +package config_spider + +import ( + "crawlab/constants" + "crawlab/entity" + "crawlab/model" + "crawlab/utils" + "errors" + "fmt" + "path/filepath" +) + +type ScrapyGenerator struct { + Spider model.Spider + ConfigData entity.ConfigSpiderData +} + +// 生成爬虫文件 +func (g ScrapyGenerator) Generate() error { + // 生成 items.py + if err := g.ProcessItems(); err != nil { + return err + } + + // 生成 spider.py + if err := g.ProcessSpider(); err != nil { + return err + } + return nil +} + +// 生成 items.py +func (g ScrapyGenerator) ProcessItems() error { + // 待处理文件名 + src := g.Spider.Src + filePath := filepath.Join(src, "config_spider", "items.py") + + // 获取所有字段 + fields := g.GetAllFields() + + // 字段名列表(包含默认字段名) + fieldNames := []string{ + "_id", + "task_id", + "ts", + } + + // 加入字段 + for _, field := range fields { + fieldNames = append(fieldNames, field.Name) + } + + // 将字段名转化为python代码 + str := "" + for _, fieldName := range fieldNames { + line := g.PadCode(fmt.Sprintf("%s = scrapy.Field()", fieldName), 1) + str += line + } + + // 将占位符替换为代码 + if err := utils.SetFileVariable(filePath, constants.AnchorItems, str); err != nil { + return err + } + + return nil +} + +// 生成 spider.py +func (g ScrapyGenerator) ProcessSpider() error { + // 待处理文件名 + src := g.Spider.Src + filePath := filepath.Join(src, "config_spider", "spiders", "spider.py") + + // 替换 start_stage + if err := 
utils.SetFileVariable(filePath, constants.AnchorStartStage, "parse_"+GetStartStageName(g.ConfigData)); err != nil { + return err + } + + // 替换 start_url + if err := utils.SetFileVariable(filePath, constants.AnchorStartUrl, g.ConfigData.StartUrl); err != nil { + return err + } + + // 替换 parsers + strParser := "" + for stageName, stage := range g.ConfigData.Stages { + stageStr := g.GetParserString(stageName, stage) + strParser += stageStr + } + if err := utils.SetFileVariable(filePath, constants.AnchorParsers, strParser); err != nil { + return err + } + + return nil +} + +func (g ScrapyGenerator) GetParserString(stageName string, stage entity.Stage) string { + // 构造函数定义行 + strDef := g.PadCode(fmt.Sprintf("def parse_%s(self, response):", stageName), 1) + + strParse := "" + if stage.IsList { + // 列表逻辑 + strParse = g.GetListParserString(stageName, stage) + } else { + // 非列表逻辑 + strParse = g.GetNonListParserString(stageName, stage) + } + + // 构造 + str := fmt.Sprintf(`%s%s`, strDef, strParse) + + return str +} + +func (g ScrapyGenerator) PadCode(str string, num int) string { + res := "" + for i := 0; i < num; i++ { + res += " " + } + res += str + res += "\n" + return res +} + +func (g ScrapyGenerator) GetNonListParserString(stageName string, stage entity.Stage) string { + str := "" + + // 获取或构造item + str += g.PadCode("item = Item() if response.meta.get('item') is None else response.meta.get('item')", 2) + + // 遍历字段列表 + for _, f := range stage.Fields { + line := fmt.Sprintf(`item['%s'] = response.%s.extract_first()`, f.Name, g.GetExtractStringFromField(f)) + line = g.PadCode(line, 2) + str += line + } + + // next stage 字段 + if f, err := g.GetNextStageField(stage); err == nil { + // 如果找到 next stage 字段,进行下一个回调 + str += g.PadCode(fmt.Sprintf(`yield scrapy.Request(url="get_real_url(response, item['%s'])", callback=self.parse_%s, meta={'item': item})`, f.Name, f.NextStage), 2) + } else { + // 如果没找到 next stage 字段,返回 item + str += g.PadCode(fmt.Sprintf(`yield item`), 2) + } + + // 加入末尾换行 + str += g.PadCode("", 0) + + return str +} + +func (g ScrapyGenerator) GetListParserString(stageName string, stage entity.Stage) string { + str := "" + + // 获取前一个 stage 的 item + str += g.PadCode(`prev_item = response.meta.get('item')`, 2) + + // for 循环遍历列表 + str += g.PadCode(fmt.Sprintf(`for elem in response.%s:`, g.GetListString(stage)), 2) + + // 构造item + str += g.PadCode(`item = Item()`, 3) + + // 遍历字段列表 + for _, f := range stage.Fields { + line := fmt.Sprintf(`item['%s'] = elem.%s.extract_first()`, f.Name, g.GetExtractStringFromField(f)) + line = g.PadCode(line, 3) + str += line + } + + // 把前一个 stage 的 item 值赋给当前 item + str += g.PadCode(`if prev_item is not None:`, 3) + str += g.PadCode(`for key, value in prev_item.items():`, 4) + str += g.PadCode(`item[key] = value`, 5) + + // next stage 字段 + if f, err := g.GetNextStageField(stage); err == nil { + // 如果找到 next stage 字段,进行下一个回调 + str += g.PadCode(fmt.Sprintf(`yield scrapy.Request(url=get_real_url(response, item['%s']), callback=self.parse_%s, meta={'item': item})`, f.Name, f.NextStage), 3) + } else { + // 如果没找到 next stage 字段,返回 item + str += g.PadCode(fmt.Sprintf(`yield item`), 3) + } + + // 分页 + if stage.PageCss != "" || stage.PageXpath != "" { + str += g.PadCode(fmt.Sprintf(`next_url = response.%s.extract_first()`, g.GetExtractStringFromStage(stage)), 2) + str += g.PadCode(fmt.Sprintf(`yield scrapy.Request(url=get_real_url(response, next_url), callback=self.parse_%s, meta={'item': prev_item})`, stageName), 2) + } + + // 加入末尾换行 + str += g.PadCode("", 0) + + return 
str +} + +// 获取所有字段 +func (g ScrapyGenerator) GetAllFields() []entity.Field { + return GetAllFields(g.ConfigData) +} + +// 获取包含 next stage 的字段 +func (g ScrapyGenerator) GetNextStageField(stage entity.Stage) (entity.Field, error) { + for _, field := range stage.Fields { + if field.NextStage != "" { + return field, nil + } + } + return entity.Field{}, errors.New("cannot find next stage field") +} + +func (g ScrapyGenerator) GetExtractStringFromField(f entity.Field) string { + if f.Css != "" { + // 如果为CSS + if f.Attr == "" { + // 文本 + return fmt.Sprintf(`css('%s::text')`, f.Css) + } else { + // 属性 + return fmt.Sprintf(`css('%s::attr("%s")')`, f.Css, f.Attr) + } + } else { + // 如果为XPath + if f.Attr == "" { + // 文本 + return fmt.Sprintf(`xpath('string(%s)')`, f.Xpath) + } else { + // 属性 + return fmt.Sprintf(`xpath('%s/@%s')`, f.Xpath, f.Attr) + } + } +} + +func (g ScrapyGenerator) GetExtractStringFromStage(stage entity.Stage) string { + // 分页元素属性,默认为 href + pageAttr := "href" + if stage.PageAttr != "" { + pageAttr = stage.PageAttr + } + + if stage.PageCss != "" { + // 如果为CSS + return fmt.Sprintf(`css('%s::attr("%s")')`, stage.PageCss, pageAttr) + } else { + // 如果为XPath + return fmt.Sprintf(`xpath('%s/@%s')`, stage.PageXpath, pageAttr) + } +} + +func (g ScrapyGenerator) GetListString(stage entity.Stage) string { + if stage.ListCss != "" { + return fmt.Sprintf(`css('%s')`, stage.ListCss) + } else { + return fmt.Sprintf(`xpath('%s')`, stage.ListXpath) + } +} diff --git a/backend/model/spider.go b/backend/model/spider.go index 5c2c92e8..a0d72c1c 100644 --- a/backend/model/spider.go +++ b/backend/model/spider.go @@ -1,11 +1,17 @@ package model import ( + "crawlab/constants" "crawlab/database" "crawlab/entity" + "crawlab/utils" + "errors" "github.com/apex/log" "github.com/globalsign/mgo" "github.com/globalsign/mgo/bson" + "gopkg.in/yaml.v2" + "io/ioutil" + "path/filepath" "runtime/debug" "time" ) @@ -25,25 +31,20 @@ type Spider struct { Site string `json:"site" bson:"site"` // 爬虫网站 Envs []Env `json:"envs" bson:"envs"` // 环境变量 Remark string `json:"remark" bson:"remark"` // 备注 + Src string `json:"src" bson:"src"` // 源码位置 + // 自定义爬虫 - Src string `json:"src" bson:"src"` // 源码位置 Cmd string `json:"cmd" bson:"cmd"` // 执行命令 + // 可配置爬虫 + Template string `json:"template" bson:"template"` // Spiderfile模版 + // 前端展示 - LastRunTs time.Time `json:"last_run_ts"` // 最后一次执行时间 - LastStatus string `json:"last_status"` // 最后执行状态 - - // TODO: 可配置爬虫 - //Fields []interface{} `json:"fields"` - //DetailFields []interface{} `json:"detail_fields"` - //CrawlType string `json:"crawl_type"` - //StartUrl string `json:"start_url"` - //UrlPattern string `json:"url_pattern"` - //ItemSelector string `json:"item_selector"` - //ItemSelectorType string `json:"item_selector_type"` - //PaginationSelector string `json:"pagination_selector"` - //PaginationSelectorType string `json:"pagination_selector_type"` + LastRunTs time.Time `json:"last_run_ts"` // 最后一次执行时间 + LastStatus string `json:"last_status"` // 最后执行状态 + Config entity.ConfigSpiderData `json:"config"` // 可配置爬虫配置 + // 时间 CreateTs time.Time `json:"create_ts" bson:"create_ts"` UpdateTs time.Time `json:"update_ts" bson:"update_ts"` } @@ -98,13 +99,14 @@ func (spider *Spider) GetLastTask() (Task, error) { return tasks[0], nil } +// 删除爬虫 func (spider *Spider) Delete() error { s, c := database.GetCol("spiders") defer s.Close() return c.RemoveId(spider.Id) } -// 爬虫列表 +// 获取爬虫列表 func GetSpiderList(filter interface{}, skip int, limit int) ([]Spider, int, error) { s, c := 
database.GetCol("spiders") defer s.Close() @@ -116,6 +118,10 @@ func GetSpiderList(filter interface{}, skip int, limit int) ([]Spider, int, erro return spiders, 0, err } + if spiders == nil { + spiders = []Spider{} + } + // 遍历爬虫列表 for i, spider := range spiders { // 获取最后一次任务 @@ -136,7 +142,7 @@ func GetSpiderList(filter interface{}, skip int, limit int) ([]Spider, int, erro return spiders, count, nil } -// 获取爬虫 +// 获取爬虫(根据FileId) func GetSpiderByFileId(fileId bson.ObjectId) *Spider { s, c := database.GetCol("spiders") defer s.Close() @@ -150,7 +156,7 @@ func GetSpiderByFileId(fileId bson.ObjectId) *Spider { return result } -// 获取爬虫 +// 获取爬虫(根据名称) func GetSpiderByName(name string) *Spider { s, c := database.GetCol("spiders") defer s.Close() @@ -158,26 +164,36 @@ func GetSpiderByName(name string) *Spider { var result *Spider if err := c.Find(bson.M{"name": name}).One(&result); err != nil { log.Errorf("get spider error: %s, spider_name: %s", err.Error(), name) - debug.PrintStack() + //debug.PrintStack() return nil } return result } -// 获取爬虫 +// 获取爬虫(根据ID) func GetSpider(id bson.ObjectId) (Spider, error) { s, c := database.GetCol("spiders") defer s.Close() - var result Spider - if err := c.FindId(id).One(&result); err != nil { + // 获取爬虫 + var spider Spider + if err := c.FindId(id).One(&spider); err != nil { if err != mgo.ErrNotFound { log.Errorf("get spider error: %s, id: %id", err.Error(), id.Hex()) debug.PrintStack() } - return result, err + return spider, err } - return result, nil + + // 如果为可配置爬虫,获取爬虫配置 + if spider.Type == constants.Configurable && utils.Exists(filepath.Join(spider.Src, "Spiderfile")) { + config, err := GetConfigSpiderData(spider) + if err != nil { + return spider, err + } + spider.Config = config + } + return spider, nil } // 更新爬虫 @@ -217,10 +233,12 @@ func RemoveSpider(id bson.ObjectId) error { s, gf := database.GetGridFs("files") defer s.Close() - if err := gf.RemoveId(result.FileId); err != nil { - log.Error("remove file error, id:" + result.FileId.Hex()) - debug.PrintStack() - return err + if result.FileId.Hex() != constants.ObjectIdNull { + if err := gf.RemoveId(result.FileId); err != nil { + log.Error("remove file error, id:" + result.FileId.Hex()) + debug.PrintStack() + return err + } } return nil @@ -245,7 +263,7 @@ func RemoveAllSpider() error { return nil } -// 爬虫总数 +// 获取爬虫总数 func GetSpiderCount() (int, error) { s, c := database.GetCol("spiders") defer s.Close() @@ -257,7 +275,7 @@ func GetSpiderCount() (int, error) { return count, nil } -// 爬虫类型 +// 获取爬虫类型 func GetSpiderTypes() ([]*entity.SpiderType, error) { s, c := database.GetCol("spiders") defer s.Close() @@ -277,3 +295,35 @@ func GetSpiderTypes() ([]*entity.SpiderType, error) { return types, nil } + +func GetConfigSpiderData(spider Spider) (entity.ConfigSpiderData, error) { + // 构造配置数据 + configData := entity.ConfigSpiderData{} + + // 校验爬虫类别 + if spider.Type != constants.Configurable { + return configData, errors.New("not a configurable spider") + } + + // Spiderfile 目录 + sfPath := filepath.Join(spider.Src, "Spiderfile") + + // 读取YAML文件 + yamlFile, err := ioutil.ReadFile(sfPath) + if err != nil { + return configData, err + } + + // 反序列化 + if err := yaml.Unmarshal(yamlFile, &configData); err != nil { + return configData, err + } + + // 赋值 stage_name + for stageName, stage := range configData.Stages { + stage.Name = stageName + configData.Stages[stageName] = stage + } + + return configData, nil +} diff --git a/backend/routes/config_spider.go b/backend/routes/config_spider.go new file mode 100644 index 
00000000..e387935a --- /dev/null +++ b/backend/routes/config_spider.go @@ -0,0 +1,316 @@ +package routes + +import ( + "crawlab/constants" + "crawlab/entity" + "crawlab/model" + "crawlab/services" + "crawlab/utils" + "fmt" + "github.com/gin-gonic/gin" + "github.com/globalsign/mgo/bson" + "github.com/spf13/viper" + "gopkg.in/yaml.v2" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strings" +) + +// 添加可配置爬虫 +func PutConfigSpider(c *gin.Context) { + var spider model.Spider + if err := c.ShouldBindJSON(&spider); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 爬虫名称不能为空 + if spider.Name == "" { + HandleErrorF(http.StatusBadRequest, c, "spider name should not be empty") + return + } + + // 模版名不能为空 + if spider.Template == "" { + HandleErrorF(http.StatusBadRequest, c, "spider template should not be empty") + return + } + + // 判断爬虫是否存在 + if spider := model.GetSpiderByName(spider.Name); spider != nil { + HandleErrorF(http.StatusBadRequest, c, fmt.Sprintf("spider for '%s' already exists", spider.Name)) + return + } + + // 设置爬虫类别 + spider.Type = constants.Configurable + + // 将FileId置空 + spider.FileId = bson.ObjectIdHex(constants.ObjectIdNull) + + // 创建爬虫目录 + spiderDir := filepath.Join(viper.GetString("spider.path"), spider.Name) + if utils.Exists(spiderDir) { + if err := os.RemoveAll(spiderDir); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + } + if err := os.MkdirAll(spiderDir, 0777); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + spider.Src = spiderDir + + // 复制Spiderfile模版 + contentByte, err := ioutil.ReadFile("./template/spiderfile/Spiderfile." + spider.Template) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + f, err := os.Create(filepath.Join(spider.Src, "Spiderfile")) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + defer f.Close() + if _, err := f.Write(contentByte); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 添加爬虫到数据库 + if err := spider.Add(); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: spider, + }) +} + +// 更改可配置爬虫 +func PostConfigSpider(c *gin.Context) { + PostSpider(c) +} + +// 上传可配置爬虫Spiderfile +func UploadConfigSpider(c *gin.Context) { + id := c.Param("id") + + // 获取爬虫 + var spider model.Spider + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleErrorF(http.StatusBadRequest, c, fmt.Sprintf("cannot find spider (id: %s)", id)) + } + + // 获取上传文件 + file, header, err := c.Request.FormFile("file") + if err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 文件名称必须为Spiderfile + filename := header.Filename + if filename != "Spiderfile" && filename != "Spiderfile.yaml" && filename != "Spiderfile.yml" { + HandleErrorF(http.StatusBadRequest, c, "filename must be 'Spiderfile(.yaml|.yml)'") + return + } + + // 爬虫目录 + spiderDir := filepath.Join(viper.GetString("spider.path"), spider.Name) + + // 爬虫Spiderfile文件路径 + sfPath := filepath.Join(spiderDir, filename) + + // 创建(如果不存在)或打开Spiderfile(如果存在) + var f *os.File + if utils.Exists(sfPath) { + f, err = os.OpenFile(sfPath, os.O_WRONLY, 0777) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + } + } else { + f, err = os.Create(sfPath) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + } + } + + // 将上传的文件拷贝到爬虫Spiderfile文件 
+ _, err = io.Copy(f, file) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 关闭Spiderfile文件 + _ = f.Close() + + // 构造配置数据 + configData := entity.ConfigSpiderData{} + + // 读取YAML文件 + yamlFile, err := ioutil.ReadFile(sfPath) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 反序列化 + if err := yaml.Unmarshal(yamlFile, &configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 根据序列化后的数据处理爬虫文件 + if err := services.ProcessSpiderFilesFromConfigData(spider, configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func PostConfigSpiderSpiderfile(c *gin.Context) { + type Body struct { + Content string `json:"content"` + } + + id := c.Param("id") + + // 文件内容 + var reqBody Body + if err := c.ShouldBindJSON(&reqBody); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + content := reqBody.Content + + // 获取爬虫 + var spider model.Spider + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleErrorF(http.StatusBadRequest, c, fmt.Sprintf("cannot find spider (id: %s)", id)) + return + } + + // 反序列化 + var configData entity.ConfigSpiderData + if err := yaml.Unmarshal([]byte(content), &configData); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 校验configData + if err := services.ValidateSpiderfile(configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 写文件 + if err := ioutil.WriteFile(filepath.Join(spider.Src, "Spiderfile"), []byte(content), os.ModePerm); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 根据序列化后的数据处理爬虫文件 + if err := services.ProcessSpiderFilesFromConfigData(spider, configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func PostConfigSpiderConfig(c *gin.Context) { + id := c.Param("id") + + // 获取爬虫 + var spider model.Spider + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleErrorF(http.StatusBadRequest, c, fmt.Sprintf("cannot find spider (id: %s)", id)) + return + } + + // 反序列化配置数据 + var configData entity.ConfigSpiderData + if err := c.ShouldBindJSON(&configData); err != nil { + HandleError(http.StatusBadRequest, c, err) + return + } + + // 校验configData + if err := services.ValidateSpiderfile(configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 替换Spiderfile文件 + if err := services.GenerateSpiderfileFromConfigData(spider, configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + // 根据序列化后的数据处理爬虫文件 + if err := services.ProcessSpiderFilesFromConfigData(spider, configData); err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + }) +} + +func GetConfigSpiderConfig(c *gin.Context) { + id := c.Param("id") + + // 校验ID + if !bson.IsObjectIdHex(id) { + HandleErrorF(http.StatusBadRequest, c, "invalid id") + } + + // 获取爬虫 + spider, err := model.GetSpider(bson.ObjectIdHex(id)) + if err != nil { + HandleError(http.StatusInternalServerError, c, err) + return + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: spider.Config, + }) +} + +// 获取模版名称列表 +func 
GetConfigSpiderTemplateList(c *gin.Context) { + var data []string + for _, fInfo := range utils.ListDir("./template/spiderfile") { + templateName := strings.Replace(fInfo.Name(), "Spiderfile.", "", -1) + data = append(data, templateName) + } + + c.JSON(http.StatusOK, Response{ + Status: "ok", + Message: "success", + Data: data, + }) +} diff --git a/backend/routes/spider.go b/backend/routes/spider.go index 4c26fcee..588811e3 100644 --- a/backend/routes/spider.go +++ b/backend/routes/spider.go @@ -34,7 +34,7 @@ func GetSpiderList(c *gin.Context) { "name": bson.M{"$regex": bson.RegEx{Pattern: keyword, Options: "im"}}, } - if t != "" { + if t != "" && t != "all" { filter["type"] = t } @@ -153,6 +153,7 @@ func PutSpider(c *gin.Context) { return } + // 获取 GridFS 实例 s, gf := database.GetGridFs("files") defer s.Close() diff --git a/backend/routes/task.go b/backend/routes/task.go index c84ea210..9c0aa43f 100644 --- a/backend/routes/task.go +++ b/backend/routes/task.go @@ -36,7 +36,7 @@ func GetTaskList(c *gin.Context) { data.PageNum = 1 } if data.PageSize == 0 { - data.PageNum = 10 + data.PageSize = 10 } // 过滤条件 diff --git a/backend/services/config_spider.go b/backend/services/config_spider.go new file mode 100644 index 00000000..7c736cc7 --- /dev/null +++ b/backend/services/config_spider.go @@ -0,0 +1,265 @@ +package services + +import ( + "crawlab/constants" + "crawlab/database" + "crawlab/entity" + "crawlab/model" + "crawlab/model/config_spider" + "crawlab/utils" + "errors" + "fmt" + "github.com/apex/log" + "github.com/globalsign/mgo/bson" + uuid "github.com/satori/go.uuid" + "github.com/spf13/viper" + "gopkg.in/yaml.v2" + "os" + "path/filepath" + "strings" +) + +func GenerateConfigSpiderFiles(spider model.Spider, configData entity.ConfigSpiderData) error { + // 校验Spiderfile正确性 + if err := ValidateSpiderfile(configData); err != nil { + return err + } + + // 构造代码生成器 + generator := config_spider.ScrapyGenerator{ + Spider: spider, + ConfigData: configData, + } + + // 生成代码 + if err := generator.Generate(); err != nil { + return err + } + + return nil +} + +// 验证Spiderfile +func ValidateSpiderfile(configData entity.ConfigSpiderData) error { + // 获取所有字段 + fields := config_spider.GetAllFields(configData) + + // 校验是否存在 start_url + if configData.StartUrl == "" { + return errors.New("spiderfile invalid: start_url is empty") + } + + // 校验是否存在 start_stage + if configData.StartStage == "" { + return errors.New("spiderfile invalid: start_stage is empty") + } + + // 校验是否存在 stages + if len(configData.Stages) == 0 { + return errors.New("spiderfile invalid: stages is empty") + } + + // 校验stages + dict := map[string]int{} + for stageName, stage := range configData.Stages { + // stage 名称不能为空 + if stageName == "" { + return errors.New("spiderfile invalid: stage name is empty") + } + + // stage 名称不能为保留字符串 + // NOTE: 如果有其他Engine,可以扩展,默认为Scrapy + if configData.Engine == "" || configData.Engine == constants.EngineScrapy { + if strings.Contains(constants.ScrapyProtectedStageNames, stageName) { + return errors.New(fmt.Sprintf("spiderfile invalid: stage name '%s' is protected", stageName)) + } + } else { + return errors.New(fmt.Sprintf("spiderfile invalid: engine '%s' is not implemented", configData.Engine)) + } + + // stage 名称不能重复 + if dict[stageName] == 1 { + return errors.New(fmt.Sprintf("spiderfile invalid: stage name '%s' is duplicated", stageName)) + } + dict[stageName] = 1 + + // stage 字段不能为空 + if len(stage.Fields) == 0 { + return errors.New(fmt.Sprintf("spiderfile invalid: stage '%s' has no fields", stageName)) + } 
+ + // 是否包含 next_stage + hasNextStage := false + + // 遍历字段列表 + for _, field := range stage.Fields { + // stage 的 next stage 只能有一个 + if field.NextStage != "" { + if hasNextStage { + return errors.New(fmt.Sprintf("spiderfile invalid: stage '%s' has more than 1 next_stage", stageName)) + } + hasNextStage = true + } + + // 字段里 css 和 xpath 只能包含一个 + if field.Css != "" && field.Xpath != "" { + return errors.New(fmt.Sprintf("spiderfile invalid: field '%s' in stage '%s' has both css and xpath set which is prohibited", field.Name, stageName)) + } + } + + // stage 里 page_css 和 page_xpath 只能包含一个 + if stage.PageCss != "" && stage.PageXpath != "" { + return errors.New(fmt.Sprintf("spiderfile invalid: stage '%s' has both page_css and page_xpath set which is prohibited", stageName)) + } + + // stage 里 list_css 和 list_xpath 只能包含一个 + if stage.ListCss != "" && stage.ListXpath != "" { + return errors.New(fmt.Sprintf("spiderfile invalid: stage '%s' has both list_css and list_xpath set which is prohibited", stageName)) + } + + // 如果 stage 的 is_list 为 true 但 list_css 为空,报错 + if stage.IsList && (stage.ListCss == "" && stage.ListXpath == "") { + return errors.New("spiderfile invalid: stage with is_list = true should have either list_css or list_xpath being set") + } + } + + // 校验字段唯一性 + if !IsUniqueConfigSpiderFields(fields) { + return errors.New("spiderfile invalid: fields not unique") + } + + // 字段名称不能为保留字符串 + for _, field := range fields { + if strings.Contains(constants.ScrapyProtectedFieldNames, field.Name) { + return errors.New(fmt.Sprintf("spiderfile invalid: field name '%s' is protected", field.Name)) + } + } + + return nil +} + +func IsUniqueConfigSpiderFields(fields []entity.Field) bool { + dict := map[string]int{} + for _, field := range fields { + if dict[field.Name] == 1 { + return false + } + dict[field.Name] = 1 + } + return true +} + +func ProcessSpiderFilesFromConfigData(spider model.Spider, configData entity.ConfigSpiderData) error { + spiderDir := spider.Src + + // 赋值 stage_name + for stageName, stage := range configData.Stages { + stage.Name = stageName + configData.Stages[stageName] = stage + } + + // 删除已有的爬虫文件 + for _, fInfo := range utils.ListDir(spiderDir) { + // 不删除Spiderfile + if fInfo.Name() == "Spiderfile" { + continue + } + + // 删除其他文件 + if err := os.RemoveAll(filepath.Join(spiderDir, fInfo.Name())); err != nil { + return err + } + } + + // 拷贝爬虫文件 + tplDir := "./template/scrapy" + for _, fInfo := range utils.ListDir(tplDir) { + // 跳过Spiderfile + if fInfo.Name() == "Spiderfile" { + continue + } + + srcPath := filepath.Join(tplDir, fInfo.Name()) + if fInfo.IsDir() { + dirPath := filepath.Join(spiderDir, fInfo.Name()) + if err := utils.CopyDir(srcPath, dirPath); err != nil { + return err + } + } else { + if err := utils.CopyFile(srcPath, filepath.Join(spiderDir, fInfo.Name())); err != nil { + return err + } + } + } + + // 更改爬虫文件 + if err := GenerateConfigSpiderFiles(spider, configData); err != nil { + return err + } + + // 打包为 zip 文件 + files, err := utils.GetFilesFromDir(spiderDir) + if err != nil { + return err + } + randomId := uuid.NewV4() + tmpFilePath := filepath.Join(viper.GetString("other.tmppath"), spider.Name+"."+randomId.String()+".zip") + spiderZipFileName := spider.Name + ".zip" + if err := utils.Compress(files, tmpFilePath); err != nil { + return err + } + + // 获取 GridFS 实例 + s, gf := database.GetGridFs("files") + defer s.Close() + + // 判断文件是否已经存在 + var gfFile model.GridFs + if err := gf.Find(bson.M{"filename": spiderZipFileName}).One(&gfFile); err == nil { + // 已经存在文件,则删除 + 
_ = gf.RemoveId(gfFile.Id) + } + + // 上传到GridFs + fid, err := UploadToGridFs(spiderZipFileName, tmpFilePath) + if err != nil { + log.Errorf("upload to grid fs error: %s", err.Error()) + return err + } + + // 保存爬虫 FileId + spider.FileId = fid + _ = spider.Save() + + return nil +} + +func GenerateSpiderfileFromConfigData(spider model.Spider, configData entity.ConfigSpiderData) error { + // Spiderfile 路径 + sfPath := filepath.Join(spider.Src, "Spiderfile") + + // 生成Yaml内容 + sfContentByte, err := yaml.Marshal(configData) + if err != nil { + return err + } + + // 打开文件 + var f *os.File + if utils.Exists(sfPath) { + f, err = os.OpenFile(sfPath, os.O_WRONLY|os.O_TRUNC, 0777) + } else { + f, err = os.OpenFile(sfPath, os.O_CREATE, 0777) + } + if err != nil { + return err + } + defer f.Close() + + // 写入内容 + if _, err := f.Write(sfContentByte); err != nil { + return err + } + + return nil +} diff --git a/backend/services/node.go b/backend/services/node.go index dffe5ac9..be916f10 100644 --- a/backend/services/node.go +++ b/backend/services/node.go @@ -258,7 +258,7 @@ func InitNodeService() error { return err } - // 如果为主节点,每30秒刷新所有节点信息 + // 如果为主节点,每10秒刷新所有节点信息 if model.IsMaster() { spec := "*/10 * * * * *" if _, err := c.AddFunc(spec, UpdateNodeStatus); err != nil { diff --git a/backend/services/register/register.go b/backend/services/register/register.go index ccd8b67d..ed4e1891 100644 --- a/backend/services/register/register.go +++ b/backend/services/register/register.go @@ -6,6 +6,7 @@ import ( "net" "reflect" "runtime/debug" + "sync" ) type Register interface { @@ -97,25 +98,31 @@ func getMac() (string, error) { var register Register // 获得注册器 -func GetRegister() Register { - if register != nil { - return register - } +var once sync.Once - registerType := viper.GetString("server.register.type") - if registerType == "mac" { - register = &MacRegister{} - } else { - ip := viper.GetString("server.register.ip") - if ip == "" { - log.Error("server.register.ip is empty") - debug.PrintStack() - return nil +func GetRegister() Register { + once.Do(func() { + + if register != nil { + register = register } - register = &IpRegister{ - Ip: ip, + + registerType := viper.GetString("server.register.type") + if registerType == "mac" { + register = &MacRegister{} + } else { + ip := viper.GetString("server.register.ip") + if ip == "" { + log.Error("server.register.ip is empty") + debug.PrintStack() + register = nil + } + register = &IpRegister{ + Ip: ip, + } } - } - log.Info("register type is :" + reflect.TypeOf(register).String()) + log.Info("register type is :" + reflect.TypeOf(register).String()) + + }) return register } diff --git a/backend/services/spider.go b/backend/services/spider.go index 84d218bb..3922d822 100644 --- a/backend/services/spider.go +++ b/backend/services/spider.go @@ -116,12 +116,23 @@ func PublishAllSpiders() { // 发布爬虫 func PublishSpider(spider model.Spider) { - // 查询gf file,不存在则删除 - gfFile := model.GetGridFs(spider.FileId) - if gfFile == nil { - _ = model.RemoveSpider(spider.Id) + var gfFile *model.GridFs + if spider.FileId.Hex() != constants.ObjectIdNull { + // 查询gf file,不存在则标记为爬虫文件不存在 + gfFile = model.GetGridFs(spider.FileId) + if gfFile == nil { + spider.FileId = constants.ObjectIdNull + _ = spider.Save() + return + } + } + + // 如果FileId为空,表示还没有上传爬虫到GridFS,则跳过 + if spider.FileId == bson.ObjectIdHex(constants.ObjectIdNull) { return } + + // 获取爬虫同步实例 spiderSync := spider_handler.SpiderSync{ Spider: spider, } diff --git a/backend/services/spider_handler/spider.go 
b/backend/services/spider_handler/spider.go index cce025dc..c3a2500d 100644 --- a/backend/services/spider_handler/spider.go +++ b/backend/services/spider_handler/spider.go @@ -10,6 +10,7 @@ import ( "github.com/spf13/viper" "io" "os" + "os/exec" "path/filepath" "runtime/debug" ) @@ -99,7 +100,6 @@ func (s *SpiderSync) Download() { // 创建临时文件 tmpFilePath := filepath.Join(tmpPath, randomId.String()+".zip") tmpFile := utils.OpenFile(tmpFilePath) - defer utils.Close(tmpFile) // 将该文件写入临时文件 if _, err := io.Copy(tmpFile, f); err != nil { @@ -119,6 +119,15 @@ func (s *SpiderSync) Download() { return } + //递归修改目标文件夹权限 + // 解决scrapy.setting中开启LOG_ENABLED 和 LOG_FILE时不能创建log文件的问题 + cmd := exec.Command("chmod", "-R", "777", dstPath) + if err := cmd.Run(); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return + } + // 关闭临时文件 if err := tmpFile.Close(); err != nil { log.Errorf(err.Error()) diff --git a/backend/services/task.go b/backend/services/task.go index 9e584e82..0339118a 100644 --- a/backend/services/task.go +++ b/backend/services/task.go @@ -217,7 +217,22 @@ func ExecuteShellCmd(cmdStr string, cwd string, t model.Task, s model.Spider) (e } // 环境变量配置 - cmd = SetEnv(cmd, s.Envs, t.Id, s.Col) + envs := s.Envs + if s.Type == constants.Configurable { + // 数据库配置 + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_HOST", Value: viper.GetString("mongo.host")}) + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_PORT", Value: viper.GetString("mongo.port")}) + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_DB", Value: viper.GetString("mongo.db")}) + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_USERNAME", Value: viper.GetString("mongo.username")}) + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_PASSWORD", Value: viper.GetString("mongo.password")}) + envs = append(envs, model.Env{Name: "CRAWLAB_MONGO_AUTHSOURCE", Value: viper.GetString("mongo.authSource")}) + + // 设置配置 + for envName, envValue := range s.Config.Settings { + envs = append(envs, model.Env{Name: "CRAWLAB_SETTING_" + envName, Value: envValue}) + } + } + cmd = SetEnv(cmd, envs, t.Id, s.Col) // 起一个goroutine来监控进程 ch := utils.TaskExecChanMap.ChanBlocked(t.Id) @@ -293,9 +308,12 @@ func SaveTaskResultCount(id string) func() { // 执行任务 func ExecuteTask(id int) { - if flag, _ := LockList.Load(id); flag.(bool) { - log.Debugf(GetWorkerPrefix(id) + "正在执行任务...") - return + if flag, ok := LockList.Load(id); ok { + if flag.(bool) { + log.Debugf(GetWorkerPrefix(id) + "正在执行任务...") + return + } + } // 上锁 @@ -369,7 +387,14 @@ func ExecuteTask(id int) { ) // 执行命令 - cmd := spider.Cmd + var cmd string + if spider.Type == constants.Configurable { + // 可配置爬虫命令 + cmd = "scrapy crawl config_spider" + } else { + // 自定义爬虫命令 + cmd = spider.Cmd + } // 加入参数 if t.Param != "" { @@ -452,6 +477,29 @@ func GetTaskLog(id string) (logStr string, err error) { } if IsMasterNode(task.NodeId.Hex()) { + if !utils.Exists(task.LogPath) { + fileDir, err := MakeLogDir(task) + + if err != nil { + log.Errorf(err.Error()) + } + + fileP := GetLogFilePaths(fileDir) + + // 获取日志文件路径 + fLog, err := os.Create(fileP) + defer fLog.Close() + if err != nil { + log.Errorf("create task log file error: %s", fileP) + debug.PrintStack() + } + task.LogPath = fileP + if err := task.Save(); err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + } + + } // 若为主节点,获取本机日志 logBytes, err := model.GetLocalLog(task.LogPath) if err != nil { diff --git a/backend/template/scrapy/config_spider/__init__.py b/backend/template/scrapy/config_spider/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/backend/template/scrapy/config_spider/items.py b/backend/template/scrapy/config_spider/items.py new file mode 100644 index 00000000..16681a52 --- /dev/null +++ b/backend/template/scrapy/config_spider/items.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +# Define here the models for your scraped items +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/items.html + +import scrapy + + +class Item(scrapy.Item): +###ITEMS### diff --git a/backend/template/scrapy/config_spider/middlewares.py b/backend/template/scrapy/config_spider/middlewares.py new file mode 100644 index 00000000..e864bd0b --- /dev/null +++ b/backend/template/scrapy/config_spider/middlewares.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- + +# Define here the models for your spider middleware +# +# See documentation in: +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +from scrapy import signals + + +class ConfigSpiderSpiderMiddleware(object): + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the spider middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_spider_input(self, response, spider): + # Called for each response that goes through the spider + # middleware and into the spider. + + # Should return None or raise an exception. + return None + + def process_spider_output(self, response, result, spider): + # Called with the results returned from the Spider, after + # it has processed the response. + + # Must return an iterable of Request, dict or Item objects. + for i in result: + yield i + + def process_spider_exception(self, response, exception, spider): + # Called when a spider or process_spider_input() method + # (from other spider middleware) raises an exception. + + # Should return either None or an iterable of Request, dict + # or Item objects. + pass + + def process_start_requests(self, start_requests, spider): + # Called with the start requests of the spider, and works + # similarly to the process_spider_output() method, except + # that it doesn’t have a response associated. + + # Must return only requests (not items). + for r in start_requests: + yield r + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) + + +class ConfigSpiderDownloaderMiddleware(object): + # Not all methods need to be defined. If a method is not defined, + # scrapy acts as if the downloader middleware does not modify the + # passed objects. + + @classmethod + def from_crawler(cls, crawler): + # This method is used by Scrapy to create your spiders. + s = cls() + crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) + return s + + def process_request(self, request, spider): + # Called for each request that goes through the downloader + # middleware. + + # Must either: + # - return None: continue processing this request + # - or return a Response object + # - or return a Request object + # - or raise IgnoreRequest: process_exception() methods of + # installed downloader middleware will be called + return None + + def process_response(self, request, response, spider): + # Called with the response returned from the downloader. 
+ + # Must either; + # - return a Response object + # - return a Request object + # - or raise IgnoreRequest + return response + + def process_exception(self, request, exception, spider): + # Called when a download handler or a process_request() + # (from other downloader middleware) raises an exception. + + # Must either: + # - return None: continue processing this exception + # - return a Response object: stops process_exception() chain + # - return a Request object: stops process_exception() chain + pass + + def spider_opened(self, spider): + spider.logger.info('Spider opened: %s' % spider.name) diff --git a/backend/template/scrapy/config_spider/pipelines.py b/backend/template/scrapy/config_spider/pipelines.py new file mode 100644 index 00000000..69af4c85 --- /dev/null +++ b/backend/template/scrapy/config_spider/pipelines.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- + +# Define your item pipelines here +# +# Don't forget to add your pipeline to the ITEM_PIPELINES setting +# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html + +import os +from pymongo import MongoClient + +mongo = MongoClient( + host=os.environ.get('CRAWLAB_MONGO_HOST') or 'localhost', + port=int(os.environ.get('CRAWLAB_MONGO_PORT') or 27017), + username=os.environ.get('CRAWLAB_MONGO_USERNAME'), + password=os.environ.get('CRAWLAB_MONGO_PASSWORD'), + authSource=os.environ.get('CRAWLAB_MONGO_AUTHSOURCE') or 'admin' +) +db = mongo[os.environ.get('CRAWLAB_MONGO_DB') or 'test'] +col = db[os.environ.get('CRAWLAB_COLLECTION') or 'test'] +task_id = os.environ.get('CRAWLAB_TASK_ID') + +class ConfigSpiderPipeline(object): + def process_item(self, item, spider): + item['task_id'] = task_id + if col is not None: + col.save(item) + return item diff --git a/backend/template/scrapy/config_spider/settings.py b/backend/template/scrapy/config_spider/settings.py new file mode 100644 index 00000000..4b0965f2 --- /dev/null +++ b/backend/template/scrapy/config_spider/settings.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- +import os +import re +import json + +# Scrapy settings for config_spider project +# +# For simplicity, this file contains only settings considered important or +# commonly used. 
You can find more settings consulting the documentation: +# +# https://docs.scrapy.org/en/latest/topics/settings.html +# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +# https://docs.scrapy.org/en/latest/topics/spider-middleware.html + +BOT_NAME = 'Crawlab Configurable Spider' + +SPIDER_MODULES = ['config_spider.spiders'] +NEWSPIDER_MODULE = 'config_spider.spiders' + + +# Crawl responsibly by identifying yourself (and your website) on the user-agent +USER_AGENT = 'Crawlab Spider' + +# Obey robots.txt rules +ROBOTSTXT_OBEY = True + +# Configure maximum concurrent requests performed by Scrapy (default: 16) +#CONCURRENT_REQUESTS = 32 + +# Configure a delay for requests for the same website (default: 0) +# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay +# See also autothrottle settings and docs +#DOWNLOAD_DELAY = 3 +# The download delay setting will honor only one of: +#CONCURRENT_REQUESTS_PER_DOMAIN = 16 +#CONCURRENT_REQUESTS_PER_IP = 16 + +# Disable cookies (enabled by default) +#COOKIES_ENABLED = False + +# Disable Telnet Console (enabled by default) +#TELNETCONSOLE_ENABLED = False + +# Override the default request headers: +#DEFAULT_REQUEST_HEADERS = { +# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', +# 'Accept-Language': 'en', +#} + +# Enable or disable spider middlewares +# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html +#SPIDER_MIDDLEWARES = { +# 'config_spider.middlewares.ConfigSpiderSpiderMiddleware': 543, +#} + +# Enable or disable downloader middlewares +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html +#DOWNLOADER_MIDDLEWARES = { +# 'config_spider.middlewares.ConfigSpiderDownloaderMiddleware': 543, +#} + +# Enable or disable extensions +# See https://docs.scrapy.org/en/latest/topics/extensions.html +#EXTENSIONS = { +# 'scrapy.extensions.telnet.TelnetConsole': None, +#} + +# Configure item pipelines +# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html +ITEM_PIPELINES = { + 'config_spider.pipelines.ConfigSpiderPipeline': 300, +} + +# Enable and configure the AutoThrottle extension (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/autothrottle.html +#AUTOTHROTTLE_ENABLED = True +# The initial download delay +#AUTOTHROTTLE_START_DELAY = 5 +# The maximum download delay to be set in case of high latencies +#AUTOTHROTTLE_MAX_DELAY = 60 +# The average number of requests Scrapy should be sending in parallel to +# each remote server +#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 +# Enable showing throttling stats for every response received: +#AUTOTHROTTLE_DEBUG = False + +# Enable and configure HTTP caching (disabled by default) +# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings +#HTTPCACHE_ENABLED = True +#HTTPCACHE_EXPIRATION_SECS = 0 +#HTTPCACHE_DIR = 'httpcache' +#HTTPCACHE_IGNORE_HTTP_CODES = [] +#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' + +for setting_env_name in [x for x in os.environ.keys() if x.startswith('CRAWLAB_SETTING_')]: + setting_name = setting_env_name.replace('CRAWLAB_SETTING_', '') + setting_value = os.environ.get(setting_env_name) + if setting_value.lower() == 'true': + setting_value = True + elif setting_value.lower() == 'false': + setting_value = False + elif re.search(r'^\d+$', setting_value) is not None: + setting_value = int(setting_value) + elif re.search(r'^\{.*\}$', setting_value.strip()) is not None: + setting_value = 
json.loads(setting_value) + elif re.search(r'^\[.*\]$', setting_value.strip()) is not None: + setting_value = json.loads(setting_value) + else: + pass + locals()[setting_name] = setting_value + diff --git a/backend/template/scrapy/config_spider/spiders/__init__.py b/backend/template/scrapy/config_spider/spiders/__init__.py new file mode 100644 index 00000000..ebd689ac --- /dev/null +++ b/backend/template/scrapy/config_spider/spiders/__init__.py @@ -0,0 +1,4 @@ +# This package will contain the spiders of your Scrapy project +# +# Please refer to the documentation for information on how to create and manage +# your spiders. diff --git a/backend/template/scrapy/config_spider/spiders/spider.py b/backend/template/scrapy/config_spider/spiders/spider.py new file mode 100644 index 00000000..0e3c661d --- /dev/null +++ b/backend/template/scrapy/config_spider/spiders/spider.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +import scrapy +import re +from config_spider.items import Item +from urllib.parse import urljoin + +def get_real_url(response, url): + if re.search(r'^https?|^\/\/', url): + return url + return urljoin(response.url, url) + +class ConfigSpider(scrapy.Spider): + name = 'config_spider' + + def start_requests(self): + yield scrapy.Request(url='###START_URL###', callback=self.###START_STAGE###) + +###PARSERS### diff --git a/backend/template/scrapy/scrapy.cfg b/backend/template/scrapy/scrapy.cfg new file mode 100644 index 00000000..a78d91e3 --- /dev/null +++ b/backend/template/scrapy/scrapy.cfg @@ -0,0 +1,11 @@ +# Automatically created by: scrapy startproject +# +# For more information about the [deploy] section see: +# https://scrapyd.readthedocs.io/en/latest/deploy.html + +[settings] +default = config_spider.settings + +[deploy] +#url = http://localhost:6800/ +project = config_spider diff --git a/backend/template/spiderfile/Spiderfile.163_news b/backend/template/spiderfile/Spiderfile.163_news new file mode 100644 index 00000000..29d58279 --- /dev/null +++ b/backend/template/spiderfile/Spiderfile.163_news @@ -0,0 +1,20 @@ +version: "0.4.0" +name: "toscrapy_books" +start_url: "http://news.163.com/special/0001386F/rank_news.html" +start_stage: "list" +engine: "scrapy" +stages: + list: + is_list: true + list_css: "table tr:not(:first-child)" + fields: + - name: "title" + css: "td:nth-child(1) > a" + - name: "url" + css: "td:nth-child(1) > a" + attr: "href" + - name: "clicks" + css: "td.cBlue" +settings: + ROBOTSTXT_OBEY: false + USER_AGENT: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 diff --git a/backend/template/spiderfile/Spiderfile.baidu b/backend/template/spiderfile/Spiderfile.baidu new file mode 100644 index 00000000..fbf720e4 --- /dev/null +++ b/backend/template/spiderfile/Spiderfile.baidu @@ -0,0 +1,22 @@ +version: 0.4.0 +name: toscrapy_books +start_url: http://www.baidu.com/s?wd=crawlab +start_stage: list +engine: scrapy +stages: + list: + is_list: true + list_xpath: //*[contains(@class, "c-container")] + page_xpath: //*[@id="page"]//a[@class="n"][last()] + page_attr: href + fields: + - name: title + xpath: .//h3/a + - name: url + xpath: .//h3/a + attr: href + - name: abstract + xpath: .//*[@class="c-abstract"] +settings: + ROBOTSTXT_OBEY: false + USER_AGENT: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36 diff --git a/backend/template/spiderfile/Spiderfile.toscrapy_books b/backend/template/spiderfile/Spiderfile.toscrapy_books new file 
mode 100644 index 00000000..4bf18f61 --- /dev/null +++ b/backend/template/spiderfile/Spiderfile.toscrapy_books @@ -0,0 +1,28 @@ +version: "0.4.0" +name: "toscrapy_books" +start_url: "http://books.toscrape.com" +start_stage: "list" +engine: "scrapy" +stages: + list: + is_list: true + list_css: "section article.product_pod" + page_css: "ul.pager li.next a" + page_attr: "href" + fields: + - name: "title" + css: "h3 > a" + - name: "url" + css: "h3 > a" + attr: "href" + next_stage: "detail" + - name: "price" + css: ".product_price > .price_color" + detail: + is_list: false + fields: + - name: "description" + css: "#product_description + p" +settings: + ROBOTSTXT_OBEY: true + AUTOTHROTTLE_ENABLED: true diff --git a/backend/utils/file.go b/backend/utils/file.go index babc0d69..c71b2cb0 100644 --- a/backend/utils/file.go +++ b/backend/utils/file.go @@ -3,11 +3,15 @@ package utils import ( "archive/zip" "bufio" + "fmt" "github.com/apex/log" "io" + "io/ioutil" "os" + "path" "path/filepath" "runtime/debug" + "strings" ) // 删除文件 @@ -71,6 +75,16 @@ func IsDir(path string) bool { return s.IsDir() } +func ListDir(path string) []os.FileInfo { + list, err := ioutil.ReadDir(path) + if err != nil { + log.Errorf(err.Error()) + debug.PrintStack() + return nil + } + return list +} + // 判断所给路径是否为文件 func IsFile(path string) bool { return !IsDir(path) @@ -153,7 +167,6 @@ func DeCompress(srcFile *os.File, dstPath string) error { debug.PrintStack() continue } - defer Close(newFile) // 拷贝该文件到新文件中 if _, err := io.Copy(newFile, srcFile); err != nil { @@ -185,8 +198,7 @@ func Compress(files []*os.File, dest string) error { w := zip.NewWriter(d) defer Close(w) for _, file := range files { - err := _Compress(file, "", w) - if err != nil { + if err := _Compress(file, "", w); err != nil { return err } } @@ -239,3 +251,128 @@ func _Compress(file *os.File, prefix string, zw *zip.Writer) error { } return nil } + +func GetFilesFromDir(dirPath string) ([]*os.File, error) { + var res []*os.File + for _, fInfo := range ListDir(dirPath) { + f, err := os.Open(filepath.Join(dirPath, fInfo.Name())) + if err != nil { + return res, err + } + res = append(res, f) + } + return res, nil +} + +func GetAllFilesFromDir(dirPath string) ([]*os.File, error) { + var res []*os.File + if err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error { + if !IsDir(path) { + f, err2 := os.Open(path) + if err2 != nil { + return err + } + res = append(res, f) + } + return nil + }); err != nil { + log.Error(err.Error()) + debug.PrintStack() + return res, err + } + return res, nil +} + +// File copies a single file from src to dst +func CopyFile(src, dst string) error { + var err error + var srcFd *os.File + var dstFd *os.File + var srcInfo os.FileInfo + + if srcFd, err = os.Open(src); err != nil { + return err + } + defer srcFd.Close() + + if dstFd, err = os.Create(dst); err != nil { + return err + } + defer dstFd.Close() + + if _, err = io.Copy(dstFd, srcFd); err != nil { + return err + } + if srcInfo, err = os.Stat(src); err != nil { + return err + } + return os.Chmod(dst, srcInfo.Mode()) +} + +// Dir copies a whole directory recursively +func CopyDir(src string, dst string) error { + var err error + var fds []os.FileInfo + var srcInfo os.FileInfo + + if srcInfo, err = os.Stat(src); err != nil { + return err + } + + if err = os.MkdirAll(dst, srcInfo.Mode()); err != nil { + return err + } + + if fds, err = ioutil.ReadDir(src); err != nil { + return err + } + for _, fd := range fds { + srcfp := path.Join(src, fd.Name()) + dstfp := 
diff --git a/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go b/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
deleted file mode 100644
index 3525a004..00000000
--- a/backend/vendor/github.com/globalsign/mgo/bson/bson_corpus_spec_test_generator.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// +build ignore
-
-package main
-
-import (
-	"bytes"
-	"fmt"
-	"go/format"
-	"html/template"
-	"io/ioutil"
-	"log"
-	"path/filepath"
-	"strings"
-
-	"github.com/globalsign/mgo/internal/json"
-)
-
-func main() {
-	log.SetFlags(0)
-	log.SetPrefix(name + ": ")
-
-	var g Generator
-
-	fmt.Fprintf(&g, "// Code generated by \"%s.go\"; DO NOT EDIT\n\n", name)
-
-	src := g.generate()
-
-	err := ioutil.WriteFile(fmt.Sprintf("%s.go", strings.TrimSuffix(name, "_generator")), src, 0644)
-	if err != nil {
-		log.Fatalf("writing output: %s", err)
-	}
-}
-
-// Generator holds the state of the analysis. Primarily used to buffer
-// the output for format.Source.
-type Generator struct {
-	bytes.Buffer // Accumulated output.
-}
-
-// format returns the gofmt-ed contents of the Generator's buffer.
-func (g *Generator) format() []byte {
-	src, err := format.Source(g.Bytes())
-	if err != nil {
-		// Should never happen, but can arise when developing this code.
-		// The user can compile the output to see the error.
- log.Printf("warning: internal error: invalid Go generated: %s", err) - log.Printf("warning: compile the package to analyze the error") - return g.Bytes() - } - return src -} - -// EVERYTHING ABOVE IS CONSTANT BETWEEN THE GENERATORS - -const name = "bson_corpus_spec_test_generator" - -func (g *Generator) generate() []byte { - - testFiles, err := filepath.Glob("./specdata/specifications/source/bson-corpus/tests/*.json") - if err != nil { - log.Fatalf("error reading bson-corpus files: %s", err) - } - - tests, err := g.loadTests(testFiles) - if err != nil { - log.Fatalf("error loading tests: %s", err) - } - - tmpl, err := g.getTemplate() - if err != nil { - log.Fatalf("error loading template: %s", err) - } - - tmpl.Execute(&g.Buffer, tests) - - return g.format() -} - -func (g *Generator) loadTests(filenames []string) ([]*testDef, error) { - var tests []*testDef - for _, filename := range filenames { - test, err := g.loadTest(filename) - if err != nil { - return nil, err - } - - tests = append(tests, test) - } - - return tests, nil -} - -func (g *Generator) loadTest(filename string) (*testDef, error) { - content, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - - var testDef testDef - err = json.Unmarshal(content, &testDef) - if err != nil { - return nil, err - } - - names := make(map[string]struct{}) - - for i := len(testDef.Valid) - 1; i >= 0; i-- { - if testDef.BsonType == "0x05" && testDef.Valid[i].Description == "subtype 0x02" { - testDef.Valid = append(testDef.Valid[:i], testDef.Valid[i+1:]...) - continue - } - - name := cleanupFuncName(testDef.Description + "_" + testDef.Valid[i].Description) - nameIdx := name - j := 1 - for { - if _, ok := names[nameIdx]; !ok { - break - } - - nameIdx = fmt.Sprintf("%s_%d", name, j) - } - - names[nameIdx] = struct{}{} - - testDef.Valid[i].TestDef = &testDef - testDef.Valid[i].Name = nameIdx - testDef.Valid[i].StructTest = testDef.TestKey != "" && - (testDef.BsonType != "0x05" || strings.Contains(testDef.Valid[i].Description, "0x00")) && - !testDef.Deprecated - } - - for i := len(testDef.DecodeErrors) - 1; i >= 0; i-- { - if strings.Contains(testDef.DecodeErrors[i].Description, "UTF-8") { - testDef.DecodeErrors = append(testDef.DecodeErrors[:i], testDef.DecodeErrors[i+1:]...) - continue - } - - name := cleanupFuncName(testDef.Description + "_" + testDef.DecodeErrors[i].Description) - nameIdx := name - j := 1 - for { - if _, ok := names[nameIdx]; !ok { - break - } - - nameIdx = fmt.Sprintf("%s_%d", name, j) - } - names[nameIdx] = struct{}{} - - testDef.DecodeErrors[i].Name = nameIdx - } - - return &testDef, nil -} - -func (g *Generator) getTemplate() (*template.Template, error) { - content := `package bson_test - -import ( - "encoding/hex" - "time" - - . 
"gopkg.in/check.v1" - "github.com/globalsign/mgo/bson" -) - -func testValid(c *C, in []byte, expected []byte, result interface{}) { - err := bson.Unmarshal(in, result) - c.Assert(err, IsNil) - - out, err := bson.Marshal(result) - c.Assert(err, IsNil) - - c.Assert(string(expected), Equals, string(out), Commentf("roundtrip failed for %T, expected '%x' but got '%x'", result, expected, out)) -} - -func testDecodeSkip(c *C, in []byte) { - err := bson.Unmarshal(in, &struct{}{}) - c.Assert(err, IsNil) -} - -func testDecodeError(c *C, in []byte, result interface{}) { - err := bson.Unmarshal(in, result) - c.Assert(err, Not(IsNil)) -} - -{{range .}} -{{range .Valid}} -func (s *S) Test{{.Name}}(c *C) { - b, err := hex.DecodeString("{{.Bson}}") - c.Assert(err, IsNil) - - {{if .CanonicalBson}} - cb, err := hex.DecodeString("{{.CanonicalBson}}") - c.Assert(err, IsNil) - {{else}} - cb := b - {{end}} - - var resultD bson.D - testValid(c, b, cb, &resultD) - {{if .StructTest}}var resultS struct { - Element {{.TestDef.GoType}} ` + "`bson:\"{{.TestDef.TestKey}}\"`" + ` - } - testValid(c, b, cb, &resultS){{end}} - - testDecodeSkip(c, b) -} -{{end}} - -{{range .DecodeErrors}} -func (s *S) Test{{.Name}}(c *C) { - b, err := hex.DecodeString("{{.Bson}}") - c.Assert(err, IsNil) - - var resultD bson.D - testDecodeError(c, b, &resultD) -} -{{end}} -{{end}} -` - tmpl, err := template.New("").Parse(content) - if err != nil { - return nil, err - } - return tmpl, nil -} - -func cleanupFuncName(name string) string { - return strings.Map(func(r rune) rune { - if (r >= 48 && r <= 57) || (r >= 65 && r <= 90) || (r >= 97 && r <= 122) { - return r - } - return '_' - }, name) -} - -type testDef struct { - Description string `json:"description"` - BsonType string `json:"bson_type"` - TestKey string `json:"test_key"` - Valid []*valid `json:"valid"` - DecodeErrors []*decodeError `json:"decodeErrors"` - Deprecated bool `json:"deprecated"` -} - -func (t *testDef) GoType() string { - switch t.BsonType { - case "0x01": - return "float64" - case "0x02": - return "string" - case "0x03": - return "bson.D" - case "0x04": - return "[]interface{}" - case "0x05": - return "[]byte" - case "0x07": - return "bson.ObjectId" - case "0x08": - return "bool" - case "0x09": - return "time.Time" - case "0x0E": - return "string" - case "0x10": - return "int32" - case "0x12": - return "int64" - case "0x13": - return "bson.Decimal" - default: - return "interface{}" - } -} - -type valid struct { - Description string `json:"description"` - Bson string `json:"bson"` - CanonicalBson string `json:"canonical_bson"` - - Name string - StructTest bool - TestDef *testDef -} - -type decodeError struct { - Description string `json:"description"` - Bson string `json:"bson"` - - Name string -} diff --git a/backend/vendor/github.com/go-playground/locales/.gitignore b/backend/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/backend/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/backend/vendor/github.com/go-playground/locales/LICENSE b/backend/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 00000000..75854ac4 --- /dev/null +++ 
b/backend/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/backend/vendor/github.com/go-playground/locales/README.md b/backend/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 00000000..43329f8d --- /dev/null +++ b/backend/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,172 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.12.1-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/locales/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/locales) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). + +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. 
+ +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... + + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... + + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... + + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... 
+ + var f64 float64 + + f64 = -10356.4523 + + // Number + fmt.Println(l.FmtNumber(f64, 2)) + + // Currency + fmt.Println(l.FmtCurrency(f64, 2, currency.CAD)) + fmt.Println(l.FmtCurrency(f64, 2, currency.USD)) + + // Accounting + fmt.Println(l.FmtAccounting(f64, 2, currency.CAD)) + fmt.Println(l.FmtAccounting(f64, 2, currency.USD)) + + f64 = 78.12 + + // Percent + fmt.Println(l.FmtPercent(f64, 0)) + + // Plural Rules for locale, so you know what rules you must cover + fmt.Println(l.PluralsCardinal()) + fmt.Println(l.PluralsOrdinal()) + + // Cardinal Plural Rules + fmt.Println(l.CardinalPluralRule(1, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 1)) + fmt.Println(l.CardinalPluralRule(3, 0)) + + // Ordinal Plural Rules + fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st + fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd + fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd + fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th + + // Range Plural Rules + fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1 + fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2 + fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8 +} +``` + +NOTES: +------- +These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/), if you encounter any issues +I strongly encourage contributing to the CLDR project to get the locale information corrected and the next time +these locales are regenerated the fix will come with. + +I do however realize that time constraints are often important and so there are two options: + +1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface. +2. Add an exception in the locale generation code directly and once regenerated, fix will be in place. + +Please to not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
diff --git a/backend/vendor/github.com/go-playground/locales/currency/currency.go b/backend/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 00000000..cdaba596 --- /dev/null +++ b/backend/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,308 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + USN + USS + UYI + UYP + UYU + UZS + VEB + VEF + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/backend/vendor/github.com/go-playground/locales/logo.png b/backend/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 00000000..3038276e Binary files /dev/null and b/backend/vendor/github.com/go-playground/locales/logo.png differ diff --git a/backend/vendor/github.com/go-playground/locales/rules.go b/backend/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 00000000..92029001 --- /dev/null +++ b/backend/vendor/github.com/go-playground/locales/rules.go @@ -0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // 
two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used for fractions if they have a separate class + PluralRuleOther // other - required—general plural form—also used if the language only has a single form +) + +const ( + pluralsString = "UnknownZeroOneTwoFewManyOther" +) + +// Translator encapsulates an instance of a locale +// NOTE: some values are returned as a []byte just in case the caller +// wishes to add more and can help avoid allocations; otherwise just cast as string +type Translator interface { + + // The following Functions are for overriding, debugging or developing + // with a Translator Locale + + // Locale returns the string value of the translator + Locale() string + + // returns an array of cardinal plural rules associated + // with this translator + PluralsCardinal() []PluralRule + + // returns an array of ordinal plural rules associated + // with this translator + PluralsOrdinal() []PluralRule + + // returns an array of range plural rules associated + // with this translator + PluralsRange() []PluralRule + + // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale + CardinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale + OrdinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale + RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule + + // returns the locales abbreviated month given the 'month' provided + MonthAbbreviated(month time.Month) string + + // returns the locales abbreviated months + MonthsAbbreviated() []string + + // returns the locales narrow month given the 'month' provided + MonthNarrow(month time.Month) string + + // returns the locales narrow months + MonthsNarrow() []string + + // returns the locales wide month given the 'month' provided + MonthWide(month time.Month) string + + // returns the locales wide months + MonthsWide() []string + + // returns the locales abbreviated weekday given the 'weekday' provided + WeekdayAbbreviated(weekday time.Weekday) string + + // returns the locales abbreviated weekdays + WeekdaysAbbreviated() []string + + // returns the locales narrow weekday given the 'weekday' provided + WeekdayNarrow(weekday time.Weekday) string + + // WeekdaysNarrowreturns the locales narrow weekdays + WeekdaysNarrow() []string + + // returns the locales short weekday given the 'weekday' provided + WeekdayShort(weekday time.Weekday) string + + // returns the locales short weekdays + WeekdaysShort() []string + + // returns the locales wide weekday given the 'weekday' provided + WeekdayWide(weekday time.Weekday) string + + // returns the locales wide weekdays + WeekdaysWide() []string + + // The following Functions are common Formatting functionsfor the Translator's Locale + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + FmtNumber(num float64, v uint64) string + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + // NOTE: 'num' passed into FmtPercent is assumed to be in percent already + FmtPercent(num float64, v uint64) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + FmtCurrency(num float64, v uint64, currency currency.Type) string + + // returns the currency representation of 'num' with digits/precision 
of 'v' for locale + // in accounting notation. + FmtAccounting(num float64, v uint64, currency currency.Type) string + + // returns the short date representation of 't' for locale + FmtDateShort(t time.Time) string + + // returns the medium date representation of 't' for locale + FmtDateMedium(t time.Time) string + + // returns the long date representation of 't' for locale + FmtDateLong(t time.Time) string + + // returns the full date representation of 't' for locale + FmtDateFull(t time.Time) string + + // returns the short time representation of 't' for locale + FmtTimeShort(t time.Time) string + + // returns the medium time representation of 't' for locale + FmtTimeMedium(t time.Time) string + + // returns the long time representation of 't' for locale + FmtTimeLong(t time.Time) string + + // returns the full time representation of 't' for locale + FmtTimeFull(t time.Time) string +} + +// String returns the string value of PluralRule +func (p PluralRule) String() string { + + switch p { + case PluralRuleZero: + return pluralsString[7:11] + case PluralRuleOne: + return pluralsString[11:14] + case PluralRuleTwo: + return pluralsString[14:17] + case PluralRuleFew: + return pluralsString[17:20] + case PluralRuleMany: + return pluralsString[20:24] + case PluralRuleOther: + return pluralsString[24:] + default: + return pluralsString[:7] + } +} + +// +// Precision Notes: +// +// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh +// +// v := float64(3.141) +// i := float64(int64(v)) +// +// fmt.Println(v - i) +// +// or +// +// s := strconv.FormatFloat(v-i, 'f', -1, 64) +// fmt.Println(s) +// +// these will not print what you'd expect: 0.14100000000000001 +// and so this library requires a precision to be specified, or +// inaccurate plural rules could be applied. +// +// +// +// n - absolute value of the source number (integer and decimals). +// i - integer digits of n. +// v - number of visible fraction digits in n, with trailing zeros. +// w - number of visible fraction digits in n, without trailing zeros. +// f - visible fractional digits in n, with trailing zeros. +// t - visible fractional digits in n, without trailing zeros. +// +// +// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above. +// +// n := math.Abs(num) +// i := int64(n) +// v := v +// +// +// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64 +// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// +// +// +// General Inclusion Rules +// - v will always be available inherently +// - all require n +// - w requires i +// + +// W returns the number of visible fraction digits in N, without trailing zeros. +func W(n float64, v uint64) (w int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then w will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + w = int64(len(s[:end])) + } + + return +} + +// F returns the visible fractional digits in N, with trailing zeros. 
+func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. +func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/backend/vendor/github.com/go-playground/universal-translator/.gitignore b/backend/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 00000000..26617857 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof \ No newline at end of file diff --git a/backend/vendor/github.com/go-playground/universal-translator/LICENSE b/backend/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 00000000..8d8aba15 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/backend/vendor/github.com/go-playground/universal-translator/README.md b/backend/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 00000000..24aef158 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,90 @@ +## universal-translator + +![Project status](https://img.shields.io/badge/version-0.16.0-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/universal-translator/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/universal-translator) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? +-------------------------- +Because none of the plural rules seem to be correct out there, including the previous implementation of this package, +so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. + +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) +- [x] Support loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/universal-translator +``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/examples/full-with-files) + +File formatting +-------------- +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained withing the same file(s); +they are only separated for easy viewing. 
+ +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale for which the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation Cardinal, Ordinal, Range or "" for a plain substitution(not required to be defined if plain used)| +|rule|The plural rule for which the translation is for eg. One, Two, Few, Many or Other.(not required to be defined if plain used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interesting in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/backend/vendor/github.com/go-playground/universal-translator/errors.go b/backend/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 00000000..38b163b6 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error text +func (e *ErrConflictingTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// 
ErrOrdinalTranslation is the error representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. +type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. 
+type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/backend/vendor/github.com/go-playground/universal-translator/import_export.go b/backend/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 00000000..7bd76f26 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,274 @@ +package ut + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" +) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + fmt.Println(dirname, err, os.IsNotExist(err)) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. 
+ if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. +func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/backend/vendor/github.com/go-playground/universal-translator/logo.png b/backend/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 00000000..a37aa8c0 Binary files /dev/null and b/backend/vendor/github.com/go-playground/universal-translator/logo.png differ diff --git a/backend/vendor/github.com/go-playground/universal-translator/translator.go 
b/backend/vendor/github.com/go-playground/universal-translator/translator.go new file mode 100644 index 00000000..cfafce8a --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/translator.go @@ -0,0 +1,420 @@ +package ut + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-playground/locales" +) + +const ( + paramZero = "{0}" + paramOne = "{1}" + unknownTranslation = "" +) + +// Translator is universal translators +// translator instance which is a thin wrapper +// around locales.Translator instance providing +// some extra functionality +type Translator interface { + locales.Translator + + // adds a normal translation for a particular language/locale + // {#} is the only replacement type accepted and are ad infinitum + // eg. one: '{0} day left' other: '{0} days left' + Add(key interface{}, text string, override bool) error + + // adds a cardinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0} day left' other: '{0} days left' + AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds an ordinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' + // - 1st, 2nd, 3rd... + AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds a range plural translation for a particular language/locale + // {0} and {1} are the only replacement types accepted and only these are accepted. + // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' + AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error + + // creates the translation for the locale given the 'key' and params passed in + T(key interface{}, params ...string) (string, error) + + // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + C(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + O(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and + // 'digit2' arguments and 'param1' and 'param2' passed in + R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) + + // VerifyTranslations checks to ensures that no plural rules have been + // missed within the translations. 
+ VerifyTranslations() error +} + +var _ Translator = new(translator) +var _ locales.Translator = new(translator) + +type translator struct { + locales.Translator + translations map[interface{}]*transText + cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown + ordinalTanslations map[interface{}][]*transText + rangeTanslations map[interface{}][]*transText +} + +type transText struct { + text string + indexes []int +} + +func newTranslator(trans locales.Translator) Translator { + return &translator{ + Translator: trans, + translations: make(map[interface{}]*transText), // translation text broken up by byte index + cardinalTanslations: make(map[interface{}][]*transText), + ordinalTanslations: make(map[interface{}][]*transText), + rangeTanslations: make(map[interface{}][]*transText), + } +} + +// Add adds a normal translation for a particular language/locale +// {#} is the only replacement type accepted and are ad infinitum +// eg. one: '{0} day left' other: '{0} days left' +func (t *translator) Add(key interface{}, text string, override bool) error { + + if _, ok := t.translations[key]; ok && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text} + } + + lb := strings.Count(text, "{") + rb := strings.Count(text, "}") + + if lb != rb { + return &ErrMissingBracket{locale: t.Locale(), key: key, text: text} + } + + trans := &transText{ + text: text, + } + + var idx int + + for i := 0; i < lb; i++ { + s := "{" + strconv.Itoa(i) + "}" + idx = strings.Index(text, s) + if idx == -1 { + return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text} + } + + trans.indexes = append(trans.indexes, idx) + trans.indexes = append(trans.indexes, idx+len(s)) + } + + t.translations[key] = trans + + return nil +} + +// AddCardinal adds a cardinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0} day left' other: '{0} days left' +func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsCardinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.cardinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.cardinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. 
locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddOrdinal adds an ordinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd... +func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. 
locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) + b = append(b, params[count]...) + i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensures that no plural rules have been +// missed within the translations. 
+func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/backend/vendor/github.com/go-playground/universal-translator/universal_translator.go b/backend/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 00000000..dbf707f5 --- /dev/null +++ b/backend/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 @@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator trys to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator. 
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/backend/vendor/github.com/gopherjs/gopherjs/LICENSE b/backend/vendor/github.com/gopherjs/gopherjs/LICENSE new file mode 100644 index 00000000..d496fef1 --- /dev/null +++ b/backend/vendor/github.com/gopherjs/gopherjs/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013 Richard Musiol. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/backend/vendor/github.com/gopherjs/gopherjs/js/js.go b/backend/vendor/github.com/gopherjs/gopherjs/js/js.go new file mode 100644 index 00000000..3fbf1d88 --- /dev/null +++ b/backend/vendor/github.com/gopherjs/gopherjs/js/js.go @@ -0,0 +1,168 @@ +// Package js provides functions for interacting with native JavaScript APIs. Calls to these functions are treated specially by GopherJS and translated directly to their corresponding JavaScript syntax. +// +// Use MakeWrapper to expose methods to JavaScript. When passing values directly, the following type conversions are performed: +// +// | Go type | JavaScript type | Conversions back to interface{} | +// | --------------------- | --------------------- | ------------------------------- | +// | bool | Boolean | bool | +// | integers and floats | Number | float64 | +// | string | String | string | +// | []int8 | Int8Array | []int8 | +// | []int16 | Int16Array | []int16 | +// | []int32, []int | Int32Array | []int | +// | []uint8 | Uint8Array | []uint8 | +// | []uint16 | Uint16Array | []uint16 | +// | []uint32, []uint | Uint32Array | []uint | +// | []float32 | Float32Array | []float32 | +// | []float64 | Float64Array | []float64 | +// | all other slices | Array | []interface{} | +// | arrays | see slice type | see slice type | +// | functions | Function | func(...interface{}) *js.Object | +// | time.Time | Date | time.Time | +// | - | instanceof Node | *js.Object | +// | maps, structs | instanceof Object | map[string]interface{} | +// +// Additionally, for a struct containing a *js.Object field, only the content of the field will be passed to JavaScript and vice versa. +package js + +// Object is a container for a native JavaScript object. Calls to its methods are treated specially by GopherJS and translated directly to their JavaScript syntax. A nil pointer to Object is equal to JavaScript's "null". Object can not be used as a map key. +type Object struct{ object *Object } + +// Get returns the object's property with the given key. +func (o *Object) Get(key string) *Object { return o.object.Get(key) } + +// Set assigns the value to the object's property with the given key. +func (o *Object) Set(key string, value interface{}) { o.object.Set(key, value) } + +// Delete removes the object's property with the given key. +func (o *Object) Delete(key string) { o.object.Delete(key) } + +// Length returns the object's "length" property, converted to int. +func (o *Object) Length() int { return o.object.Length() } + +// Index returns the i'th element of an array. +func (o *Object) Index(i int) *Object { return o.object.Index(i) } + +// SetIndex sets the i'th element of an array. +func (o *Object) SetIndex(i int, value interface{}) { o.object.SetIndex(i, value) } + +// Call calls the object's method with the given name. +func (o *Object) Call(name string, args ...interface{}) *Object { return o.object.Call(name, args...) } + +// Invoke calls the object itself. This will fail if it is not a function. 
+func (o *Object) Invoke(args ...interface{}) *Object { return o.object.Invoke(args...) } + +// New creates a new instance of this type object. This will fail if it not a function (constructor). +func (o *Object) New(args ...interface{}) *Object { return o.object.New(args...) } + +// Bool returns the object converted to bool according to JavaScript type conversions. +func (o *Object) Bool() bool { return o.object.Bool() } + +// String returns the object converted to string according to JavaScript type conversions. +func (o *Object) String() string { return o.object.String() } + +// Int returns the object converted to int according to JavaScript type conversions (parseInt). +func (o *Object) Int() int { return o.object.Int() } + +// Int64 returns the object converted to int64 according to JavaScript type conversions (parseInt). +func (o *Object) Int64() int64 { return o.object.Int64() } + +// Uint64 returns the object converted to uint64 according to JavaScript type conversions (parseInt). +func (o *Object) Uint64() uint64 { return o.object.Uint64() } + +// Float returns the object converted to float64 according to JavaScript type conversions (parseFloat). +func (o *Object) Float() float64 { return o.object.Float() } + +// Interface returns the object converted to interface{}. See table in package comment for details. +func (o *Object) Interface() interface{} { return o.object.Interface() } + +// Unsafe returns the object as an uintptr, which can be converted via unsafe.Pointer. Not intended for public use. +func (o *Object) Unsafe() uintptr { return o.object.Unsafe() } + +// Error encapsulates JavaScript errors. Those are turned into a Go panic and may be recovered, giving an *Error that holds the JavaScript error object. +type Error struct { + *Object +} + +// Error returns the message of the encapsulated JavaScript error object. +func (err *Error) Error() string { + return "JavaScript error: " + err.Get("message").String() +} + +// Stack returns the stack property of the encapsulated JavaScript error object. +func (err *Error) Stack() string { + return err.Get("stack").String() +} + +// Global gives JavaScript's global object ("window" for browsers and "GLOBAL" for Node.js). +var Global *Object + +// Module gives the value of the "module" variable set by Node.js. Hint: Set a module export with 'js.Module.Get("exports").Set("exportName", ...)'. +var Module *Object + +// Undefined gives the JavaScript value "undefined". +var Undefined *Object + +// Debugger gets compiled to JavaScript's "debugger;" statement. +func Debugger() {} + +// InternalObject returns the internal JavaScript object that represents i. Not intended for public use. +func InternalObject(i interface{}) *Object { + return nil +} + +// MakeFunc wraps a function and gives access to the values of JavaScript's "this" and "arguments" keywords. +func MakeFunc(fn func(this *Object, arguments []*Object) interface{}) *Object { + return Global.Call("$makeFunc", InternalObject(fn)) +} + +// Keys returns the keys of the given JavaScript object. +func Keys(o *Object) []string { + if o == nil || o == Undefined { + return nil + } + a := Global.Get("Object").Call("keys", o) + s := make([]string, a.Length()) + for i := 0; i < a.Length(); i++ { + s[i] = a.Index(i).String() + } + return s +} + +// MakeWrapper creates a JavaScript object which has wrappers for the exported methods of i. Use explicit getter and setter methods to expose struct fields to JavaScript. 
+func MakeWrapper(i interface{}) *Object { + v := InternalObject(i) + o := Global.Get("Object").New() + o.Set("__internal_object__", v) + methods := v.Get("constructor").Get("methods") + for i := 0; i < methods.Length(); i++ { + m := methods.Index(i) + if m.Get("pkg").String() != "" { // not exported + continue + } + o.Set(m.Get("name").String(), func(args ...*Object) *Object { + return Global.Call("$externalizeFunction", v.Get(m.Get("prop").String()), m.Get("typ"), true).Call("apply", v, args) + }) + } + return o +} + +// NewArrayBuffer creates a JavaScript ArrayBuffer from a byte slice. +func NewArrayBuffer(b []byte) *Object { + slice := InternalObject(b) + offset := slice.Get("$offset").Int() + length := slice.Get("$length").Int() + return slice.Get("$array").Get("buffer").Call("slice", offset, offset+length) +} + +// M is a simple map type. It is intended as a shorthand for JavaScript objects (before conversion). +type M map[string]interface{} + +// S is a simple slice type. It is intended as a shorthand for JavaScript arrays (before conversion). +type S []interface{} + +func init() { + // avoid dead code elimination + e := Error{} + _ = e +} diff --git a/backend/vendor/github.com/jtolds/gls/LICENSE b/backend/vendor/github.com/jtolds/gls/LICENSE new file mode 100644 index 00000000..9b4a822d --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013, Space Monkey, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/backend/vendor/github.com/jtolds/gls/README.md b/backend/vendor/github.com/jtolds/gls/README.md new file mode 100644 index 00000000..4ebb692f --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/README.md @@ -0,0 +1,89 @@ +gls +=== + +Goroutine local storage + +### IMPORTANT NOTE ### + +It is my duty to point you to https://blog.golang.org/context, which is how +Google solves all of the problems you'd perhaps consider using this package +for at scale. + +One downside to Google's approach is that *all* of your functions must have +a new first argument, but after clearing that hurdle everything else is much +better. + +If you aren't interested in this warning, read on. + +### Huhwaht? Why? ### + +Every so often, a thread shows up on the +[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some +form of goroutine-local-storage, or some kind of goroutine id, or some kind of +context. There are a few valid use cases for goroutine-local-storage, one of +the most prominent being log line context. 
One poster was interested in being +able to log an HTTP request context id in every log line in the same goroutine +as the incoming HTTP request, without having to change every library and +function call he was interested in logging. + +This would be pretty useful. Provided that you could get some kind of +goroutine-local-storage, you could call +[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging +writer that checks goroutine-local-storage for some context information and +adds that context to your log lines. + +But alas, Andrew Gerrand's typically diplomatic answer to the question of +goroutine-local variables was: + +> We wouldn't even be having this discussion if thread local storage wasn't +> useful. But every feature comes at a cost, and in my opinion the cost of +> threadlocals far outweighs their benefits. They're just not a good fit for +> Go. + +So, yeah, that makes sense. That's a pretty good reason for why the language +won't support a specific and (relatively) unuseful feature that requires some +runtime changes, just for the sake of a little bit of log improvement. + +But does Go require runtime changes? + +### How it works ### + +Go has pretty fantastic introspective and reflective features, but one thing Go +doesn't give you is any kind of access to the stack pointer, or frame pointer, +or goroutine id, or anything contextual about your current stack. It gives you +access to your list of callers, but only along with program counters, which are +fixed at compile time. + +But it does give you the stack. + +So, we define 16 special functions and embed base-16 tags into the stack using +the call order of those 16 functions. Then, we can read our tags back out of +the stack looking at the callers list. + +We then use these tags as an index into a traditional map for implementing +this library. + +### What are people saying? ### + +"Wow, that's horrifying." + +"This is the most terrible thing I have seen in a very long time." + +"Where is it getting a context from? Is this serializing all the requests? +What the heck is the client being bound to? What are these tags? Why does he +need callers? Oh god no. No no no." + +### Docs ### + +Please see the docs at http://godoc.org/github.com/jtolds/gls + +### Related ### + +If you're okay relying on the string format of the current runtime stacktrace +including a unique goroutine id (not guaranteed by the spec or anything, but +very unlikely to change within a Go release), you might be able to squeeze +out a bit more performance by using this similar library, inspired by some +code Brad Fitzpatrick wrote for debugging his HTTP/2 library: +https://github.com/tylerb/gls (in contrast, jtolds/gls doesn't require +any knowledge of the string format of the runtime stacktrace, which +probably adds unnecessary overhead). diff --git a/backend/vendor/github.com/jtolds/gls/context.go b/backend/vendor/github.com/jtolds/gls/context.go new file mode 100644 index 00000000..618a1710 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/context.go @@ -0,0 +1,153 @@ +// Package gls implements goroutine-local storage. +package gls + +import ( + "sync" +) + +var ( + mgrRegistry = make(map[*ContextManager]bool) + mgrRegistryMtx sync.RWMutex +) + +// Values is simply a map of key types to value types. Used by SetValues to +// set multiple values at once. +type Values map[interface{}]interface{} + +// ContextManager is the main entrypoint for interacting with +// Goroutine-local-storage. 
You can have multiple independent ContextManagers +// at any given time. ContextManagers are usually declared globally for a given +// class of context variables. You should use NewContextManager for +// construction. +type ContextManager struct { + mtx sync.Mutex + values map[uint]Values +} + +// NewContextManager returns a brand new ContextManager. It also registers the +// new ContextManager in the ContextManager registry which is used by the Go +// method. ContextManagers are typically defined globally at package scope. +func NewContextManager() *ContextManager { + mgr := &ContextManager{values: make(map[uint]Values)} + mgrRegistryMtx.Lock() + defer mgrRegistryMtx.Unlock() + mgrRegistry[mgr] = true + return mgr +} + +// Unregister removes a ContextManager from the global registry, used by the +// Go method. Only intended for use when you're completely done with a +// ContextManager. Use of Unregister at all is rare. +func (m *ContextManager) Unregister() { + mgrRegistryMtx.Lock() + defer mgrRegistryMtx.Unlock() + delete(mgrRegistry, m) +} + +// SetValues takes a collection of values and a function to call for those +// values to be set in. Anything further down the stack will have the set +// values available through GetValue. SetValues will add new values or replace +// existing values of the same key and will not mutate or change values for +// previous stack frames. +// SetValues is slow (makes a copy of all current and new values for the new +// gls-context) in order to reduce the amount of lookups GetValue requires. +func (m *ContextManager) SetValues(new_values Values, context_call func()) { + if len(new_values) == 0 { + context_call() + return + } + + mutated_keys := make([]interface{}, 0, len(new_values)) + mutated_vals := make(Values, len(new_values)) + + EnsureGoroutineId(func(gid uint) { + m.mtx.Lock() + state, found := m.values[gid] + if !found { + state = make(Values, len(new_values)) + m.values[gid] = state + } + m.mtx.Unlock() + + for key, new_val := range new_values { + mutated_keys = append(mutated_keys, key) + if old_val, ok := state[key]; ok { + mutated_vals[key] = old_val + } + state[key] = new_val + } + + defer func() { + if !found { + m.mtx.Lock() + delete(m.values, gid) + m.mtx.Unlock() + return + } + + for _, key := range mutated_keys { + if val, ok := mutated_vals[key]; ok { + state[key] = val + } else { + delete(state, key) + } + } + }() + + context_call() + }) +} + +// GetValue will return a previously set value, provided that the value was set +// by SetValues somewhere higher up the stack. If the value is not found, ok +// will be false. +func (m *ContextManager) GetValue(key interface{}) ( + value interface{}, ok bool) { + gid, ok := GetGoroutineId() + if !ok { + return nil, false + } + + m.mtx.Lock() + state, found := m.values[gid] + m.mtx.Unlock() + + if !found { + return nil, false + } + value, ok = state[key] + return value, ok +} + +func (m *ContextManager) getValues() Values { + gid, ok := GetGoroutineId() + if !ok { + return nil + } + m.mtx.Lock() + state, _ := m.values[gid] + m.mtx.Unlock() + return state +} + +// Go preserves ContextManager values and Goroutine-local-storage across new +// goroutine invocations. The Go method makes a copy of all existing values on +// all registered context managers and makes sure they are still set after +// kicking off the provided function in a new goroutine. 
If you don't use this +// Go method instead of the standard 'go' keyword, you will lose values in +// ContextManagers, as goroutines have brand new stacks. +func Go(cb func()) { + mgrRegistryMtx.RLock() + defer mgrRegistryMtx.RUnlock() + + for mgr := range mgrRegistry { + values := mgr.getValues() + if len(values) > 0 { + cb = func(mgr *ContextManager, cb func()) func() { + return func() { mgr.SetValues(values, cb) } + }(mgr, cb) + } + } + + go cb() +} diff --git a/backend/vendor/github.com/jtolds/gls/gen_sym.go b/backend/vendor/github.com/jtolds/gls/gen_sym.go new file mode 100644 index 00000000..7f615cce --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/gen_sym.go @@ -0,0 +1,21 @@ +package gls + +import ( + "sync" +) + +var ( + keyMtx sync.Mutex + keyCounter uint64 +) + +// ContextKey is a throwaway value you can use as a key to a ContextManager +type ContextKey struct{ id uint64 } + +// GenSym will return a brand new, never-before-used ContextKey +func GenSym() ContextKey { + keyMtx.Lock() + defer keyMtx.Unlock() + keyCounter += 1 + return ContextKey{id: keyCounter} +} diff --git a/backend/vendor/github.com/jtolds/gls/gid.go b/backend/vendor/github.com/jtolds/gls/gid.go new file mode 100644 index 00000000..c16bf3a5 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/gid.go @@ -0,0 +1,25 @@ +package gls + +var ( + stackTagPool = &idPool{} +) + +// Will return this goroutine's identifier if set. If you always need a +// goroutine identifier, you should use EnsureGoroutineId which will make one +// if there isn't one already. +func GetGoroutineId() (gid uint, ok bool) { + return readStackTag() +} + +// Will call cb with the current goroutine identifier. If one hasn't already +// been generated, one will be created and set first. The goroutine identifier +// might be invalid after cb returns. 
+func EnsureGoroutineId(cb func(gid uint)) { + if gid, ok := readStackTag(); ok { + cb(gid) + return + } + gid := stackTagPool.Acquire() + defer stackTagPool.Release(gid) + addStackTag(gid, func() { cb(gid) }) +} diff --git a/backend/vendor/github.com/jtolds/gls/id_pool.go b/backend/vendor/github.com/jtolds/gls/id_pool.go new file mode 100644 index 00000000..b7974ae0 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/id_pool.go @@ -0,0 +1,34 @@ +package gls + +// though this could probably be better at keeping ids smaller, the goal of +// this class is to keep a registry of the smallest unique integer ids +// per-process possible + +import ( + "sync" +) + +type idPool struct { + mtx sync.Mutex + released []uint + max_id uint +} + +func (p *idPool) Acquire() (id uint) { + p.mtx.Lock() + defer p.mtx.Unlock() + if len(p.released) > 0 { + id = p.released[len(p.released)-1] + p.released = p.released[:len(p.released)-1] + return id + } + id = p.max_id + p.max_id++ + return id +} + +func (p *idPool) Release(id uint) { + p.mtx.Lock() + defer p.mtx.Unlock() + p.released = append(p.released, id) +} diff --git a/backend/vendor/github.com/jtolds/gls/stack_tags.go b/backend/vendor/github.com/jtolds/gls/stack_tags.go new file mode 100644 index 00000000..37bbd334 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/stack_tags.go @@ -0,0 +1,147 @@ +package gls + +// so, basically, we're going to encode integer tags in base-16 on the stack + +const ( + bitWidth = 4 + stackBatchSize = 16 +) + +var ( + pc_lookup = make(map[uintptr]int8, 17) + mark_lookup [16]func(uint, func()) +) + +func init() { + setEntries := func(f func(uint, func()), v int8) { + var ptr uintptr + f(0, func() { + ptr = findPtr() + }) + pc_lookup[ptr] = v + if v >= 0 { + mark_lookup[v] = f + } + } + setEntries(github_com_jtolds_gls_markS, -0x1) + setEntries(github_com_jtolds_gls_mark0, 0x0) + setEntries(github_com_jtolds_gls_mark1, 0x1) + setEntries(github_com_jtolds_gls_mark2, 0x2) + setEntries(github_com_jtolds_gls_mark3, 0x3) + setEntries(github_com_jtolds_gls_mark4, 0x4) + setEntries(github_com_jtolds_gls_mark5, 0x5) + setEntries(github_com_jtolds_gls_mark6, 0x6) + setEntries(github_com_jtolds_gls_mark7, 0x7) + setEntries(github_com_jtolds_gls_mark8, 0x8) + setEntries(github_com_jtolds_gls_mark9, 0x9) + setEntries(github_com_jtolds_gls_markA, 0xa) + setEntries(github_com_jtolds_gls_markB, 0xb) + setEntries(github_com_jtolds_gls_markC, 0xc) + setEntries(github_com_jtolds_gls_markD, 0xd) + setEntries(github_com_jtolds_gls_markE, 0xe) + setEntries(github_com_jtolds_gls_markF, 0xf) +} + +func addStackTag(tag uint, context_call func()) { + if context_call == nil { + return + } + github_com_jtolds_gls_markS(tag, context_call) +} + +// these private methods are named this horrendous name so gopherjs support +// is easier. it shouldn't add any runtime cost in non-js builds. 
+ +//go:noinline +func github_com_jtolds_gls_markS(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark0(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark1(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark2(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark3(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark4(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark5(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark6(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark7(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark8(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_mark9(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markA(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markB(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markC(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markD(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markE(tag uint, cb func()) { _m(tag, cb) } + +//go:noinline +func github_com_jtolds_gls_markF(tag uint, cb func()) { _m(tag, cb) } + +func _m(tag_remainder uint, cb func()) { + if tag_remainder == 0 { + cb() + } else { + mark_lookup[tag_remainder&0xf](tag_remainder>>bitWidth, cb) + } +} + +func readStackTag() (tag uint, ok bool) { + var current_tag uint + offset := 0 + for { + batch, next_offset := getStack(offset, stackBatchSize) + for _, pc := range batch { + val, ok := pc_lookup[pc] + if !ok { + continue + } + if val < 0 { + return current_tag, true + } + current_tag <<= bitWidth + current_tag += uint(val) + } + if next_offset == 0 { + break + } + offset = next_offset + } + return 0, false +} + +func (m *ContextManager) preventInlining() { + // dunno if findPtr or getStack are likely to get inlined in a future release + // of go, but if they are inlined and their callers are inlined, that could + // hork some things. let's do our best to explain to the compiler that we + // really don't want those two functions inlined by saying they could change + // at any time. assumes preventInlining doesn't get compiled out. + // this whole thing is probably overkill. 
+ findPtr = m.values[0][0].(func() uintptr) + getStack = m.values[0][1].(func(int, int) ([]uintptr, int)) +} diff --git a/backend/vendor/github.com/jtolds/gls/stack_tags_js.go b/backend/vendor/github.com/jtolds/gls/stack_tags_js.go new file mode 100644 index 00000000..c4e8b801 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/stack_tags_js.go @@ -0,0 +1,75 @@ +// +build js + +package gls + +// This file is used for GopherJS builds, which don't have normal runtime +// stack trace support + +import ( + "strconv" + "strings" + + "github.com/gopherjs/gopherjs/js" +) + +const ( + jsFuncNamePrefix = "github_com_jtolds_gls_mark" +) + +func jsMarkStack() (f []uintptr) { + lines := strings.Split( + js.Global.Get("Error").New().Get("stack").String(), "\n") + f = make([]uintptr, 0, len(lines)) + for i, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + if i == 0 { + if line != "Error" { + panic("didn't understand js stack trace") + } + continue + } + fields := strings.Fields(line) + if len(fields) < 2 || fields[0] != "at" { + panic("didn't understand js stack trace") + } + + pos := strings.Index(fields[1], jsFuncNamePrefix) + if pos < 0 { + continue + } + pos += len(jsFuncNamePrefix) + if pos >= len(fields[1]) { + panic("didn't understand js stack trace") + } + char := string(fields[1][pos]) + switch char { + case "S": + f = append(f, uintptr(0)) + default: + val, err := strconv.ParseUint(char, 16, 8) + if err != nil { + panic("didn't understand js stack trace") + } + f = append(f, uintptr(val)+1) + } + } + return f +} + +// variables to prevent inlining +var ( + findPtr = func() uintptr { + funcs := jsMarkStack() + if len(funcs) == 0 { + panic("failed to find function pointer") + } + return funcs[0] + } + + getStack = func(offset, amount int) (stack []uintptr, next_offset int) { + return jsMarkStack(), 0 + } +) diff --git a/backend/vendor/github.com/jtolds/gls/stack_tags_main.go b/backend/vendor/github.com/jtolds/gls/stack_tags_main.go new file mode 100644 index 00000000..4da89e44 --- /dev/null +++ b/backend/vendor/github.com/jtolds/gls/stack_tags_main.go @@ -0,0 +1,30 @@ +// +build !js + +package gls + +// This file is used for standard Go builds, which have the expected runtime +// support + +import ( + "runtime" +) + +var ( + findPtr = func() uintptr { + var pc [1]uintptr + n := runtime.Callers(4, pc[:]) + if n != 1 { + panic("failed to find function pointer") + } + return pc[0] + } + + getStack = func(offset, amount int) (stack []uintptr, next_offset int) { + stack = make([]uintptr, amount) + stack = stack[:runtime.Callers(offset, stack)] + if len(stack) < amount { + return stack, 0 + } + return stack, offset + len(stack) + } +) diff --git a/backend/vendor/github.com/leodido/go-urn/.gitignore b/backend/vendor/github.com/leodido/go-urn/.gitignore new file mode 100644 index 00000000..a30b5ab0 --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/.gitignore @@ -0,0 +1,9 @@ +*.exe +*.dll +*.so +*.dylib + +*.test + +*.out +*.txt \ No newline at end of file diff --git a/backend/vendor/github.com/leodido/go-urn/.travis.yml b/backend/vendor/github.com/leodido/go-urn/.travis.yml new file mode 100644 index 00000000..913b6418 --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - tip + +before_install: + - go get -t -v ./... 
+ +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/backend/vendor/github.com/leodido/go-urn/README.md b/backend/vendor/github.com/leodido/go-urn/README.md new file mode 100644 index 00000000..cc902ec0 --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/README.md @@ -0,0 +1,55 @@ +[![Build](https://img.shields.io/travis/leodido/go-urn/master.svg?style=for-the-badge)](https://travis-ci.org/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn) + +**A parser for URNs**. + +> As seen on [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1). + +[API documentation](https://godoc.org/github.com/leodido/go-urn). + +## Installation + +``` +go get github.com/leodido/go-urn +``` + +## Performances + +This implementation results to be really fast. + +Usually below ½ microsecond on my machine[1](#mymachine). + +Notice it also performs, while parsing: + +1. fine-grained and informative erroring +2. specific-string normalization + +``` +ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op +ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op +ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op +ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op +ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op +ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op +ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op +ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op +ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op +ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op +ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op +no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op +no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op +no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op +no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op +no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op +no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op +no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op +no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op +no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op +``` + +--- + +* [1]: Intel Core i7-7600U CPU @ 2.80GHz + +--- + +[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon) \ No newline at end of file diff --git a/backend/vendor/github.com/leodido/go-urn/machine.go b/backend/vendor/github.com/leodido/go-urn/machine.go new 
file mode 100644 index 00000000..d621ea6e --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/machine.go @@ -0,0 +1,1670 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + + +const start int = 1 +const first_final int = 44 + +const en_fail int = 46 +const en_main int = 1 + + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine() Machine { + m := &machine{} + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 string. +func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.tolower = []int{} + output := &URN{} + + { + m.cs = start + } + + + { + if (m.p) == (m.pe) { + goto _test_eof + } + switch m.cs { + case 1: + goto st_case_1 + case 0: + goto st_case_0 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + case 37: + goto st_case_37 + case 38: + goto st_case_38 + case 44: + goto st_case_44 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 45: + goto st_case_45 + case 41: + goto st_case_41 + case 42: + goto st_case_42 + case 43: + goto st_case_43 + case 46: + goto st_case_46 + } + goto st_out + st_case_1: + switch (m.data)[(m.p)] { + case 85: + goto tr1 + case 117: + goto tr1 + } + goto tr0 + tr0: + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr3: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr6: + 
m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr41: + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr44: + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr50: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr52: + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + st_case_0: + st0: + m.cs = 0 + goto _out + tr1: + m.pb = m.p + + goto st2 + st2: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof2 + } + st_case_2: + switch (m.data)[(m.p)] { + case 82: + goto st3 + case 114: + goto st3 + } + goto tr0 + st3: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof3 + } + st_case_3: + switch (m.data)[(m.p)] { + case 78: + goto st4 + case 110: + goto st4 + } + goto tr3 + st4: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof4 + } + st_case_4: + if (m.data)[(m.p)] == 58 { + goto tr5 + } + goto tr0 + tr5: + output.prefix = string(m.text()) + + goto st5 + st5: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof5 + } + st_case_5: + switch (m.data)[(m.p)] { + case 85: + goto tr8 + case 117: + goto tr8 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr7 + } + default: + goto tr7 + } + goto tr6 + tr7: + m.pb = m.p + + goto st6 + st6: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof6 + } + st_case_6: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st7: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof7 + } + st_case_7: + switch (m.data)[(m.p)] { + case 45: + goto st8 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr6 + st8: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof8 + } + st_case_8: + switch (m.data)[(m.p)] { + case 45: + goto st9 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr6 + st9: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof9 + } + st_case_9: + switch (m.data)[(m.p)] { + case 45: + goto st10 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st10 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && 
(m.data)[(m.p)] <= 122 { + goto st10 + } + default: + goto st10 + } + goto tr6 + st10: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof10 + } + st_case_10: + switch (m.data)[(m.p)] { + case 45: + goto st11 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st11 + } + default: + goto st11 + } + goto tr6 + st11: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof11 + } + st_case_11: + switch (m.data)[(m.p)] { + case 45: + goto st12 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st12 + } + default: + goto st12 + } + goto tr6 + st12: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof12 + } + st_case_12: + switch (m.data)[(m.p)] { + case 45: + goto st13 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st13 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st13 + } + default: + goto st13 + } + goto tr6 + st13: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof13 + } + st_case_13: + switch (m.data)[(m.p)] { + case 45: + goto st14 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st14 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st14 + } + default: + goto st14 + } + goto tr6 + st14: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof14 + } + st_case_14: + switch (m.data)[(m.p)] { + case 45: + goto st15 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st15 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st15 + } + default: + goto st15 + } + goto tr6 + st15: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof15 + } + st_case_15: + switch (m.data)[(m.p)] { + case 45: + goto st16 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st16 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st16 + } + default: + goto st16 + } + goto tr6 + st16: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof16 + } + st_case_16: + switch (m.data)[(m.p)] { + case 45: + goto st17 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st17 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st17 + } + default: + goto st17 + } + goto tr6 + st17: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof17 + } + st_case_17: + switch (m.data)[(m.p)] { + case 45: + goto st18 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st18 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st18 + } + default: + goto st18 + } + goto tr6 + st18: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof18 + } + st_case_18: + switch (m.data)[(m.p)] { + case 45: + goto st19 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st19 
+ } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st19 + } + default: + goto st19 + } + goto tr6 + st19: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof19 + } + st_case_19: + switch (m.data)[(m.p)] { + case 45: + goto st20 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st20 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st20 + } + default: + goto st20 + } + goto tr6 + st20: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof20 + } + st_case_20: + switch (m.data)[(m.p)] { + case 45: + goto st21 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st21 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st21 + } + default: + goto st21 + } + goto tr6 + st21: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof21 + } + st_case_21: + switch (m.data)[(m.p)] { + case 45: + goto st22 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st22 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st22 + } + default: + goto st22 + } + goto tr6 + st22: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof22 + } + st_case_22: + switch (m.data)[(m.p)] { + case 45: + goto st23 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st23 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st23 + } + default: + goto st23 + } + goto tr6 + st23: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof23 + } + st_case_23: + switch (m.data)[(m.p)] { + case 45: + goto st24 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st24 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st24 + } + default: + goto st24 + } + goto tr6 + st24: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof24 + } + st_case_24: + switch (m.data)[(m.p)] { + case 45: + goto st25 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st25 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st25 + } + default: + goto st25 + } + goto tr6 + st25: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof25 + } + st_case_25: + switch (m.data)[(m.p)] { + case 45: + goto st26 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st26 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st26 + } + default: + goto st26 + } + goto tr6 + st26: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof26 + } + st_case_26: + switch (m.data)[(m.p)] { + case 45: + goto st27 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st27 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st27 + } + default: + goto st27 + } + goto tr6 + st27: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof27 + } + st_case_27: + switch (m.data)[(m.p)] { + case 45: + goto st28 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + 
if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st28 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st28 + } + default: + goto st28 + } + goto tr6 + st28: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof28 + } + st_case_28: + switch (m.data)[(m.p)] { + case 45: + goto st29 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st29 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st29 + } + default: + goto st29 + } + goto tr6 + st29: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof29 + } + st_case_29: + switch (m.data)[(m.p)] { + case 45: + goto st30 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st30 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st30 + } + default: + goto st30 + } + goto tr6 + st30: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof30 + } + st_case_30: + switch (m.data)[(m.p)] { + case 45: + goto st31 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st31 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st31 + } + default: + goto st31 + } + goto tr6 + st31: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof31 + } + st_case_31: + switch (m.data)[(m.p)] { + case 45: + goto st32 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st32 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st32 + } + default: + goto st32 + } + goto tr6 + st32: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof32 + } + st_case_32: + switch (m.data)[(m.p)] { + case 45: + goto st33 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st33 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st33 + } + default: + goto st33 + } + goto tr6 + st33: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof33 + } + st_case_33: + switch (m.data)[(m.p)] { + case 45: + goto st34 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st34 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st34 + } + default: + goto st34 + } + goto tr6 + st34: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof34 + } + st_case_34: + switch (m.data)[(m.p)] { + case 45: + goto st35 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st35 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st35 + } + default: + goto st35 + } + goto tr6 + st35: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof35 + } + st_case_35: + switch (m.data)[(m.p)] { + case 45: + goto st36 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st36 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st36 + } + default: + goto st36 + } + goto tr6 + st36: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof36 + } + st_case_36: + switch (m.data)[(m.p)] { + case 45: + goto st37 + 
case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st37 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st37 + } + default: + goto st37 + } + goto tr6 + st37: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof37 + } + st_case_37: + if (m.data)[(m.p)] == 58 { + goto tr10 + } + goto tr6 + tr10: + output.ID = string(m.text()) + + goto st38 + st38: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof38 + } + st_case_38: + switch (m.data)[(m.p)] { + case 33: + goto tr42 + case 36: + goto tr42 + case 37: + goto tr43 + case 61: + goto tr42 + case 95: + goto tr42 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr42 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr42 + } + case (m.data)[(m.p)] >= 64: + goto tr42 + } + default: + goto tr42 + } + goto tr41 + tr42: + m.pb = m.p + + goto st44 + st44: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof44 + } + st_case_44: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr41 + tr43: + m.pb = m.p + + goto st39 + st39: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof39 + } + st_case_39: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st40 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st40 + } + default: + goto tr46 + } + goto tr44 + tr46: + m.tolower = append(m.tolower, m.p-m.pb) + + goto st40 + st40: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof40 + } + st_case_40: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st45 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st45 + } + default: + goto tr48 + } + goto tr44 + tr48: + m.tolower = append(m.tolower, m.p-m.pb) + + goto st45 + st45: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof45 + } + st_case_45: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr44 + tr8: + m.pb = m.p + + goto st41 + st41: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof41 + } + st_case_41: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + case 82: + goto st42 + case 114: + goto st42 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st42: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof42 + } + st_case_42: + switch 
(m.data)[(m.p)] { + case 45: + goto st8 + case 58: + goto tr10 + case 78: + goto st43 + case 110: + goto st43 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr50 + st43: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof43 + } + st_case_43: + if (m.data)[(m.p)] == 45 { + goto st9 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr52 + st46: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof46 + } + st_case_46: + switch (m.data)[(m.p)] { + case 10: + goto st0 + case 13: + goto st0 + } + goto st46 + st_out: + _test_eof2: + m.cs = 2 + goto _test_eof + _test_eof3: + m.cs = 3 + goto _test_eof + _test_eof4: + m.cs = 4 + goto _test_eof + _test_eof5: + m.cs = 5 + goto _test_eof + _test_eof6: + m.cs = 6 + goto _test_eof + _test_eof7: + m.cs = 7 + goto _test_eof + _test_eof8: + m.cs = 8 + goto _test_eof + _test_eof9: + m.cs = 9 + goto _test_eof + _test_eof10: + m.cs = 10 + goto _test_eof + _test_eof11: + m.cs = 11 + goto _test_eof + _test_eof12: + m.cs = 12 + goto _test_eof + _test_eof13: + m.cs = 13 + goto _test_eof + _test_eof14: + m.cs = 14 + goto _test_eof + _test_eof15: + m.cs = 15 + goto _test_eof + _test_eof16: + m.cs = 16 + goto _test_eof + _test_eof17: + m.cs = 17 + goto _test_eof + _test_eof18: + m.cs = 18 + goto _test_eof + _test_eof19: + m.cs = 19 + goto _test_eof + _test_eof20: + m.cs = 20 + goto _test_eof + _test_eof21: + m.cs = 21 + goto _test_eof + _test_eof22: + m.cs = 22 + goto _test_eof + _test_eof23: + m.cs = 23 + goto _test_eof + _test_eof24: + m.cs = 24 + goto _test_eof + _test_eof25: + m.cs = 25 + goto _test_eof + _test_eof26: + m.cs = 26 + goto _test_eof + _test_eof27: + m.cs = 27 + goto _test_eof + _test_eof28: + m.cs = 28 + goto _test_eof + _test_eof29: + m.cs = 29 + goto _test_eof + _test_eof30: + m.cs = 30 + goto _test_eof + _test_eof31: + m.cs = 31 + goto _test_eof + _test_eof32: + m.cs = 32 + goto _test_eof + _test_eof33: + m.cs = 33 + goto _test_eof + _test_eof34: + m.cs = 34 + goto _test_eof + _test_eof35: + m.cs = 35 + goto _test_eof + _test_eof36: + m.cs = 36 + goto _test_eof + _test_eof37: + m.cs = 37 + goto _test_eof + _test_eof38: + m.cs = 38 + goto _test_eof + _test_eof44: + m.cs = 44 + goto _test_eof + _test_eof39: + m.cs = 39 + goto _test_eof + _test_eof40: + m.cs = 40 + goto _test_eof + _test_eof45: + m.cs = 45 + goto _test_eof + _test_eof41: + m.cs = 41 + goto _test_eof + _test_eof42: + m.cs = 42 + goto _test_eof + _test_eof43: + m.cs = 43 + goto _test_eof + _test_eof46: + m.cs = 46 + goto _test_eof + + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 44, 45: + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) + + case 1, 2, 4: + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 3: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41: + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + 
goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 38: + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 42: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 43: + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 39, 40: + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + } + } + + _out: + { + } + } + + if m.cs < first_final || m.cs == en_fail { + return nil, m.err + } + + return output, nil +} diff --git a/backend/vendor/github.com/leodido/go-urn/machine.go.rl b/backend/vendor/github.com/leodido/go-urn/machine.go.rl new file mode 100644 index 00000000..3bc05a65 --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/machine.go.rl @@ -0,0 +1,159 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + +%%{ +machine urn; + +# unsigned alphabet +alphtype uint8; + +action mark { + m.pb = m.p +} + +action tolower { + m.tolower = append(m.tolower, m.p - m.pb) +} + +action set_pre { + output.prefix = string(m.text()) +} + +action set_nid { + output.ID = string(m.text()) +} + +action set_nss { + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) +} + +action err_pre { + m.err = fmt.Errorf(errPrefix, m.p) + fhold; + fgoto fail; +} + +action err_nid { + m.err = fmt.Errorf(errIdentifier, m.p) + fhold; + fgoto fail; +} + +action err_nss { + m.err = fmt.Errorf(errSpecificString, m.p) + fhold; + fgoto fail; +} + +action err_urn { + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + fhold; + fgoto fail; +} + +action err_hex { + m.err = fmt.Errorf(errHex, m.p) + fhold; + fgoto fail; +} + +action err_parse { + m.err = fmt.Errorf(errParse, m.p) + fhold; + fgoto fail; +} + +pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre; + +nid = (alnum >mark (alnum | '-'){0,31}) %set_nid; + +hex = '%' (digit | lower | upper >tolower){2} $err(err_hex); + +sss = (alnum | [()+,\-.:=@;$_!*']); + +nss = (sss | hex)+ $err(err_nss); + +fail := (any - [\n\r])* @err{ fgoto main; }; + +main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse); + +}%% + +%% write data noerror noprefix; + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data 
[]byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine() Machine { + m := &machine{} + + %% access m.; + %% variable p m.p; + %% variable pe m.pe; + %% variable eof m.eof; + %% variable data m.data; + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 string. +func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.tolower = []int{} + output := &URN{} + + %% write init; + %% write exec; + + if m.cs < first_final || m.cs == en_fail { + return nil, m.err + } + + return output, nil +} diff --git a/backend/vendor/github.com/leodido/go-urn/makefile b/backend/vendor/github.com/leodido/go-urn/makefile new file mode 100644 index 00000000..362137ad --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/makefile @@ -0,0 +1,17 @@ +SHELL := /bin/bash + +machine.go: machine.go.rl + ragel -Z -G2 -e -o $@ $< + @gofmt -w -s $@ + @sed -i '/^\/\/line/d' $@ + +.PHONY: build +build: machine.go + +.PHONY: bench +bench: *_test.go machine.go + go test -bench=. -benchmem -benchtime=5s ./... + +.PHONY: tests +tests: *_test.go machine.go + go test -race -timeout 10s -coverprofile=coverage.out -covermode=atomic -v ./... \ No newline at end of file diff --git a/backend/vendor/github.com/leodido/go-urn/urn.go b/backend/vendor/github.com/leodido/go-urn/urn.go new file mode 100644 index 00000000..b903b7b3 --- /dev/null +++ b/backend/vendor/github.com/leodido/go-urn/urn.go @@ -0,0 +1,63 @@ +package urn + +import ( + "strings" +) + +// URN represents an Uniform Resource Name. +// +// The general form represented is: +// +// urn:<identifier>:<specific string> +// +// Details at https://tools.ietf.org/html/rfc2141. +type URN struct { + prefix string // Static prefix. Equal to "urn" when empty. + ID string // Namespace identifier + SS string // Namespace specific string + norm string // Normalized namespace specific string +} + +// Normalize turns the receiving URN into its norm version. +// +// Which means: lowercase prefix, lowercase namespace identifier, and immutate namespace specific string chars (except tokens which are lowercased). +func (u *URN) Normalize() *URN { + return &URN{ + prefix: "urn", + ID: strings.ToLower(u.ID), + SS: u.norm, + } +} + +// Equal checks the lexical equivalence of the current URN with another one. +func (u *URN) Equal(x *URN) bool { + return *u.Normalize() == *x.Normalize() +} + +// String reassembles the URN into a valid URN string. +// +// This requires both ID and SS fields to be non-empty. +// Otherwise it returns an empty string. +// +// Default URN prefix is "urn". +func (u *URN) String() string { + var res string + if u.ID != "" && u.SS != "" { + if u.prefix == "" { + res += "urn" + } + res += u.prefix + ":" + u.ID + ":" + u.SS + } + + return res +} + +// Parse is responsible to create an URN instance from a byte array matching the correct URN syntax.
+func Parse(u []byte) (*URN, bool) { + urn, err := NewMachine().Parse(u) + if err != nil { + return nil, false + } + + return urn, true +} diff --git a/backend/vendor/github.com/smartystreets/assertions/.gitignore b/backend/vendor/github.com/smartystreets/assertions/.gitignore new file mode 100644 index 00000000..07d3c71c --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +Thumbs.db +*.iml +/.idea +coverage.out diff --git a/backend/vendor/github.com/smartystreets/assertions/.travis.yml b/backend/vendor/github.com/smartystreets/assertions/.travis.yml new file mode 100644 index 00000000..72df752f --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.x + +install: + - go get -t ./... + +script: go test ./... -v + +sudo: false diff --git a/backend/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md b/backend/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md new file mode 100644 index 00000000..1820ecb3 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing + +In general, the code posted to the [SmartyStreets github organization](https://github.com/smartystreets) is created to solve specific problems at SmartyStreets that are ancillary to our core products in the address verification industry and may or may not be useful to other organizations or developers. Our reason for posting said code isn't necessarily to solicit feedback or contributions from the community but more as a showcase of some of the approaches to solving problems we have adopted. + +Having stated that, we do consider issues raised by other githubbers as well as contributions submitted via pull requests. When submitting such a pull request, please follow these guidelines: + +- _Look before you leap:_ If the changes you plan to make are significant, it's in everyone's best interest for you to discuss them with a SmartyStreets team member prior to opening a pull request. +- _License and ownership:_ If modifying the `LICENSE.md` file, limit your changes to fixing typographical mistakes. Do NOT modify the actual terms in the license or the copyright by **SmartyStreets, LLC**. Code submitted to SmartyStreets projects becomes property of SmartyStreets and must be compatible with the associated license. +- _Testing:_ If the code you are submitting resides in packages/modules covered by automated tests, be sure to add passing tests that cover your changes and assert expected behavior and state. Submit the additional test cases as part of your change set. +- _Style:_ Match your approach to **naming** and **formatting** with the surrounding code. Basically, the code you submit shouldn't stand out. + - "Naming" refers to such constructs as variables, methods, functions, classes, structs, interfaces, packages, modules, directories, files, etc... + - "Formatting" refers to such constructs as whitespace, horizontal line length, vertical function length, vertical file length, indentation, curly braces, etc... 
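For orientation, a minimal usage sketch of the vendored go-urn package added above (machine.go.rl, urn.go); the URN string is an arbitrary example and only exercises the `Parse` and `Normalize` API shown in this diff.

```go
package main

import (
	"fmt"

	urn "github.com/leodido/go-urn"
)

func main() {
	// Parse returns (*URN, bool); ok is false when the input is not a valid RFC 2141 URN.
	u, ok := urn.Parse([]byte("URN:ISBN:978-3-16-148410-0"))
	if !ok {
		fmt.Println("invalid URN")
		return
	}

	fmt.Println(u.ID) // namespace identifier: "ISBN"
	fmt.Println(u.SS) // namespace specific string: "978-3-16-148410-0"

	// Normalize lowercases the prefix and the namespace identifier.
	fmt.Println(u.Normalize().String()) // "urn:isbn:978-3-16-148410-0"
}
```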
diff --git a/backend/vendor/github.com/smartystreets/assertions/LICENSE.md b/backend/vendor/github.com/smartystreets/assertions/LICENSE.md new file mode 100644 index 00000000..8ea6f945 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/LICENSE.md @@ -0,0 +1,23 @@ +Copyright (c) 2016 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. diff --git a/backend/vendor/github.com/smartystreets/assertions/README.md b/backend/vendor/github.com/smartystreets/assertions/README.md new file mode 100644 index 00000000..208a4040 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/README.md @@ -0,0 +1,619 @@ +# assertions +-- + import "github.com/smartystreets/assertions" + +Package assertions contains the implementations for all assertions which are +referenced in goconvey's `convey` package +(github.com/smartystreets/goconvey/convey) and gunit +(github.com/smartystreets/gunit) for use with the So(...) method. They can also +be used in traditional Go test functions and even in applications. + +https://smartystreets.com + +Many of the assertions lean heavily on work done by Aaron Jacobs in his +excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The +ShouldResemble assertion leans heavily on work done by Daniel Jacques in his +very helpful go-render library. (https://github.com/luci/go-render) + +## Usage + +#### func GoConveyMode + +```go +func GoConveyMode(yes bool) +``` +GoConveyMode provides control over JSON serialization of failures. When using +the assertions in this package from the convey package JSON results are very +helpful and can be rendered in a DIFF view. In that case, this function will be +called with a true value to enable the JSON serialization. By default, the +assertions in this package will not serializer a JSON result, making standalone +usage more convenient. + +#### func ShouldAlmostEqual + +```go +func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string +``` +ShouldAlmostEqual makes sure that two parameters are close enough to being +equal. The acceptable delta may be specified with a third argument, or a very +small default delta will be used. 
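A quick, illustrative sketch of the `ShouldAlmostEqual` signature documented above; the numeric values are arbitrary, and every assertion in this package returns an empty string on success and a failure message otherwise.

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// Without a third argument, a very small default delta is used.
	fmt.Println(assertions.ShouldAlmostEqual(1.0, 1.0+1e-12) == "") // true

	// The acceptable delta may be passed explicitly as the extra argument.
	fmt.Println(assertions.ShouldAlmostEqual(3.14, 3.0, 0.2) == "") // true
	fmt.Println(assertions.ShouldAlmostEqual(3.14, 3.0, 0.1))       // prints a failure message
}
```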
+ +#### func ShouldBeBetween + +```go +func ShouldBeBetween(actual interface{}, expected ...interface{}) string +``` +ShouldBeBetween receives exactly three parameters: an actual value, a lower +bound, and an upper bound. It ensures that the actual value is between both +bounds (but not equal to either of them). + +#### func ShouldBeBetweenOrEqual + +```go +func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string +``` +ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a +lower bound, and an upper bound. It ensures that the actual value is between +both bounds or equal to one of them. + +#### func ShouldBeBlank + +```go +func ShouldBeBlank(actual interface{}, expected ...interface{}) string +``` +ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal +to "". + +#### func ShouldBeChronological + +```go +func ShouldBeChronological(actual interface{}, expected ...interface{}) string +``` +ShouldBeChronological receives a []time.Time slice and asserts that they are in +chronological order starting with the first time.Time as the earliest. + +#### func ShouldBeEmpty + +```go +func ShouldBeEmpty(actual interface{}, expected ...interface{}) string +``` +ShouldBeEmpty receives a single parameter (actual) and determines whether or not +calling len(actual) would return `0`. It obeys the rules specified by the len +function for determining length: http://golang.org/pkg/builtin/#len + +#### func ShouldBeError + +```go +func ShouldBeError(actual interface{}, expected ...interface{}) string +``` +ShouldBeError asserts that the first argument implements the error interface. It +also compares the first argument against the second argument if provided (which +must be an error message string or another error value). + +#### func ShouldBeFalse + +```go +func ShouldBeFalse(actual interface{}, expected ...interface{}) string +``` +ShouldBeFalse receives a single parameter and ensures that it is false. + +#### func ShouldBeGreaterThan + +```go +func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string +``` +ShouldBeGreaterThan receives exactly two parameters and ensures that the first +is greater than the second. + +#### func ShouldBeGreaterThanOrEqualTo + +```go +func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string +``` +ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that +the first is greater than or equal to the second. + +#### func ShouldBeIn + +```go +func ShouldBeIn(actual interface{}, expected ...interface{}) string +``` +ShouldBeIn receives at least 2 parameters. The first is a proposed member of the +collection that is passed in either as the second parameter, or of the +collection that is comprised of all the remaining parameters. This assertion +ensures that the proposed member is in the collection (using ShouldEqual). + +#### func ShouldBeLessThan + +```go +func ShouldBeLessThan(actual interface{}, expected ...interface{}) string +``` +ShouldBeLessThan receives exactly two parameters and ensures that the first is +less than the second. + +#### func ShouldBeLessThanOrEqualTo + +```go +func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string +``` +ShouldBeLessThan receives exactly two parameters and ensures that the first is +less than or equal to the second. 
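An illustrative sketch of the range and membership assertions listed above, with made-up values:

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// ShouldBeBetween excludes the bounds; ShouldBeBetweenOrEqual includes them.
	fmt.Println(assertions.ShouldBeBetween(5, 1, 10) == "")        // true
	fmt.Println(assertions.ShouldBeBetweenOrEqual(1, 1, 10) == "") // true

	// ShouldBeIn accepts either a collection or the members as variadic arguments.
	fmt.Println(assertions.ShouldBeIn("b", []string{"a", "b", "c"}) == "") // true
	fmt.Println(assertions.ShouldBeIn(2, 1, 2, 3) == "")                   // true

	// ShouldBeEmpty follows the rules of len().
	fmt.Println(assertions.ShouldBeEmpty([]int{}) == "") // true
}
```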
+ +#### func ShouldBeNil + +```go +func ShouldBeNil(actual interface{}, expected ...interface{}) string +``` +ShouldBeNil receives a single parameter and ensures that it is nil. + +#### func ShouldBeTrue + +```go +func ShouldBeTrue(actual interface{}, expected ...interface{}) string +``` +ShouldBeTrue receives a single parameter and ensures that it is true. + +#### func ShouldBeZeroValue + +```go +func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string +``` +ShouldBeZeroValue receives a single parameter and ensures that it is the Go +equivalent of the default value, or "zero" value. + +#### func ShouldContain + +```go +func ShouldContain(actual interface{}, expected ...interface{}) string +``` +ShouldContain receives exactly two parameters. The first is a slice and the +second is a proposed member. Membership is determined using ShouldEqual. + +#### func ShouldContainKey + +```go +func ShouldContainKey(actual interface{}, expected ...interface{}) string +``` +ShouldContainKey receives exactly two parameters. The first is a map and the +second is a proposed key. Keys are compared with a simple '=='. + +#### func ShouldContainSubstring + +```go +func ShouldContainSubstring(actual interface{}, expected ...interface{}) string +``` +ShouldContainSubstring receives exactly 2 string parameters and ensures that the +first contains the second as a substring. + +#### func ShouldEndWith + +```go +func ShouldEndWith(actual interface{}, expected ...interface{}) string +``` +ShouldEndWith receives exactly 2 string parameters and ensures that the first +ends with the second. + +#### func ShouldEqual + +```go +func ShouldEqual(actual interface{}, expected ...interface{}) string +``` +ShouldEqual receives exactly two parameters and does an equality check using the +following semantics: 1. If the expected and actual values implement an Equal +method in the form `func (this T) Equal(that T) bool` then call the method. If +true, they are equal. 2. The expected and actual values are judged equal or not +by oglematchers.Equals. + +#### func ShouldEqualJSON + +```go +func ShouldEqualJSON(actual interface{}, expected ...interface{}) string +``` +ShouldEqualJSON receives exactly two parameters and does an equality check by +marshalling to JSON + +#### func ShouldEqualTrimSpace + +```go +func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string +``` +ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the +first is equal to the second after removing all leading and trailing whitespace +using strings.TrimSpace(first). + +#### func ShouldEqualWithout + +```go +func ShouldEqualWithout(actual interface{}, expected ...interface{}) string +``` +ShouldEqualWithout receives exactly 3 string parameters and ensures that the +first is equal to the second after removing all instances of the third from the +first using strings.Replace(first, third, "", -1). + +#### func ShouldHappenAfter + +```go +func ShouldHappenAfter(actual interface{}, expected ...interface{}) string +``` +ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the +first happens after the second. + +#### func ShouldHappenBefore + +```go +func ShouldHappenBefore(actual interface{}, expected ...interface{}) string +``` +ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the +first happens before the second. 
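To make the `ShouldEqual` semantics above concrete, a sketch with a hypothetical type that defines an `Equal` method of the documented form, plus a `ShouldEqualJSON` call (the values are invented):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/smartystreets/assertions"
)

// folded defines Equal in the form `func (this T) Equal(that T) bool`,
// so ShouldEqual delegates to it instead of a plain equality match.
type folded string

func (this folded) Equal(that folded) bool {
	return strings.EqualFold(string(this), string(that))
}

func main() {
	// Equality is delegated to the Equal method, so these compare equal.
	fmt.Println(assertions.ShouldEqual(folded("Crawlab"), folded("crawlab")) == "") // true

	// ShouldEqualJSON compares canonical re-marshalled JSON, so key order does not matter.
	fmt.Println(assertions.ShouldEqualJSON(`{"a":1,"b":2}`, `{"b":2,"a":1}`) == "") // true
}
```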
+ +#### func ShouldHappenBetween + +```go +func ShouldHappenBetween(actual interface{}, expected ...interface{}) string +``` +ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the +first happens between (not on) the second and third. + +#### func ShouldHappenOnOrAfter + +```go +func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that +the first happens on or after the second. + +#### func ShouldHappenOnOrBefore + +```go +func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that +the first happens on or before the second. + +#### func ShouldHappenOnOrBetween + +```go +func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string +``` +ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that +the first happens between or on the second and third. + +#### func ShouldHappenWithin + +```go +func ShouldHappenWithin(actual interface{}, expected ...interface{}) string +``` +ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 +arguments) and asserts that the first time.Time happens within or on the +duration specified relative to the other time.Time. + +#### func ShouldHaveLength + +```go +func ShouldHaveLength(actual interface{}, expected ...interface{}) string +``` +ShouldHaveLength receives 2 parameters. The first is a collection to check the +length of, the second being the expected length. It obeys the rules specified by +the len function for determining length: http://golang.org/pkg/builtin/#len + +#### func ShouldHaveSameTypeAs + +```go +func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string +``` +ShouldHaveSameTypeAs receives exactly two parameters and compares their +underlying types for equality. + +#### func ShouldImplement + +```go +func ShouldImplement(actual interface{}, expectedList ...interface{}) string +``` +ShouldImplement receives exactly two parameters and ensures that the first +implements the interface type of the second. + +#### func ShouldNotAlmostEqual + +```go +func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual + +#### func ShouldNotBeBetween + +```go +func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBetween receives exactly three parameters: an actual value, a lower +bound, and an upper bound. It ensures that the actual value is NOT between both +bounds. + +#### func ShouldNotBeBetweenOrEqual + +```go +func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a +lower bound, and an upper bound. It ensures that the actual value is nopt +between the bounds nor equal to either of them. + +#### func ShouldNotBeBlank + +```go +func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is +equal to "". + +#### func ShouldNotBeChronological + +```go +func ShouldNotBeChronological(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeChronological receives a []time.Time slice and asserts that they are +NOT in chronological order. 
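A brief sketch of the time-based assertions documented above, using arbitrary timestamps:

```go
package main

import (
	"fmt"
	"time"

	"github.com/smartystreets/assertions"
)

func main() {
	base := time.Date(2019, 10, 1, 12, 0, 0, 0, time.UTC)

	// ShouldHappenWithin takes (actual, duration, reference) and includes the boundary.
	fmt.Println(assertions.ShouldHappenWithin(base.Add(30*time.Second), time.Minute, base) == "") // true

	// ShouldHappenBetween excludes both endpoints.
	fmt.Println(assertions.ShouldHappenBetween(base, base.Add(-time.Hour), base.Add(time.Hour)) == "") // true

	// ShouldHaveLength works with anything len() accepts.
	fmt.Println(assertions.ShouldHaveLength("abc", 3) == "") // true
}
```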
+ +#### func ShouldNotBeEmpty + +```go +func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeEmpty receives a single parameter (actual) and determines whether or +not calling len(actual) would return a value greater than zero. It obeys the +rules specified by the `len` function for determining length: +http://golang.org/pkg/builtin/#len + +#### func ShouldNotBeIn + +```go +func ShouldNotBeIn(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of +the collection that is passed in either as the second parameter, or of the +collection that is comprised of all the remaining parameters. This assertion +ensures that the proposed member is NOT in the collection (using ShouldEqual). + +#### func ShouldNotBeNil + +```go +func ShouldNotBeNil(actual interface{}, expected ...interface{}) string +``` +ShouldNotBeNil receives a single parameter and ensures that it is not nil. + +#### func ShouldNotBeZeroValue + +```go +func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string +``` +ShouldBeZeroValue receives a single parameter and ensures that it is NOT the Go +equivalent of the default value, or "zero" value. + +#### func ShouldNotContain + +```go +func ShouldNotContain(actual interface{}, expected ...interface{}) string +``` +ShouldNotContain receives exactly two parameters. The first is a slice and the +second is a proposed member. Membership is determinied using ShouldEqual. + +#### func ShouldNotContainKey + +```go +func ShouldNotContainKey(actual interface{}, expected ...interface{}) string +``` +ShouldNotContainKey receives exactly two parameters. The first is a map and the +second is a proposed absent key. Keys are compared with a simple '=='. + +#### func ShouldNotContainSubstring + +```go +func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string +``` +ShouldNotContainSubstring receives exactly 2 string parameters and ensures that +the first does NOT contain the second as a substring. + +#### func ShouldNotEndWith + +```go +func ShouldNotEndWith(actual interface{}, expected ...interface{}) string +``` +ShouldEndWith receives exactly 2 string parameters and ensures that the first +does not end with the second. + +#### func ShouldNotEqual + +```go +func ShouldNotEqual(actual interface{}, expected ...interface{}) string +``` +ShouldNotEqual receives exactly two parameters and does an inequality check. See +ShouldEqual for details on how equality is determined. + +#### func ShouldNotHappenOnOrBetween + +```go +func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string +``` +ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts +that the first does NOT happen between or on the second or third. + +#### func ShouldNotHappenWithin + +```go +func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string +``` +ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 +arguments) and asserts that the first time.Time does NOT happen within or on the +duration specified relative to the other time.Time. + +#### func ShouldNotHaveSameTypeAs + +```go +func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string +``` +ShouldNotHaveSameTypeAs receives exactly two parameters and compares their +underlying types for inequality. 
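The negated assertions above follow the same calling convention; for instance (values are made up):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	counts := map[string]int{"spiders": 2}

	fmt.Println(assertions.ShouldNotContainKey(counts, "tasks") == "")        // true
	fmt.Println(assertions.ShouldNotBeIn("d", []string{"a", "b", "c"}) == "") // true
	fmt.Println(assertions.ShouldNotBeNil(counts) == "")                      // true
}
```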
+ +#### func ShouldNotImplement + +```go +func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string +``` +ShouldNotImplement receives exactly two parameters and ensures that the first +does NOT implement the interface type of the second. + +#### func ShouldNotPanic + +```go +func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) +``` +ShouldNotPanic receives a void, niladic function and expects to execute the +function without any panic. + +#### func ShouldNotPanicWith + +```go +func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) +``` +ShouldNotPanicWith receives a void, niladic function and expects to recover a +panic whose content differs from the second argument. + +#### func ShouldNotPointTo + +```go +func ShouldNotPointTo(actual interface{}, expected ...interface{}) string +``` +ShouldNotPointTo receives exactly two parameters and checks to see that they +point to different addresess. + +#### func ShouldNotResemble + +```go +func ShouldNotResemble(actual interface{}, expected ...interface{}) string +``` +ShouldNotResemble receives exactly two parameters and does an inverse deep equal +check (see reflect.DeepEqual) + +#### func ShouldNotStartWith + +```go +func ShouldNotStartWith(actual interface{}, expected ...interface{}) string +``` +ShouldNotStartWith receives exactly 2 string parameters and ensures that the +first does not start with the second. + +#### func ShouldPanic + +```go +func ShouldPanic(actual interface{}, expected ...interface{}) (message string) +``` +ShouldPanic receives a void, niladic function and expects to recover a panic. + +#### func ShouldPanicWith + +```go +func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) +``` +ShouldPanicWith receives a void, niladic function and expects to recover a panic +with the second argument as the content. + +#### func ShouldPointTo + +```go +func ShouldPointTo(actual interface{}, expected ...interface{}) string +``` +ShouldPointTo receives exactly two parameters and checks to see that they point +to the same address. + +#### func ShouldResemble + +```go +func ShouldResemble(actual interface{}, expected ...interface{}) string +``` +ShouldResemble receives exactly two parameters and does a deep equal check (see +reflect.DeepEqual) + +#### func ShouldStartWith + +```go +func ShouldStartWith(actual interface{}, expected ...interface{}) string +``` +ShouldStartWith receives exactly 2 string parameters and ensures that the first +starts with the second. + +#### func So + +```go +func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) +``` +So is a convenience function (as opposed to an inconvenience function?) for +running assertions on arbitrary arguments in any context, be it for testing or +even application logging. It allows you to perform assertion-like behavior (and +get nicely formatted messages detailing discrepancies) but without the program +blowing up or panicking. All that is required is to import this package and call +`So` with one of the assertions exported by this package as the second +parameter. The first return parameter is a boolean indicating if the assertion +was true. The second return parameter is the well-formatted message showing why +an assertion was incorrect, or blank if the assertion was correct. 
+ +Example: + + if ok, message := So(x, ShouldBeGreaterThan, y); !ok { + log.Println(message) + } + +For an alternative implementation of So (that provides more flexible return +options) see the `So` function in the package at +github.com/smartystreets/assertions/assert. + +#### type Assertion + +```go +type Assertion struct { +} +``` + + +#### func New + +```go +func New(t testingT) *Assertion +``` +New swallows the *testing.T struct and prints failed assertions using t.Error. +Example: assertions.New(t).So(1, should.Equal, 1) + +#### func (*Assertion) Failed + +```go +func (this *Assertion) Failed() bool +``` +Failed reports whether any calls to So (on this Assertion instance) have failed. + +#### func (*Assertion) So + +```go +func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool +``` +So calls the standalone So function and additionally, calls t.Error in failure +scenarios. + +#### type FailureView + +```go +type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} +``` + +This struct is also declared in +github.com/smartystreets/goconvey/convey/reporting. The json struct tags should +be equal in both declarations. + +#### type Serializer + +```go +type Serializer interface { + // contains filtered or unexported methods +} +``` diff --git a/backend/vendor/github.com/smartystreets/assertions/collections.go b/backend/vendor/github.com/smartystreets/assertions/collections.go new file mode 100644 index 00000000..b534d4ba --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/collections.go @@ -0,0 +1,244 @@ +package assertions + +import ( + "fmt" + "reflect" + + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldContain receives exactly two parameters. The first is a slice and the +// second is a proposed member. Membership is determined using ShouldEqual. +func ShouldContain(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { + typeName := reflect.TypeOf(actual) + + if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { + return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) + } + return fmt.Sprintf(shouldHaveContained, typeName, expected[0]) + } + return success +} + +// ShouldNotContain receives exactly two parameters. The first is a slice and the +// second is a proposed member. Membership is determinied using ShouldEqual. +func ShouldNotContain(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + typeName := reflect.TypeOf(actual) + + if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil { + if fmt.Sprintf("%v", matchError) == "which is not a slice or array" { + return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName) + } + return success + } + return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0]) +} + +// ShouldContainKey receives exactly two parameters. The first is a map and the +// second is a proposed key. Keys are compared with a simple '=='. 
+func ShouldContainKey(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + keys, isMap := mapKeys(actual) + if !isMap { + return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) + } + + if !keyFound(keys, expected[0]) { + return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected) + } + + return "" +} + +// ShouldNotContainKey receives exactly two parameters. The first is a map and the +// second is a proposed absent key. Keys are compared with a simple '=='. +func ShouldNotContainKey(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + keys, isMap := mapKeys(actual) + if !isMap { + return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual)) + } + + if keyFound(keys, expected[0]) { + return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected) + } + + return "" +} + +func mapKeys(m interface{}) ([]reflect.Value, bool) { + value := reflect.ValueOf(m) + if value.Kind() != reflect.Map { + return nil, false + } + return value.MapKeys(), true +} +func keyFound(keys []reflect.Value, expectedKey interface{}) bool { + found := false + for _, key := range keys { + if key.Interface() == expectedKey { + found = true + } + } + return found +} + +// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection +// that is passed in either as the second parameter, or of the collection that is comprised +// of all the remaining parameters. This assertion ensures that the proposed member is in +// the collection (using ShouldEqual). +func ShouldBeIn(actual interface{}, expected ...interface{}) string { + if fail := atLeast(1, expected); fail != success { + return fail + } + + if len(expected) == 1 { + return shouldBeIn(actual, expected[0]) + } + return shouldBeIn(actual, expected) +} +func shouldBeIn(actual interface{}, expected interface{}) string { + if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil { + return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected)) + } + return success +} + +// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection +// that is passed in either as the second parameter, or of the collection that is comprised +// of all the remaining parameters. This assertion ensures that the proposed member is NOT in +// the collection (using ShouldEqual). +func ShouldNotBeIn(actual interface{}, expected ...interface{}) string { + if fail := atLeast(1, expected); fail != success { + return fail + } + + if len(expected) == 1 { + return shouldNotBeIn(actual, expected[0]) + } + return shouldNotBeIn(actual, expected) +} +func shouldNotBeIn(actual interface{}, expected interface{}) string { + if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil { + return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected)) + } + return success +} + +// ShouldBeEmpty receives a single parameter (actual) and determines whether or not +// calling len(actual) would return `0`. 
It obeys the rules specified by the len +// function for determining length: http://golang.org/pkg/builtin/#len +func ShouldBeEmpty(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + if actual == nil { + return success + } + + value := reflect.ValueOf(actual) + switch value.Kind() { + case reflect.Slice: + if value.Len() == 0 { + return success + } + case reflect.Chan: + if value.Len() == 0 { + return success + } + case reflect.Map: + if value.Len() == 0 { + return success + } + case reflect.String: + if value.Len() == 0 { + return success + } + case reflect.Ptr: + elem := value.Elem() + kind := elem.Kind() + if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 { + return success + } + } + + return fmt.Sprintf(shouldHaveBeenEmpty, actual) +} + +// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not +// calling len(actual) would return a value greater than zero. It obeys the rules +// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len +func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + if empty := ShouldBeEmpty(actual, expected...); empty != success { + return success + } + return fmt.Sprintf(shouldNotHaveBeenEmpty, actual) +} + +// ShouldHaveLength receives 2 parameters. The first is a collection to check +// the length of, the second being the expected length. It obeys the rules +// specified by the len function for determining length: +// http://golang.org/pkg/builtin/#len +func ShouldHaveLength(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + var expectedLen int64 + lenValue := reflect.ValueOf(expected[0]) + switch lenValue.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + expectedLen = lenValue.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + expectedLen = int64(lenValue.Uint()) + default: + return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0])) + } + + if expectedLen < 0 { + return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0]) + } + + value := reflect.ValueOf(actual) + switch value.Kind() { + case reflect.Slice, + reflect.Chan, + reflect.Map, + reflect.String: + if int64(value.Len()) == expectedLen { + return success + } else { + return fmt.Sprintf(shouldHaveHadLength, expectedLen, value.Len(), actual) + } + case reflect.Ptr: + elem := value.Elem() + kind := elem.Kind() + if kind == reflect.Slice || kind == reflect.Array { + if int64(elem.Len()) == expectedLen { + return success + } else { + return fmt.Sprintf(shouldHaveHadLength, expectedLen, elem.Len(), actual) + } + } + } + return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual)) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/doc.go b/backend/vendor/github.com/smartystreets/assertions/doc.go new file mode 100644 index 00000000..ba30a926 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/doc.go @@ -0,0 +1,109 @@ +// Package assertions contains the implementations for all assertions which +// are referenced in goconvey's `convey` package +// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit) +// for use with the So(...) method. 
+// They can also be used in traditional Go test functions and even in +// applications. +// +// https://smartystreets.com +// +// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library. +// (https://github.com/jacobsa/oglematchers) +// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library. +// (https://github.com/luci/go-render) +package assertions + +import ( + "fmt" + "runtime" +) + +// By default we use a no-op serializer. The actual Serializer provides a JSON +// representation of failure results on selected assertions so the goconvey +// web UI can display a convenient diff. +var serializer Serializer = new(noopSerializer) + +// GoConveyMode provides control over JSON serialization of failures. When +// using the assertions in this package from the convey package JSON results +// are very helpful and can be rendered in a DIFF view. In that case, this function +// will be called with a true value to enable the JSON serialization. By default, +// the assertions in this package will not serializer a JSON result, making +// standalone usage more convenient. +func GoConveyMode(yes bool) { + if yes { + serializer = newSerializer() + } else { + serializer = new(noopSerializer) + } +} + +type testingT interface { + Error(args ...interface{}) +} + +type Assertion struct { + t testingT + failed bool +} + +// New swallows the *testing.T struct and prints failed assertions using t.Error. +// Example: assertions.New(t).So(1, should.Equal, 1) +func New(t testingT) *Assertion { + return &Assertion{t: t} +} + +// Failed reports whether any calls to So (on this Assertion instance) have failed. +func (this *Assertion) Failed() bool { + return this.failed +} + +// So calls the standalone So function and additionally, calls t.Error in failure scenarios. +func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool { + ok, result := So(actual, assert, expected...) + if !ok { + this.failed = true + _, file, line, _ := runtime.Caller(1) + this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result)) + } + return ok +} + +// So is a convenience function (as opposed to an inconvenience function?) +// for running assertions on arbitrary arguments in any context, be it for testing or even +// application logging. It allows you to perform assertion-like behavior (and get nicely +// formatted messages detailing discrepancies) but without the program blowing up or panicking. +// All that is required is to import this package and call `So` with one of the assertions +// exported by this package as the second parameter. +// The first return parameter is a boolean indicating if the assertion was true. The second +// return parameter is the well-formatted message showing why an assertion was incorrect, or +// blank if the assertion was correct. +// +// Example: +// +// if ok, message := So(x, ShouldBeGreaterThan, y); !ok { +// log.Println(message) +// } +// +// For an alternative implementation of So (that provides more flexible return options) +// see the `So` function in the package at github.com/smartystreets/assertions/assert. +func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) { + if result := so(actual, assert, expected...); len(result) == 0 { + return true, result + } else { + return false, result + } +} + +// so is like So, except that it only returns the string message, which is blank if the +// assertion passed. 
Used to facilitate testing. +func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string { + return assert(actual, expected...) +} + +// assertion is an alias for a function with a signature that the So() +// function can handle. Any future or custom assertions should conform to this +// method signature. The return value should be an empty string if the assertion +// passes and a well-formed failure message if not. +type assertion func(actual interface{}, expected ...interface{}) string + +//////////////////////////////////////////////////////////////////////////// diff --git a/backend/vendor/github.com/smartystreets/assertions/equal_method.go b/backend/vendor/github.com/smartystreets/assertions/equal_method.go new file mode 100644 index 00000000..c4fc38fa --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/equal_method.go @@ -0,0 +1,75 @@ +package assertions + +import "reflect" + +type equalityMethodSpecification struct { + a interface{} + b interface{} + + aType reflect.Type + bType reflect.Type + + equalMethod reflect.Value +} + +func newEqualityMethodSpecification(a, b interface{}) *equalityMethodSpecification { + return &equalityMethodSpecification{ + a: a, + b: b, + } +} + +func (this *equalityMethodSpecification) IsSatisfied() bool { + if !this.bothAreSameType() { + return false + } + if !this.typeHasEqualMethod() { + return false + } + if !this.equalMethodReceivesSameTypeForComparison() { + return false + } + if !this.equalMethodReturnsBool() { + return false + } + return true +} + +func (this *equalityMethodSpecification) bothAreSameType() bool { + this.aType = reflect.TypeOf(this.a) + if this.aType == nil { + return false + } + if this.aType.Kind() == reflect.Ptr { + this.aType = this.aType.Elem() + } + this.bType = reflect.TypeOf(this.b) + return this.aType == this.bType +} +func (this *equalityMethodSpecification) typeHasEqualMethod() bool { + aInstance := reflect.ValueOf(this.a) + this.equalMethod = aInstance.MethodByName("Equal") + return this.equalMethod != reflect.Value{} +} + +func (this *equalityMethodSpecification) equalMethodReceivesSameTypeForComparison() bool { + signature := this.equalMethod.Type() + return signature.NumIn() == 1 && signature.In(0) == this.aType +} + +func (this *equalityMethodSpecification) equalMethodReturnsBool() bool { + signature := this.equalMethod.Type() + return signature.NumOut() == 1 && signature.Out(0) == reflect.TypeOf(true) +} + +func (this *equalityMethodSpecification) AreEqual() bool { + a := reflect.ValueOf(this.a) + b := reflect.ValueOf(this.b) + return areEqual(a, b) && areEqual(b, a) +} +func areEqual(receiver reflect.Value, argument reflect.Value) bool { + equalMethod := receiver.MethodByName("Equal") + argumentList := []reflect.Value{argument} + result := equalMethod.Call(argumentList) + return result[0].Bool() +} diff --git a/backend/vendor/github.com/smartystreets/assertions/equality.go b/backend/vendor/github.com/smartystreets/assertions/equality.go new file mode 100644 index 00000000..37a49f4e --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/equality.go @@ -0,0 +1,331 @@ +package assertions + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "strings" + + "github.com/smartystreets/assertions/internal/go-render/render" + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldEqual receives exactly two parameters and does an equality check +// using the following semantics: +// 1. 
If the expected and actual values implement an Equal method in the form +// `func (this T) Equal(that T) bool` then call the method. If true, they are equal. +// 2. The expected and actual values are judged equal or not by oglematchers.Equals. +func ShouldEqual(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + return shouldEqual(actual, expected[0]) +} +func shouldEqual(actual, expected interface{}) (message string) { + defer func() { + if r := recover(); r != nil { + message = serializer.serialize(expected, actual, composeEqualityMismatchMessage(expected, actual)) + } + }() + + if spec := newEqualityMethodSpecification(expected, actual); spec.IsSatisfied() && spec.AreEqual() { + return success + } else if matchError := oglematchers.Equals(expected).Matches(actual); matchError == nil { + return success + } + + return serializer.serialize(expected, actual, composeEqualityMismatchMessage(expected, actual)) +} +func composeEqualityMismatchMessage(expected, actual interface{}) string { + var ( + renderedExpected = fmt.Sprintf("%v", expected) + renderedActual = fmt.Sprintf("%v", actual) + ) + + if renderedExpected != renderedActual { + return fmt.Sprintf(shouldHaveBeenEqual+composePrettyDiff(renderedExpected, renderedActual), expected, actual) + } else if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual) + } else { + return fmt.Sprintf(shouldHaveBeenEqualNoResemblance, renderedExpected) + } +} + +// ShouldNotEqual receives exactly two parameters and does an inequality check. +// See ShouldEqual for details on how equality is determined. +func ShouldNotEqual(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if ShouldEqual(actual, expected[0]) == success { + return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0]) + } + return success +} + +// ShouldAlmostEqual makes sure that two parameters are close enough to being equal. +// The acceptable delta may be specified with a third argument, +// or a very small default delta will be used. +func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string { + actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) + + if err != "" { + return err + } + + if math.Abs(actualFloat-expectedFloat) <= deltaFloat { + return success + } else { + return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat) + } +} + +// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual +func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string { + actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...) 
+ + if err != "" { + return err + } + + if math.Abs(actualFloat-expectedFloat) > deltaFloat { + return success + } else { + return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat) + } +} + +func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) { + deltaFloat := 0.0000000001 + + if len(expected) == 0 { + return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)" + } else if len(expected) == 2 { + delta, err := getFloat(expected[1]) + + if err != nil { + return 0.0, 0.0, 0.0, "The delta value " + err.Error() + } + + deltaFloat = delta + } else if len(expected) > 2 { + return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)" + } + + actualFloat, err := getFloat(actual) + if err != nil { + return 0.0, 0.0, 0.0, "The actual value " + err.Error() + } + + expectedFloat, err := getFloat(expected[0]) + if err != nil { + return 0.0, 0.0, 0.0, "The comparison value " + err.Error() + } + + return actualFloat, expectedFloat, deltaFloat, "" +} + +// returns the float value of any real number, or error if it is not a numerical type +func getFloat(num interface{}) (float64, error) { + numValue := reflect.ValueOf(num) + numKind := numValue.Kind() + + if numKind == reflect.Int || + numKind == reflect.Int8 || + numKind == reflect.Int16 || + numKind == reflect.Int32 || + numKind == reflect.Int64 { + return float64(numValue.Int()), nil + } else if numKind == reflect.Uint || + numKind == reflect.Uint8 || + numKind == reflect.Uint16 || + numKind == reflect.Uint32 || + numKind == reflect.Uint64 { + return float64(numValue.Uint()), nil + } else if numKind == reflect.Float32 || + numKind == reflect.Float64 { + return numValue.Float(), nil + } else { + return 0.0, errors.New("must be a numerical type, but was: " + numKind.String()) + } +} + +// ShouldEqualJSON receives exactly two parameters and does an equality check by marshalling to JSON +func ShouldEqualJSON(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + + expectedString, expectedErr := remarshal(expected[0].(string)) + if expectedErr != nil { + return "Expected value not valid JSON: " + expectedErr.Error() + } + + actualString, actualErr := remarshal(actual.(string)) + if actualErr != nil { + return "Actual value not valid JSON: " + actualErr.Error() + } + + return ShouldEqual(actualString, expectedString) +} +func remarshal(value string) (string, error) { + var structured interface{} + err := json.Unmarshal([]byte(value), &structured) + if err != nil { + return "", err + } + canonical, _ := json.Marshal(structured) + return string(canonical), nil +} + +// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual) +func ShouldResemble(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + + if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil { + renderedExpected, renderedActual := render.Render(expected[0]), render.Render(actual) + message := fmt.Sprintf(shouldHaveResembled, renderedExpected, renderedActual) + + composePrettyDiff(renderedExpected, renderedActual) + return serializer.serializeDetailed(expected[0], actual, message) + } + + return success +} + +// ShouldNotResemble receives exactly two parameters and does an 
inverse deep equal check (see reflect.DeepEqual) +func ShouldNotResemble(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } else if ShouldResemble(actual, expected[0]) == success { + return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0])) + } + return success +} + +// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address. +func ShouldPointTo(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + return shouldPointTo(actual, expected[0]) + +} +func shouldPointTo(actual, expected interface{}) string { + actualValue := reflect.ValueOf(actual) + expectedValue := reflect.ValueOf(expected) + + if ShouldNotBeNil(actual) != success { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil") + } else if ShouldNotBeNil(expected) != success { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil") + } else if actualValue.Kind() != reflect.Ptr { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not") + } else if expectedValue.Kind() != reflect.Ptr { + return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not") + } else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success { + actualAddress := reflect.ValueOf(actual).Pointer() + expectedAddress := reflect.ValueOf(expected).Pointer() + return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo, + actual, actualAddress, + expected, expectedAddress)) + } + return success +} + +// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresess. +func ShouldNotPointTo(actual interface{}, expected ...interface{}) string { + if message := need(1, expected); message != success { + return message + } + compare := ShouldPointTo(actual, expected[0]) + if strings.HasPrefix(compare, shouldBePointers) { + return compare + } else if compare == success { + return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer()) + } + return success +} + +// ShouldBeNil receives a single parameter and ensures that it is nil. +func ShouldBeNil(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual == nil { + return success + } else if interfaceHasNilValue(actual) { + return success + } + return fmt.Sprintf(shouldHaveBeenNil, actual) +} +func interfaceHasNilValue(actual interface{}) bool { + value := reflect.ValueOf(actual) + kind := value.Kind() + nilable := kind == reflect.Slice || + kind == reflect.Chan || + kind == reflect.Func || + kind == reflect.Ptr || + kind == reflect.Map + + // Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr + // Reference: http://golang.org/pkg/reflect/#Value.IsNil + return nilable && value.IsNil() +} + +// ShouldNotBeNil receives a single parameter and ensures that it is not nil. +func ShouldNotBeNil(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if ShouldBeNil(actual) == success { + return fmt.Sprintf(shouldNotHaveBeenNil, actual) + } + return success +} + +// ShouldBeTrue receives a single parameter and ensures that it is true. 
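// Editor's usage sketch (annotation, not part of this diff): the assertion
// helpers above all share the signature func(actual interface{}, expected
// ...interface{}) string and return the empty string on success. Assuming the
// upstream import path github.com/smartystreets/assertions (this copy is only
// vendored here for GoConvey-based tests):
package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// An empty string means the assertion passed; anything else is the failure message.
	fmt.Println(assertions.ShouldResemble([]int{1, 2}, []int{1, 2}) == "") // true (reflect.DeepEqual semantics)
	fmt.Println(assertions.ShouldEqualJSON(`{"a":1}`, `{ "a": 1 }`) == "") // true (compared after re-marshalling)
	fmt.Println(assertions.ShouldAlmostEqual(1.0, 1.0+1e-13) == "")        // true (default delta 1e-10, see cleanAlmostEqualInput)
	fmt.Println(assertions.ShouldBeNil(nil) == "")                         // true
}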
+func ShouldBeTrue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual != true { + return fmt.Sprintf(shouldHaveBeenTrue, actual) + } + return success +} + +// ShouldBeFalse receives a single parameter and ensures that it is false. +func ShouldBeFalse(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } else if actual != false { + return fmt.Sprintf(shouldHaveBeenFalse, actual) + } + return success +} + +// ShouldBeZeroValue receives a single parameter and ensures that it is +// the Go equivalent of the default value, or "zero" value. +func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface() + if !reflect.DeepEqual(zeroVal, actual) { + return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual)) + } + return success +} + +// ShouldBeZeroValue receives a single parameter and ensures that it is NOT +// the Go equivalent of the default value, or "zero" value. +func ShouldNotBeZeroValue(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface() + if reflect.DeepEqual(zeroVal, actual) { + return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldNotHaveBeenZeroValue, actual)) + } + return success +} diff --git a/backend/vendor/github.com/smartystreets/assertions/equality_diff.go b/backend/vendor/github.com/smartystreets/assertions/equality_diff.go new file mode 100644 index 00000000..bd698ff6 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/equality_diff.go @@ -0,0 +1,37 @@ +package assertions + +import ( + "fmt" + + "github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch" +) + +func composePrettyDiff(expected, actual string) string { + diff := diffmatchpatch.New() + diffs := diff.DiffMain(expected, actual, false) + if prettyDiffIsLikelyToBeHelpful(diffs) { + return fmt.Sprintf("\nDiff: '%s'", diff.DiffPrettyText(diffs)) + } + return "" +} + +// prettyDiffIsLikelyToBeHelpful returns true if the diff listing contains +// more 'equal' segments than 'deleted'/'inserted' segments. +func prettyDiffIsLikelyToBeHelpful(diffs []diffmatchpatch.Diff) bool { + equal, deleted, inserted := measureDiffTypeLengths(diffs) + return equal > deleted && equal > inserted +} + +func measureDiffTypeLengths(diffs []diffmatchpatch.Diff) (equal, deleted, inserted int) { + for _, segment := range diffs { + switch segment.Type { + case diffmatchpatch.DiffEqual: + equal += len(segment.Text) + case diffmatchpatch.DiffDelete: + deleted += len(segment.Text) + case diffmatchpatch.DiffInsert: + inserted += len(segment.Text) + } + } + return equal, deleted, inserted +} diff --git a/backend/vendor/github.com/smartystreets/assertions/filter.go b/backend/vendor/github.com/smartystreets/assertions/filter.go new file mode 100644 index 00000000..cbf75667 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/filter.go @@ -0,0 +1,31 @@ +package assertions + +import "fmt" + +const ( + success = "" + needExactValues = "This assertion requires exactly %d comparison values (you provided %d)." + needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)." 
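// Editor's sketch (annotation, not part of this diff): the need/atLeast/atMost
// helpers in filter.go (continued just below) produce the arity messages such
// as "This assertion requires exactly 1 comparison values (you provided 0)."
// Custom assertions follow the same contract; shouldBeEven is a hypothetical
// example, not something this patch adds:
package main

import "fmt"

// shouldBeEven has the standard assertion shape: empty string means pass.
func shouldBeEven(actual interface{}, expected ...interface{}) string {
	if len(expected) != 0 { // mirrors need(0, expected)
		return fmt.Sprintf("This assertion requires exactly %d comparison values (you provided %d).", 0, len(expected))
	}
	if n, ok := actual.(int); !ok || n%2 != 0 {
		return fmt.Sprintf("Expected an even int but got: %v", actual)
	}
	return ""
}

func main() {
	fmt.Println(shouldBeEven(4) == "") // true
	fmt.Println(shouldBeEven(3))       // failure message
	fmt.Println(shouldBeEven(4, 2))    // arity complaint, as need() would report
}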
+ needFewerValues = "This assertion allows %d or fewer comparison values (you provided %d)." +) + +func need(needed int, expected []interface{}) string { + if len(expected) != needed { + return fmt.Sprintf(needExactValues, needed, len(expected)) + } + return success +} + +func atLeast(minimum int, expected []interface{}) string { + if len(expected) < minimum { + return needNonEmptyCollection + } + return success +} + +func atMost(max int, expected []interface{}) string { + if len(expected) > max { + return fmt.Sprintf(needFewerValues, max, len(expected)) + } + return success +} diff --git a/backend/vendor/github.com/smartystreets/assertions/go.mod b/backend/vendor/github.com/smartystreets/assertions/go.mod new file mode 100644 index 00000000..c0daaa3d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/go.mod @@ -0,0 +1,3 @@ +module github.com/smartystreets/assertions + +go 1.12 diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/AUTHORS b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/AUTHORS new file mode 100644 index 00000000..2d7bb2bf --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS new file mode 100644 index 00000000..369e3d55 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/LICENSE b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/LICENSE new file mode 100644 index 00000000..937942c2 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go new file mode 100644 index 00000000..cb25b437 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1345 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +//go:generate stringer -type=Operation -trimprefix=Diff + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +// splice removes amount elements from slice at index index, replacing them with elements. +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + if len(elements) == amount { + // Easy case: overwrite the relevant items. + copy(slice[index:], elements) + return slice + } + if len(elements) < amount { + // Fewer new items than old. + // Copy in the new items. + copy(slice[index:], elements) + // Shift the remaining items left. + copy(slice[index+len(elements):], slice[index+amount:]) + // Calculate the new end of the slice. + end := len(slice) - amount + len(elements) + // Zero stranded elements at end so that they can be garbage collected. + tail := slice[end:] + for i := range tail { + tail[i] = Diff{} + } + return slice[:end] + } + // More new items than old. + // Make room in slice for new elements. + // There's probably an even more efficient way to do this, + // but this is simple and clear. + need := len(slice) - amount + len(elements) + for len(slice) < need { + slice = append(slice, Diff{}) + } + // Shift slice elements right to make room for new elements. 
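// Editor's note (annotation, not part of this diff): splice(slice, index,
// amount, elements...) replaces slice[index:index+amount] with elements. A
// naive equivalent is shown below purely to clarify the semantics; the name
// spliceNaive is ours and the Diff type comes from this file. The vendored
// version instead reuses the backing array where it can and zeroes stranded
// tail elements so they can be garbage collected.
func spliceNaive(slice []Diff, index, amount int, elements ...Diff) []Diff {
	out := make([]Diff, 0, len(slice)-amount+len(elements))
	out = append(out, slice[:index]...)
	out = append(out, elements...)
	return append(out, slice[index+amount:]...)
}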
+ copy(slice[index+len(elements):], slice[index+amount:]) + // Copy in new elements. + copy(slice[index:], elements) + return slice +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. 
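// Editor's usage sketch (annotation, not part of this diff): DiffMain above is
// the package's main entry point. Assuming the upstream module path
// github.com/sergi/go-diff/diffmatchpatch (this copy lives under internal/ and
// cannot be imported directly):
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// checklines=false gives a character-level diff; true enables the line-mode speedup.
	diffs := dmp.DiffMain("Crawlab spider platform", "Crawlab crawler platform", false)
	fmt.Println(dmp.DiffPrettyText(diffs)) // ANSI-colored inline rendering (defined later in this file)
	for _, d := range diffs {
		fmt.Printf("%-6v %q\n", d.Type, d.Text) // Operation stringifies to Delete/Equal/Insert
	}
}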
+ diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + diffs := diffsA + diffs = append(diffs, Diff{DiffEqual, string(midCommon)}) + diffs = append(diffs, diffsB...) + return diffs + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. 
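// Editor's sketch (annotation, not part of this diff): diffBisect below checks
// the deadline periodically and falls back to a plain delete+insert pair when
// it runs out of time; the deadline is derived from DiffTimeout in
// DiffMainRunes above. Assumes "time" and the upstream import path:
func boundedDiff(a, b string) []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	dmp.DiffTimeout = 200 * time.Millisecond // 0 disables the deadline entirely
	return dmp.DiffMain(a, b, true)
}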
+ runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && d%16 == 0 && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. 
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + // Linear search. See comment in commonSuffixLength. + n := 0 + for ; n < len(text1) && n < len(text2); n++ { + if text1[n] != text2[n] { + return n + } + } + return n +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. 
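// Editor's sketch (annotation, not part of this diff): the line-mode helpers
// above are meant to be composed: hash each line to a rune, diff the hashed
// strings, then rehydrate. Assumes the upstream import path:
func lineDiff(text1, text2 string) []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	c1, c2, lines := dmp.DiffLinesToChars(text1, text2)
	diffs := dmp.DiffMain(c1, c2, false)      // diff the per-line hashes
	return dmp.DiffCharsToLines(diffs, lines) // map hashes back to the real lines
}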
+func commonSuffixLength(text1, text2 []rune) int { + // Use linear search rather than the binary search discussed at https://neil.fraser.name/news/2007/10/09/. + // See discussion at https://github.com/sergi/go-diff/issues/54. + i1 := len(text1) + i2 := len(text2) + for n := 0; ; n++ { + i1-- + i2-- + if i1 < 0 || i2 < 0 || text1[i1] != text2[i2] { + return n + } + } +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. 
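// Editor's sketch (annotation, not part of this diff): quick illustration of
// the prefix/suffix/overlap helpers above; the expected values follow directly
// from the definitions. Assumes "fmt" and the upstream import path:
func overlapDemo() {
	dmp := diffmatchpatch.New()
	fmt.Println(dmp.DiffCommonPrefix("crawlab_master", "crawlab_worker")) // 8 ("crawlab_")
	fmt.Println(dmp.DiffCommonSuffix("task.json", "node.json"))           // 5 (".json")
	fmt.Println(dmp.DiffCommonOverlap("abcxyz", "xyzdef"))                // 3 ("xyz" is a suffix of one, a prefix of the other)
}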
+func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + equalities := make([]int, 0, len(diffs)) + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + equalities = append(equalities, pointer) + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities[len(equalities)-1] + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities[:len(equalities)-1] + + if len(equalities) > 0 { + equalities = equalities[:len(equalities)-1] + } + pointer = -1 + if len(equalities) > 0 { + pointer = equalities[len(equalities)-1] + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. 
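// Editor's sketch (annotation, not part of this diff): DiffCleanupSemantic is
// typically run right after DiffMain when the diff is meant for human readers;
// it trades minimality for readability. Assumes the upstream import path:
func humanReadableDiff(a, b string) []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain(a, b, false)
	return dmp.DiffCleanupSemantic(diffs) // folds semantically trivial equalities into larger edits
}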
+ // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = splice(diffs, pointer, 0, Diff{DiffEqual, insertion[:overlapLength1]}) + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = splice(diffs, pointer, 0, overlap) + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. 
+ return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. 
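// Editor's sketch (annotation, not part of this diff): DiffCleanupEfficiency is
// the machine-oriented counterpart of DiffCleanupSemantic; DiffEditCost
// (default 4, set in diffmatchpatch.go later in this diff) controls how short
// an equality must be before folding it into the surrounding edits becomes
// cheaper. Assumes the upstream import path:
func compactDiff(a, b string) []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	dmp.DiffEditCost = 8 // prefer fewer, larger edit operations
	diffs := dmp.DiffMain(a, b, false)
	return dmp.DiffCleanupEfficiency(diffs)
}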
+ equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = splice(diffs, insPoint, 0, Diff{DiffDelete, lastequality}) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. + commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. 
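// Editor's sketch (annotation, not part of this diff): DiffCleanupMerge is the
// normalization pass the other cleanups finish with; it merges adjacent
// operations of the same type and factors common prefixes/suffixes out of
// delete+insert pairs. The expected output below follows from the code above.
// Assumes the upstream import path:
func mergeDemo() []diffmatchpatch.Diff {
	dmp := diffmatchpatch.New()
	in := []diffmatchpatch.Diff{
		{Type: diffmatchpatch.DiffDelete, Text: "a"},
		{Type: diffmatchpatch.DiffInsert, Text: "b"},
		{Type: diffmatchpatch.DiffDelete, Text: "c"},
		{Type: diffmatchpatch.DiffInsert, Text: "d"},
	}
	// Interleaved edits are regrouped: [{DiffDelete "ac"} {DiffInsert "bd"}]
	return dmp.DiffCleanupMerge(in)
}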
+ if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. + chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶
", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffDelete: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + case DiffEqual: + _, _ = buff.WriteString("") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += utf8.RuneCountInString(aDiff.Text) + case DiffDelete: + deletions += utf8.RuneCountInString(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. 
+ delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 00000000..d3acc32c --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). 
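// Editor's sketch (annotation, not part of this diff): DiffToDelta and
// DiffFromDelta above are designed as a round trip; the delta plus the original
// text is enough to rebuild the full diff. Assumes the upstream import path:
func deltaRoundTrip(text1, text2 string) (string, error) {
	dmp := diffmatchpatch.New()
	diffs := dmp.DiffMain(text1, text2, false)
	delta := dmp.DiffToDelta(diffs) // e.g. "=3\t-2\t+ing"
	rebuilt, err := dmp.DiffFromDelta(text1, delta)
	if err != nil {
		return "", err
	}
	return dmp.DiffText2(rebuilt), nil // expected to equal text2
}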
Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go new file mode 100644 index 00000000..17374e10 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. 
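// Editor's sketch (annotation, not part of this diff): MatchMain/MatchBitap
// above perform fuzzy location of a pattern near an expected offset, tuned by
// MatchThreshold and MatchDistance on the DiffMatchPatch struct. Assumes the
// upstream import path:
func fuzzyFind(text, pattern string, expectedAt int) int {
	dmp := diffmatchpatch.New()
	dmp.MatchThreshold = 0.8 // more tolerant than the 0.5 default
	dmp.MatchDistance = 1000 // how far from expectedAt a match may stray
	return dmp.MatchMain(text, pattern, expectedAt) // -1 when nothing scores well enough
}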
+ binMin = 0
+ binMid = binMax
+ for binMin < binMid {
+ if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold {
+ binMin = binMid
+ } else {
+ binMax = binMid
+ }
+ binMid = (binMax-binMin)/2 + binMin
+ }
+ // Use the result from this iteration as the maximum for the next.
+ binMax = binMid
+ start := int(math.Max(1, float64(loc-binMid+1)))
+ finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern)))
+
+ rd := make([]int, finish+2)
+ rd[finish+1] = (1 << uint(d)) - 1
+
+ for j := finish; j >= start; j-- {
+ var charMatch int
+ if len(text) <= j-1 {
+ // Out of range.
+ charMatch = 0
+ } else if _, ok := s[text[j-1]]; !ok {
+ charMatch = 0
+ } else {
+ charMatch = s[text[j-1]]
+ }
+
+ if d == 0 {
+ // First pass: exact match.
+ rd[j] = ((rd[j+1] << 1) | 1) & charMatch
+ } else {
+ // Subsequent passes: fuzzy match.
+ rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
+ }
+ if (rd[j] & matchmask) != 0 {
+ score := dmp.matchBitapScore(d, j-1, loc, pattern)
+ // This match will almost certainly be better than any existing match. But check anyway.
+ if score <= scoreThreshold {
+ // Told you so.
+ scoreThreshold = score
+ bestLoc = j - 1
+ if bestLoc > loc {
+ // When passing loc, don't exceed our current distance from loc.
+ start = int(math.Max(1, float64(2*loc-bestLoc)))
+ } else {
+ // Already passed loc, downhill from here on in.
+ break
+ }
+ }
+ }
+ }
+ if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
+ // No hope for a (better) match at greater error levels.
+ break
+ }
+ lastRd = rd
+ }
+ return bestLoc
+}
+
+// matchBitapScore computes and returns the score for a match with e errors and x location.
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+ accuracy := float64(e) / float64(len(pattern))
+ proximity := math.Abs(float64(loc - x))
+ if dmp.MatchDistance == 0 {
+ // Dodge divide by zero error.
+ if proximity == 0 {
+ return accuracy
+ }
+
+ return 1.0
+ }
+ return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+ s := map[byte]int{}
+ charPattern := []byte(pattern)
+ for _, c := range charPattern {
+ _, ok := s[c]
+ if !ok {
+ s[c] = 0
+ }
+ }
+ i := 0
+
+ for _, c := range charPattern {
+ value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+ s[c] = value
+ i++
+ }
+
+ return s
+}
diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/mathutil.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/mathutil.go
new file mode 100644
--- /dev/null
+++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/mathutil.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+func min(x, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go
new file mode 100644
index 00000000..533ec0da
--- /dev/null
+++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/operation_string.go
@@ -0,0 +1,17 @@
+// Code generated by "stringer -type=Operation -trimprefix=Diff"; DO NOT EDIT.
+ +package diffmatchpatch + +import "fmt" + +const _Operation_name = "DeleteEqualInsert" + +var _Operation_index = [...]uint8{0, 6, 11, 17} + +func (i Operation) String() string { + i -= -1 + if i < 0 || i >= Operation(len(_Operation_index)-1) { + return fmt.Sprintf("Operation(%d)", i+-1) + } + return _Operation_name[_Operation_index[i]:_Operation_index[i+1]] +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go new file mode 100644 index 00000000..223c43c4 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + Start1 int + Start2 int + Length1 int + Length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indices are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.Length1 == 0 { + coords1 = strconv.Itoa(p.Start1) + ",0" + } else if p.Length1 == 1 { + coords1 = strconv.Itoa(p.Start1 + 1) + } else { + coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1) + } + + if p.Length2 == 0 { + coords2 = strconv.Itoa(p.Start2) + ",0" + } else if p.Length2 == 1 { + coords2 = strconv.Itoa(p.Start2 + 1) + } else { + coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. + for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. 
+ suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. + patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. 
+ if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. + startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. 
+ results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. + last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. 
+ patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. + if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. 
+func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 00000000..265f29cc --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. 
+func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE new file mode 100644 index 00000000..6280ff0e --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/LICENSE @@ -0,0 +1,27 @@ +// Copyright (c) 2015 The Chromium Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
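Note for reviewers skimming this vendored block: the diff/match/patch package above sits under `smartystreets/assertions/internal`, i.e. it arrives with the goconvey/assertions test tooling and cannot be imported by Crawlab's own code. Purely as an illustrative sketch of how the pieces above (DiffMain, PatchMake, PatchToText, PatchApply) fit together — using the canonical upstream import path `github.com/sergi/go-diff/diffmatchpatch`, since an `internal` vendored path is not importable:

```go
package main

import (
	"fmt"

	// Upstream module path; the copy vendored above is the same code.
	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New() // defaults as set in diffmatchpatch.go above

	a := "crawlab spiders are configurable"
	b := "crawlab spiders are customizable"

	// Diff the two texts, build patches, and serialize them (patch.go above).
	diffs := dmp.DiffMain(a, b, false)
	patches := dmp.PatchMake(a, diffs)
	fmt.Println(dmp.PatchToText(patches))

	// Apply the patches back onto the original text (PatchApply above).
	patched, applied := dmp.PatchApply(patches, a)
	fmt.Println(patched, applied) // patched text plus a per-patch success flag
}
```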
diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go new file mode 100644 index 00000000..313611ef --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render.go @@ -0,0 +1,481 @@ +// Copyright 2015 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package render + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" +) + +var builtinTypeMap = map[reflect.Kind]string{ + reflect.Bool: "bool", + reflect.Complex128: "complex128", + reflect.Complex64: "complex64", + reflect.Float32: "float32", + reflect.Float64: "float64", + reflect.Int16: "int16", + reflect.Int32: "int32", + reflect.Int64: "int64", + reflect.Int8: "int8", + reflect.Int: "int", + reflect.String: "string", + reflect.Uint16: "uint16", + reflect.Uint32: "uint32", + reflect.Uint64: "uint64", + reflect.Uint8: "uint8", + reflect.Uint: "uint", + reflect.Uintptr: "uintptr", +} + +var builtinTypeSet = map[string]struct{}{} + +func init() { + for _, v := range builtinTypeMap { + builtinTypeSet[v] = struct{}{} + } +} + +var typeOfString = reflect.TypeOf("") +var typeOfInt = reflect.TypeOf(int(1)) +var typeOfUint = reflect.TypeOf(uint(1)) +var typeOfFloat = reflect.TypeOf(10.1) + +// Render converts a structure to a string representation. Unline the "%#v" +// format string, this resolves pointer types' contents in structs, maps, and +// slices/arrays and prints their field values. +func Render(v interface{}) string { + buf := bytes.Buffer{} + s := (*traverseState)(nil) + s.render(&buf, 0, reflect.ValueOf(v), false) + return buf.String() +} + +// renderPointer is called to render a pointer value. +// +// This is overridable so that the test suite can have deterministic pointer +// values in its expectations. +var renderPointer = func(buf *bytes.Buffer, p uintptr) { + fmt.Fprintf(buf, "0x%016x", p) +} + +// traverseState is used to note and avoid recursion as struct members are being +// traversed. +// +// traverseState is allowed to be nil. Specifically, the root state is nil. +type traverseState struct { + parent *traverseState + ptr uintptr +} + +func (s *traverseState) forkFor(ptr uintptr) *traverseState { + for cur := s; cur != nil; cur = cur.parent { + if ptr == cur.ptr { + return nil + } + } + + fs := &traverseState{ + parent: s, + ptr: ptr, + } + return fs +} + +func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value, implicit bool) { + if v.Kind() == reflect.Invalid { + buf.WriteString("nil") + return + } + vt := v.Type() + + // If the type being rendered is a potentially recursive type (a type that + // can contain itself as a member), we need to avoid recursion. + // + // If we've already seen this type before, mark that this is the case and + // write a recursion placeholder instead of actually rendering it. + // + // If we haven't seen it before, fork our `seen` tracking so any higher-up + // renderers will also render it at least once, then mark that we've seen it + // to avoid recursing on lower layers. + pe := uintptr(0) + vk := vt.Kind() + switch vk { + case reflect.Ptr: + // Since structs and arrays aren't pointers, they can't directly be + // recursed, but they can contain pointers to themselves. Record their + // pointer to avoid this. 
+ switch v.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ pe = v.Pointer()
+ }
+
+ case reflect.Slice, reflect.Map:
+ pe = v.Pointer()
+ }
+ if pe != 0 {
+ s = s.forkFor(pe)
+ if s == nil {
+ buf.WriteString("<REC(")
+ writeType(buf, ptrs, vt)
+ buf.WriteString(")>")
+ return
+ }
+ }
+
+ isAnon := func(t reflect.Type) bool {
+ if t.Name() != "" {
+ if _, ok := builtinTypeSet[t.Name()]; !ok {
+ return false
+ }
+ }
+ return t.Kind() != reflect.Interface
+ }
+
+ switch vk {
+ case reflect.Struct:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ buf.WriteRune('{')
+ if rendered, ok := renderTime(v); ok {
+ buf.WriteString(rendered)
+ } else {
+ structAnon := vt.Name() == ""
+ for i := 0; i < vt.NumField(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ anon := structAnon && isAnon(vt.Field(i).Type)
+
+ if !anon {
+ buf.WriteString(vt.Field(i).Name)
+ buf.WriteRune(':')
+ }
+
+ s.render(buf, 0, v.Field(i), anon)
+ }
+ }
+ buf.WriteRune('}')
+
+ case reflect.Slice:
+ if v.IsNil() {
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ buf.WriteString("(nil)")
+ } else {
+ buf.WriteString("nil")
+ }
+ return
+ }
+ fallthrough
+
+ case reflect.Array:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ anon := vt.Name() == "" && isAnon(vt.Elem())
+ buf.WriteString("{")
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, v.Index(i), anon)
+ }
+ buf.WriteRune('}')
+
+ case reflect.Map:
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ }
+ if v.IsNil() {
+ buf.WriteString("(nil)")
+ } else {
+ buf.WriteString("{")
+
+ mkeys := v.MapKeys()
+ tryAndSortMapKeys(vt, mkeys)
+
+ kt := vt.Key()
+ keyAnon := typeOfString.ConvertibleTo(kt) || typeOfInt.ConvertibleTo(kt) || typeOfUint.ConvertibleTo(kt) || typeOfFloat.ConvertibleTo(kt)
+ valAnon := vt.Name() == "" && isAnon(vt.Elem())
+ for i, mk := range mkeys {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, mk, keyAnon)
+ buf.WriteString(":")
+ s.render(buf, 0, v.MapIndex(mk), valAnon)
+ }
+ buf.WriteRune('}')
+ }
+
+ case reflect.Ptr:
+ ptrs++
+ fallthrough
+ case reflect.Interface:
+ if v.IsNil() {
+ writeType(buf, ptrs, v.Type())
+ buf.WriteString("(nil)")
+ } else {
+ s.render(buf, ptrs, v.Elem(), false)
+ }
+
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ renderPointer(buf, v.Pointer())
+ buf.WriteRune(')')
+
+ default:
+ tstr := vt.String()
+ implicit = implicit || (ptrs == 0 && builtinTypeMap[vk] == tstr)
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ }
+
+ switch vk {
+ case reflect.String:
+ fmt.Fprintf(buf, "%q", v.String())
+ case reflect.Bool:
+ fmt.Fprintf(buf, "%v", v.Bool())
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fmt.Fprintf(buf, "%d", v.Int())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ fmt.Fprintf(buf, "%d", v.Uint())
+
+ case reflect.Float32, reflect.Float64:
+ fmt.Fprintf(buf, "%g", v.Float())
+
+ case reflect.Complex64, reflect.Complex128:
+ fmt.Fprintf(buf, "%g", v.Complex())
+ }
+
+ if !implicit {
+ buf.WriteRune(')')
+ }
+ }
+}
+
+func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) {
+ parens := ptrs > 0
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ parens = true
+ }
+
+ if parens {
+ buf.WriteRune('(')
+ for i := 0; i < ptrs; i++ {
+ buf.WriteRune('*')
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ if ptrs == 0 {
+ // This pointer was referenced from within writeType (e.g., as part of
+ // rendering a
list), and so hasn't had its pointer asterisk accounted + // for. + buf.WriteRune('*') + } + writeType(buf, 0, t.Elem()) + + case reflect.Interface: + if n := t.Name(); n != "" { + buf.WriteString(t.String()) + } else { + buf.WriteString("interface{}") + } + + case reflect.Array: + buf.WriteRune('[') + buf.WriteString(strconv.FormatInt(int64(t.Len()), 10)) + buf.WriteRune(']') + writeType(buf, 0, t.Elem()) + + case reflect.Slice: + if t == reflect.SliceOf(t.Elem()) { + buf.WriteString("[]") + writeType(buf, 0, t.Elem()) + } else { + // Custom slice type, use type name. + buf.WriteString(t.String()) + } + + case reflect.Map: + if t == reflect.MapOf(t.Key(), t.Elem()) { + buf.WriteString("map[") + writeType(buf, 0, t.Key()) + buf.WriteRune(']') + writeType(buf, 0, t.Elem()) + } else { + // Custom map type, use type name. + buf.WriteString(t.String()) + } + + default: + buf.WriteString(t.String()) + } + + if parens { + buf.WriteRune(')') + } +} + +type cmpFn func(a, b reflect.Value) int + +type sortableValueSlice struct { + cmp cmpFn + elements []reflect.Value +} + +func (s sortableValueSlice) Len() int { + return len(s.elements) +} + +func (s sortableValueSlice) Less(i, j int) bool { + return s.cmp(s.elements[i], s.elements[j]) < 0 +} + +func (s sortableValueSlice) Swap(i, j int) { + s.elements[i], s.elements[j] = s.elements[j], s.elements[i] +} + +// cmpForType returns a cmpFn which sorts the data for some type t in the same +// order that a go-native map key is compared for equality. +func cmpForType(t reflect.Type) cmpFn { + switch t.Kind() { + case reflect.String: + return func(av, bv reflect.Value) int { + a, b := av.String(), bv.String() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Bool: + return func(av, bv reflect.Value) int { + a, b := av.Bool(), bv.Bool() + if !a && b { + return -1 + } else if a && !b { + return 1 + } + return 0 + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(av, bv reflect.Value) int { + a, b := av.Int(), bv.Int() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, reflect.Uintptr, reflect.UnsafePointer: + return func(av, bv reflect.Value) int { + a, b := av.Uint(), bv.Uint() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Float32, reflect.Float64: + return func(av, bv reflect.Value) int { + a, b := av.Float(), bv.Float() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Interface: + return func(av, bv reflect.Value) int { + a, b := av.InterfaceData(), bv.InterfaceData() + if a[0] < b[0] { + return -1 + } else if a[0] > b[0] { + return 1 + } + if a[1] < b[1] { + return -1 + } else if a[1] > b[1] { + return 1 + } + return 0 + } + + case reflect.Complex64, reflect.Complex128: + return func(av, bv reflect.Value) int { + a, b := av.Complex(), bv.Complex() + if real(a) < real(b) { + return -1 + } else if real(a) > real(b) { + return 1 + } + if imag(a) < imag(b) { + return -1 + } else if imag(a) > imag(b) { + return 1 + } + return 0 + } + + case reflect.Ptr, reflect.Chan: + return func(av, bv reflect.Value) int { + a, b := av.Pointer(), bv.Pointer() + if a < b { + return -1 + } else if a > b { + return 1 + } + return 0 + } + + case reflect.Struct: + cmpLst := make([]cmpFn, t.NumField()) + for i := range cmpLst { + cmpLst[i] = cmpForType(t.Field(i).Type) + } + return func(a, b 
reflect.Value) int { + for i, cmp := range cmpLst { + if rslt := cmp(a.Field(i), b.Field(i)); rslt != 0 { + return rslt + } + } + return 0 + } + } + + return nil +} + +func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) { + if cmp := cmpForType(mt.Key()); cmp != nil { + sort.Sort(sortableValueSlice{cmp, k}) + } +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go new file mode 100644 index 00000000..990c75d0 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_time.go @@ -0,0 +1,26 @@ +package render + +import ( + "reflect" + "time" +) + +func renderTime(value reflect.Value) (string, bool) { + if instant, ok := convertTime(value); !ok { + return "", false + } else if instant.IsZero() { + return "0", true + } else { + return instant.String(), true + } +} + +func convertTime(value reflect.Value) (t time.Time, ok bool) { + if value.Type() == timeType { + defer func() { recover() }() + t, ok = value.Interface().(time.Time) + } + return +} + +var timeType = reflect.TypeOf(time.Time{}) diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore new file mode 100644 index 00000000..dd8fc746 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore @@ -0,0 +1,5 @@ +*.6 +6.out +_obj/ +_test/ +_testmain.go diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml new file mode 100644 index 00000000..b9721192 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml @@ -0,0 +1,4 @@ +# Cf. http://docs.travis-ci.com/user/getting-started/ +# Cf. http://docs.travis-ci.com/user/languages/go/ + +language: go diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
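The oglematchers files that follow all implement the same small contract: a matcher exposes a `Description() string` and a `Matches(candidate interface{}) error`, where a nil error means a match, an empty error means a plain mismatch, and `NewFatalError` marks a mismatch that cannot be salvaged by wrapping matchers. As a rough, self-contained sketch of that contract (the interface is redeclared locally here because the vendored copy lives under an `internal/` path and cannot be imported; `hasPrefix` is a hypothetical example, not part of the library):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Matcher mirrors the two-method contract used by the vendored matchers below.
type Matcher interface {
	Matches(candidate interface{}) error
	Description() string
}

// hasPrefix accepts strings that start with p.
type hasPrefix struct{ p string }

func (m *hasPrefix) Description() string { return fmt.Sprintf("has prefix %q", m.p) }

func (m *hasPrefix) Matches(c interface{}) error {
	s, ok := c.(string)
	if !ok {
		// In the real package this would be a fatal error (NewFatalError).
		return errors.New("which is not a string")
	}
	if !strings.HasPrefix(s, m.p) {
		return errors.New("") // empty error: plain "no match", as in the vendored matchers
	}
	return nil
}

func main() {
	var m Matcher = &hasPrefix{"config_"}
	fmt.Println(m.Description(), m.Matches("config_spiders") == nil) // has prefix "config_" true
}
```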
diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md new file mode 100644 index 00000000..215a2bb7 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/README.md @@ -0,0 +1,58 @@ +[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers) + +`oglematchers` is a package for the Go programming language containing a set of +matchers, useful in a testing or mocking framework, inspired by and mostly +compatible with [Google Test][googletest] for C++ and +[Google JS Test][google-js-test]. The package is used by the +[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking +framework, which may be more directly useful to you, but can be generically used +elsewhere as well. + +A "matcher" is simply an object with a `Matches` method defining a set of golang +values matched by the matcher, and a `Description` method describing that set. +For example, here are some matchers: + +```go +// Numbers +Equals(17.13) +LessThan(19) + +// Strings +Equals("taco") +HasSubstr("burrito") +MatchesRegex("t.*o") + +// Combining matchers +AnyOf(LessThan(17), GreaterThan(19)) +``` + +There are lots more; see [here][reference] for a reference. You can also add +your own simply by implementing the `oglematchers.Matcher` interface. + + +Installation +------------ + +First, make sure you have installed Go 1.0.2 or newer. See +[here][golang-install] for instructions. + +Use the following command to install `oglematchers` and keep it up to date: + + go get -u github.com/smartystreets/assertions/internal/oglematchers + + +Documentation +------------- + +See [here][reference] for documentation. Alternatively, you can install the +package and then use `godoc`: + + godoc github.com/smartystreets/assertions/internal/oglematchers + + +[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers +[golang-install]: http://golang.org/doc/install.html +[googletest]: http://code.google.com/p/googletest/ +[google-js-test]: http://code.google.com/p/google-js-test/ +[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest +[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go new file mode 100644 index 00000000..2918b51f --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of.go @@ -0,0 +1,94 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package oglematchers + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +// AnyOf accepts a set of values S and returns a matcher that follows the +// algorithm below when considering a candidate c: +// +// 1. If there exists a value m in S such that m implements the Matcher +// interface and m matches c, return true. +// +// 2. Otherwise, if there exists a value v in S such that v does not implement +// the Matcher interface and the matcher Equals(v) matches c, return true. +// +// 3. Otherwise, if there is a value m in S such that m implements the Matcher +// interface and m returns a fatal error for c, return that fatal error. +// +// 4. Otherwise, return false. +// +// This is akin to a logical OR operation for matchers, with non-matchers x +// being treated as Equals(x). +func AnyOf(vals ...interface{}) Matcher { + // Get ahold of a type variable for the Matcher interface. + var dummy *Matcher + matcherType := reflect.TypeOf(dummy).Elem() + + // Create a matcher for each value, or use the value itself if it's already a + // matcher. + wrapped := make([]Matcher, len(vals)) + for i, v := range vals { + t := reflect.TypeOf(v) + if t != nil && t.Implements(matcherType) { + wrapped[i] = v.(Matcher) + } else { + wrapped[i] = Equals(v) + } + } + + return &anyOfMatcher{wrapped} +} + +type anyOfMatcher struct { + wrapped []Matcher +} + +func (m *anyOfMatcher) Description() string { + wrappedDescs := make([]string, len(m.wrapped)) + for i, matcher := range m.wrapped { + wrappedDescs[i] = matcher.Description() + } + + return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", ")) +} + +func (m *anyOfMatcher) Matches(c interface{}) (err error) { + err = errors.New("") + + // Try each matcher in turn. + for _, matcher := range m.wrapped { + wrappedErr := matcher.Matches(c) + + // Return immediately if there's a match. + if wrappedErr == nil { + err = nil + return + } + + // Note the fatal error, if any. + if _, isFatal := wrappedErr.(*FatalError); isFatal { + err = wrappedErr + } + } + + return +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go new file mode 100644 index 00000000..87f107d3 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains.go @@ -0,0 +1,61 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// Return a matcher that matches arrays slices with at least one element that +// matches the supplied argument. If the argument x is not itself a Matcher, +// this is equivalent to Contains(Equals(x)). 
+func Contains(x interface{}) Matcher {
+ var result containsMatcher
+ var ok bool
+
+ if result.elementMatcher, ok = x.(Matcher); !ok {
+ result.elementMatcher = DeepEquals(x)
+ }
+
+ return &result
+}
+
+type containsMatcher struct {
+ elementMatcher Matcher
+}
+
+func (m *containsMatcher) Description() string {
+ return fmt.Sprintf("contains: %s", m.elementMatcher.Description())
+}
+
+func (m *containsMatcher) Matches(candidate interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidate)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // Check each element.
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+ if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("")
+}
diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
new file mode 100644
index 00000000..1d91baef
--- /dev/null
+++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var byteSliceType reflect.Type = reflect.TypeOf([]byte{})
+
+// DeepEquals returns a matcher that matches based on 'deep equality', as
+// defined by the reflect package. This matcher requires that values have
+// identical types to x.
+func DeepEquals(x interface{}) Matcher {
+ return &deepEqualsMatcher{x}
+}
+
+type deepEqualsMatcher struct {
+ x interface{}
+}
+
+func (m *deepEqualsMatcher) Description() string {
+ xDesc := fmt.Sprintf("%v", m.x)
+ xValue := reflect.ValueOf(m.x)
+
+ // Special case: fmt.Sprintf presents nil slices as "[]", but
+ // reflect.DeepEqual makes a distinction between nil and empty slices. Make
+ // this less confusing.
+ if xValue.Kind() == reflect.Slice && xValue.IsNil() {
+ xDesc = "<nil slice>"
+ }
+
+ return fmt.Sprintf("deep equals: %s", xDesc)
+}
+
+func (m *deepEqualsMatcher) Matches(c interface{}) error {
+ // Make sure the types match.
+ ct := reflect.TypeOf(c)
+ xt := reflect.TypeOf(m.x)
+
+ if ct != xt {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: handle byte slices more efficiently.
+ cValue := reflect.ValueOf(c)
+ xValue := reflect.ValueOf(m.x)
+
+ if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() {
+ xBytes := m.x.([]byte)
+ cBytes := c.([]byte)
+
+ if bytes.Equal(cBytes, xBytes) {
+ return nil
+ }
+
+ return errors.New("")
+ }
+
+ // Defer to the reflect package.
+ if reflect.DeepEqual(m.x, c) {
+ return nil
+ }
+
+ // Special case: if the comparison failed because c is the nil slice, given
+ // an indication of this (since its value is printed as "[]").
+ if cValue.Kind() == reflect.Slice && cValue.IsNil() { + return errors.New("which is nil") + } + + return errors.New("") +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go new file mode 100644 index 00000000..a510707b --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals.go @@ -0,0 +1,541 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +// Equals(x) returns a matcher that matches values v such that v and x are +// equivalent. This includes the case when the comparison v == x using Go's +// built-in comparison operator is legal (except for structs, which this +// matcher does not support), but for convenience the following rules also +// apply: +// +// * Type checking is done based on underlying types rather than actual +// types, so that e.g. two aliases for string can be compared: +// +// type stringAlias1 string +// type stringAlias2 string +// +// a := "taco" +// b := stringAlias1("taco") +// c := stringAlias2("taco") +// +// ExpectTrue(a == b) // Legal, passes +// ExpectTrue(b == c) // Illegal, doesn't compile +// +// ExpectThat(a, Equals(b)) // Passes +// ExpectThat(b, Equals(c)) // Passes +// +// * Values of numeric type are treated as if they were abstract numbers, and +// compared accordingly. Therefore Equals(17) will match int(17), +// int16(17), uint(17), float32(17), complex64(17), and so on. +// +// If you want a stricter matcher that contains no such cleverness, see +// IdenticalTo instead. +// +// Arrays are supported by this matcher, but do not participate in the +// exceptions above. Two arrays compared with this matcher must have identical +// types, and their element type must itself be comparable according to Go's == +// operator. +func Equals(x interface{}) Matcher { + v := reflect.ValueOf(x) + + // This matcher doesn't support structs. + if v.Kind() == reflect.Struct { + panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind())) + } + + // The == operator is not defined for non-nil slices. 
+ if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) { + panic(fmt.Sprintf("oglematchers.Equals: non-nil slice")) + } + + return &equalsMatcher{v} +} + +type equalsMatcher struct { + expectedValue reflect.Value +} + +//////////////////////////////////////////////////////////////////////// +// Numeric types +//////////////////////////////////////////////////////////////////////// + +func isSignedInteger(v reflect.Value) bool { + k := v.Kind() + return k >= reflect.Int && k <= reflect.Int64 +} + +func isUnsignedInteger(v reflect.Value) bool { + k := v.Kind() + return k >= reflect.Uint && k <= reflect.Uintptr +} + +func isInteger(v reflect.Value) bool { + return isSignedInteger(v) || isUnsignedInteger(v) +} + +func isFloat(v reflect.Value) bool { + k := v.Kind() + return k == reflect.Float32 || k == reflect.Float64 +} + +func isComplex(v reflect.Value) bool { + k := v.Kind() + return k == reflect.Complex64 || k == reflect.Complex128 +} + +func checkAgainstInt64(e int64, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + if c.Int() == e { + err = nil + } + + case isUnsignedInteger(c): + u := c.Uint() + if u <= math.MaxInt64 && int64(u) == e { + err = nil + } + + // Turn around the various floating point types so that the checkAgainst* + // functions for them can deal with precision issues. + case isFloat(c), isComplex(c): + return Equals(c.Interface()).Matches(e) + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstUint64(e uint64, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + i := c.Int() + if i >= 0 && uint64(i) == e { + err = nil + } + + case isUnsignedInteger(c): + if c.Uint() == e { + err = nil + } + + // Turn around the various floating point types so that the checkAgainst* + // functions for them can deal with precision issues. + case isFloat(c), isComplex(c): + return Equals(c.Interface()).Matches(e) + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstFloat32(e float32, c reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(c): + if float32(c.Int()) == e { + err = nil + } + + case isUnsignedInteger(c): + if float32(c.Uint()) == e { + err = nil + } + + case isFloat(c): + // Compare using float32 to avoid a false sense of precision; otherwise + // e.g. Equals(float32(0.1)) won't match float32(0.1). + if float32(c.Float()) == e { + err = nil + } + + case isComplex(c): + comp := c.Complex() + rl := real(comp) + im := imag(comp) + + // Compare using float32 to avoid a false sense of precision; otherwise + // e.g. Equals(float32(0.1)) won't match (0.1 + 0i). + if im == 0 && float32(rl) == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstFloat64(e float64, c reflect.Value) (err error) { + err = errors.New("") + + ck := c.Kind() + + switch { + case isSignedInteger(c): + if float64(c.Int()) == e { + err = nil + } + + case isUnsignedInteger(c): + if float64(c.Uint()) == e { + err = nil + } + + // If the actual value is lower precision, turn the comparison around so we + // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match + // float32(0.1). + case ck == reflect.Float32 || ck == reflect.Complex64: + return Equals(c.Interface()).Matches(e) + + // Otherwise, compare with double precision. 
+ case isFloat(c): + if c.Float() == e { + err = nil + } + + case isComplex(c): + comp := c.Complex() + rl := real(comp) + im := imag(comp) + + if im == 0 && rl == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstComplex64(e complex64, c reflect.Value) (err error) { + err = errors.New("") + realPart := real(e) + imaginaryPart := imag(e) + + switch { + case isInteger(c) || isFloat(c): + // If we have no imaginary part, then we should just compare against the + // real part. Otherwise, we can't be equal. + if imaginaryPart != 0 { + return + } + + return checkAgainstFloat32(realPart, c) + + case isComplex(c): + // Compare using complex64 to avoid a false sense of precision; otherwise + // e.g. Equals(0.1 + 0i) won't match float32(0.1). + if complex64(c.Complex()) == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +func checkAgainstComplex128(e complex128, c reflect.Value) (err error) { + err = errors.New("") + realPart := real(e) + imaginaryPart := imag(e) + + switch { + case isInteger(c) || isFloat(c): + // If we have no imaginary part, then we should just compare against the + // real part. Otherwise, we can't be equal. + if imaginaryPart != 0 { + return + } + + return checkAgainstFloat64(realPart, c) + + case isComplex(c): + if c.Complex() == e { + err = nil + } + + default: + err = NewFatalError("which is not numeric") + } + + return +} + +//////////////////////////////////////////////////////////////////////// +// Other types +//////////////////////////////////////////////////////////////////////// + +func checkAgainstBool(e bool, c reflect.Value) (err error) { + if c.Kind() != reflect.Bool { + err = NewFatalError("which is not a bool") + return + } + + err = errors.New("") + if c.Bool() == e { + err = nil + } + return +} + +func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "chan int". + typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem()) + + // Make sure c is a chan of the correct type. + if c.Kind() != reflect.Chan || + c.Type().ChanDir() != e.Type().ChanDir() || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a function. + if c.Kind() != reflect.Func { + err = NewFatalError("which is not a function") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a map. + if c.Kind() != reflect.Map { + err = NewFatalError("which is not a map") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "*int". + typeStr := fmt.Sprintf("*%v", e.Type().Elem()) + + // Make sure c is a pointer of the correct type. + if c.Kind() != reflect.Ptr || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "[]int". 
+ typeStr := fmt.Sprintf("[]%v", e.Type().Elem()) + + // Make sure c is a slice of the correct type. + if c.Kind() != reflect.Slice || + c.Type().Elem() != e.Type().Elem() { + err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr)) + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkAgainstString(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a string. + if c.Kind() != reflect.String { + err = NewFatalError("which is not a string") + return + } + + err = errors.New("") + if c.String() == e.String() { + err = nil + } + return +} + +func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) { + // Create a description of e's type, e.g. "[2]int". + typeStr := fmt.Sprintf("%v", e.Type()) + + // Make sure c is the correct type. + if c.Type() != e.Type() { + err = NewFatalError(fmt.Sprintf("which is not %s", typeStr)) + return + } + + // Check for equality. + if e.Interface() != c.Interface() { + err = errors.New("") + return + } + + return +} + +func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) { + // Make sure c is a pointer. + if c.Kind() != reflect.UnsafePointer { + err = NewFatalError("which is not a unsafe.Pointer") + return + } + + err = errors.New("") + if c.Pointer() == e.Pointer() { + err = nil + } + return +} + +func checkForNil(c reflect.Value) (err error) { + err = errors.New("") + + // Make sure it is legal to call IsNil. + switch c.Kind() { + case reflect.Invalid: + case reflect.Chan: + case reflect.Func: + case reflect.Interface: + case reflect.Map: + case reflect.Ptr: + case reflect.Slice: + + default: + err = NewFatalError("which cannot be compared to nil") + return + } + + // Ask whether the value is nil. Handle a nil literal (kind Invalid) + // specially, since it's not legal to call IsNil there. + if c.Kind() == reflect.Invalid || c.IsNil() { + err = nil + } + return +} + +//////////////////////////////////////////////////////////////////////// +// Public implementation +//////////////////////////////////////////////////////////////////////// + +func (m *equalsMatcher) Matches(candidate interface{}) error { + e := m.expectedValue + c := reflect.ValueOf(candidate) + ek := e.Kind() + + switch { + case ek == reflect.Bool: + return checkAgainstBool(e.Bool(), c) + + case isSignedInteger(e): + return checkAgainstInt64(e.Int(), c) + + case isUnsignedInteger(e): + return checkAgainstUint64(e.Uint(), c) + + case ek == reflect.Float32: + return checkAgainstFloat32(float32(e.Float()), c) + + case ek == reflect.Float64: + return checkAgainstFloat64(e.Float(), c) + + case ek == reflect.Complex64: + return checkAgainstComplex64(complex64(e.Complex()), c) + + case ek == reflect.Complex128: + return checkAgainstComplex128(complex128(e.Complex()), c) + + case ek == reflect.Chan: + return checkAgainstChan(e, c) + + case ek == reflect.Func: + return checkAgainstFunc(e, c) + + case ek == reflect.Map: + return checkAgainstMap(e, c) + + case ek == reflect.Ptr: + return checkAgainstPtr(e, c) + + case ek == reflect.Slice: + return checkAgainstSlice(e, c) + + case ek == reflect.String: + return checkAgainstString(e, c) + + case ek == reflect.Array: + return checkAgainstArray(e, c) + + case ek == reflect.UnsafePointer: + return checkAgainstUnsafePointer(e, c) + + case ek == reflect.Invalid: + return checkForNil(c) + } + + panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek)) +} + +func (m *equalsMatcher) Description() string { + // Special case: handle nil. 
+ if !m.expectedValue.IsValid() { + return "is nil" + } + + return fmt.Sprintf("%v", m.expectedValue.Interface()) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go new file mode 100644 index 00000000..4b9d103a --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go @@ -0,0 +1,39 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// GreaterOrEqual returns a matcher that matches integer, floating point, or +// strings values v such that v >= x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// GreaterOrEqual will panic. +func GreaterOrEqual(x interface{}) Matcher { + desc := fmt.Sprintf("greater than or equal to %v", x) + + // Special case: make it clear that strings are strings. + if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("greater than or equal to \"%s\"", x) + } + + return transformDescription(Not(LessThan(x)), desc) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go new file mode 100644 index 00000000..3eef3217 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go @@ -0,0 +1,39 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// GreaterThan returns a matcher that matches integer, floating point, or +// strings values v such that v > x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// GreaterThan will panic. +func GreaterThan(x interface{}) Matcher { + desc := fmt.Sprintf("greater than %v", x) + + // Special case: make it clear that strings are strings. 
+ if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("greater than \"%s\"", x) + } + + return transformDescription(Not(LessOrEqual(x)), desc) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go new file mode 100644 index 00000000..8402cdea --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go @@ -0,0 +1,41 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "fmt" + "reflect" +) + +// LessOrEqual returns a matcher that matches integer, floating point, or +// strings values v such that v <= x. Comparison is not defined between numeric +// and string types, but is defined between all integer and floating point +// types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// LessOrEqual will panic. +func LessOrEqual(x interface{}) Matcher { + desc := fmt.Sprintf("less than or equal to %v", x) + + // Special case: make it clear that strings are strings. + if reflect.TypeOf(x).Kind() == reflect.String { + desc = fmt.Sprintf("less than or equal to \"%s\"", x) + } + + // Put LessThan last so that its error messages will be used in the event of + // failure. + return transformDescription(AnyOf(Equals(x), LessThan(x)), desc) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go new file mode 100644 index 00000000..8258e45d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than.go @@ -0,0 +1,152 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" + "math" + "reflect" +) + +// LessThan returns a matcher that matches integer, floating point, or strings +// values v such that v < x. Comparison is not defined between numeric and +// string types, but is defined between all integer and floating point types. +// +// x must itself be an integer, floating point, or string type; otherwise, +// LessThan will panic. 
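In the same spirit, a short in-package sketch (invented values, not vendored code) of how Equals treats numbers abstractly and how the comparison matchers built above behave:

// exampleNumericMatchers is an editor's sketch, not part of the vendored file.
func exampleNumericMatchers() error {
	// Equals compares numeric values as abstract numbers, so an untyped 17
	// matches the same value held in other numeric kinds.
	if err := Equals(17).Matches(uint16(17)); err != nil {
		return err
	}

	// GreaterOrEqual is Not(LessThan(x)), so it accepts equal values too.
	if err := GreaterOrEqual(10).Matches(10); err != nil {
		return err
	}

	// GreaterThan is Not(LessOrEqual(x)) and mixes float kinds freely.
	return GreaterThan(10.0).Matches(float32(10.5)) // nil: 10.5 > 10
}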
+func LessThan(x interface{}) Matcher { + v := reflect.ValueOf(x) + kind := v.Kind() + + switch { + case isInteger(v): + case isFloat(v): + case kind == reflect.String: + + default: + panic(fmt.Sprintf("LessThan: unexpected kind %v", kind)) + } + + return &lessThanMatcher{v} +} + +type lessThanMatcher struct { + limit reflect.Value +} + +func (m *lessThanMatcher) Description() string { + // Special case: make it clear that strings are strings. + if m.limit.Kind() == reflect.String { + return fmt.Sprintf("less than \"%s\"", m.limit.String()) + } + + return fmt.Sprintf("less than %v", m.limit.Interface()) +} + +func compareIntegers(v1, v2 reflect.Value) (err error) { + err = errors.New("") + + switch { + case isSignedInteger(v1) && isSignedInteger(v2): + if v1.Int() < v2.Int() { + err = nil + } + return + + case isSignedInteger(v1) && isUnsignedInteger(v2): + if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() { + err = nil + } + return + + case isUnsignedInteger(v1) && isSignedInteger(v2): + if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() { + err = nil + } + return + + case isUnsignedInteger(v1) && isUnsignedInteger(v2): + if v1.Uint() < v2.Uint() { + err = nil + } + return + } + + panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2)) +} + +func getFloat(v reflect.Value) float64 { + switch { + case isSignedInteger(v): + return float64(v.Int()) + + case isUnsignedInteger(v): + return float64(v.Uint()) + + case isFloat(v): + return v.Float() + } + + panic(fmt.Sprintf("getFloat: %v", v)) +} + +func (m *lessThanMatcher) Matches(c interface{}) (err error) { + v1 := reflect.ValueOf(c) + v2 := m.limit + + err = errors.New("") + + // Handle strings as a special case. + if v1.Kind() == reflect.String && v2.Kind() == reflect.String { + if v1.String() < v2.String() { + err = nil + } + return + } + + // If we get here, we require that we are dealing with integers or floats. + v1Legal := isInteger(v1) || isFloat(v1) + v2Legal := isInteger(v2) || isFloat(v2) + if !v1Legal || !v2Legal { + err = NewFatalError("which is not comparable") + return + } + + // Handle the various comparison cases. + switch { + // Both integers + case isInteger(v1) && isInteger(v2): + return compareIntegers(v1, v2) + + // At least one float32 + case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32: + if float32(getFloat(v1)) < float32(getFloat(v2)) { + err = nil + } + return + + // At least one float64 + case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64: + if getFloat(v1) < getFloat(v2) { + err = nil + } + return + } + + // We shouldn't get here. + panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2)) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go new file mode 100644 index 00000000..78159a07 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/matcher.go @@ -0,0 +1,86 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package oglematchers provides a set of matchers useful in a testing or +// mocking framework. These matchers are inspired by and mostly compatible with +// Google Test for C++ and Google JS Test. +// +// This package is used by github.com/smartystreets/assertions/internal/ogletest and +// github.com/smartystreets/assertions/internal/oglemock, which may be more directly useful if you're not +// writing your own testing package or defining your own matchers. +package oglematchers + +// A Matcher is some predicate implicitly defining a set of values that it +// matches. For example, GreaterThan(17) matches all numeric values greater +// than 17, and HasSubstr("taco") matches all strings with the substring +// "taco". +// +// Matchers are typically exposed to tests via constructor functions like +// HasSubstr. In order to implement such a function you can either define your +// own matcher type or use NewMatcher. +type Matcher interface { + // Check whether the supplied value belongs to the the set defined by the + // matcher. Return a non-nil error if and only if it does not. + // + // The error describes why the value doesn't match. The error text is a + // relative clause that is suitable for being placed after the value. For + // example, a predicate that matches strings with a particular substring may, + // when presented with a numerical value, return the following error text: + // + // "which is not a string" + // + // Then the failure message may look like: + // + // Expected: has substring "taco" + // Actual: 17, which is not a string + // + // If the error is self-apparent based on the description of the matcher, the + // error text may be empty (but the error still non-nil). For example: + // + // Expected: 17 + // Actual: 19 + // + // If you are implementing a new matcher, see also the documentation on + // FatalError. + Matches(candidate interface{}) error + + // Description returns a string describing the property that values matching + // this matcher have, as a verb phrase where the subject is the value. For + // example, "is greather than 17" or "has substring "taco"". + Description() string +} + +// FatalError is an implementation of the error interface that may be returned +// from matchers, indicating the error should be propagated. Returning a +// *FatalError indicates that the matcher doesn't process values of the +// supplied type, or otherwise doesn't know how to handle the value. +// +// For example, if GreaterThan(17) returned false for the value "taco" without +// a fatal error, then Not(GreaterThan(17)) would return true. This is +// technically correct, but is surprising and may mask failures where the wrong +// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a +// fatal error, which will be propagated by Not(). +type FatalError struct { + errorText string +} + +// NewFatalError creates a FatalError struct with the supplied error text. 
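To make the Matcher and FatalError contract described above concrete, a hedged sketch of a custom matcher follows; it is written as if it lived inside this package, hasPrefixMatcher is invented for illustration, and the standard errors import is assumed.

// hasPrefixMatcher is an editor's sketch of a custom Matcher, not vendored code.
type hasPrefixMatcher struct {
	prefix string
}

func (m *hasPrefixMatcher) Description() string {
	return "has prefix \"" + m.prefix + "\""
}

func (m *hasPrefixMatcher) Matches(candidate interface{}) error {
	s, ok := candidate.(string)
	if !ok {
		// A *FatalError tells wrapping matchers such as Not to propagate the
		// error rather than invert the result.
		return NewFatalError("which is not a string")
	}
	if len(s) < len(m.prefix) || s[:len(m.prefix)] != m.prefix {
		// A plain, non-fatal error marks an ordinary mismatch; the empty text
		// lets the matcher description speak for itself.
		return errors.New("")
	}
	return nil
}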
+func NewFatalError(s string) *FatalError { + return &FatalError{s} +} + +func (e *FatalError) Error() string { + return e.errorText +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go new file mode 100644 index 00000000..623789fe --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/not.go @@ -0,0 +1,53 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +import ( + "errors" + "fmt" +) + +// Not returns a matcher that inverts the set of values matched by the wrapped +// matcher. It does not transform the result for values for which the wrapped +// matcher returns a fatal error. +func Not(m Matcher) Matcher { + return ¬Matcher{m} +} + +type notMatcher struct { + wrapped Matcher +} + +func (m *notMatcher) Matches(c interface{}) (err error) { + err = m.wrapped.Matches(c) + + // Did the wrapped matcher say yes? + if err == nil { + return errors.New("") + } + + // Did the wrapped matcher return a fatal error? + if _, isFatal := err.(*FatalError); isFatal { + return err + } + + // The wrapped matcher returned a non-fatal error. + return nil +} + +func (m *notMatcher) Description() string { + return fmt.Sprintf("not(%s)", m.wrapped.Description()) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go new file mode 100644 index 00000000..8ea2807c --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go @@ -0,0 +1,36 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers + +// transformDescription returns a matcher that is equivalent to the supplied +// one, except that it has the supplied description instead of the one attached +// to the existing matcher. 
+func transformDescription(m Matcher, newDesc string) Matcher { + return &transformDescriptionMatcher{newDesc, m} +} + +type transformDescriptionMatcher struct { + desc string + wrappedMatcher Matcher +} + +func (m *transformDescriptionMatcher) Description() string { + return m.desc +} + +func (m *transformDescriptionMatcher) Matches(c interface{}) error { + return m.wrappedMatcher.Matches(c) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/messages.go b/backend/vendor/github.com/smartystreets/assertions/messages.go new file mode 100644 index 00000000..72782b00 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/messages.go @@ -0,0 +1,106 @@ +package assertions + +const ( + shouldHaveBeenEqual = "Expected: '%v'\nActual: '%v'\n(Should be equal)" + shouldHaveBeenEqualNoResemblance = "Both the actual and expected values render equally ('%s') and their types are the same. Try using ShouldResemble instead." + shouldNotHaveBeenEqual = "Expected '%v'\nto NOT equal '%v'\n(but it did)!" + shouldHaveBeenEqualTypeMismatch = "Expected: '%v' (%T)\nActual: '%v' (%T)\n(Should be equal, type mismatch)" + + shouldHaveBeenAlmostEqual = "Expected '%v' to almost equal '%v' (but it didn't)!" + shouldHaveNotBeenAlmostEqual = "Expected '%v' to NOT almost equal '%v' (but it did)!" + + shouldHaveResembled = "Expected: '%s'\nActual: '%s'\n(Should resemble)!" + shouldNotHaveResembled = "Expected '%#v'\nto NOT resemble '%#v'\n(but it did)!" + + shouldBePointers = "Both arguments should be pointers " + shouldHaveBeenNonNilPointer = shouldBePointers + "(the %s was %s)!" + shouldHavePointedTo = "Expected '%+v' (address: '%v') and '%+v' (address: '%v') to be the same address (but their weren't)!" + shouldNotHavePointedTo = "Expected '%+v' and '%+v' to be different references (but they matched: '%v')!" + + shouldHaveBeenNil = "Expected: nil\nActual: '%v'" + shouldNotHaveBeenNil = "Expected '%+v' to NOT be nil (but it was)!" + + shouldHaveBeenTrue = "Expected: true\nActual: %v" + shouldHaveBeenFalse = "Expected: false\nActual: %v" + + shouldHaveBeenZeroValue = "'%+v' should have been the zero value" //"Expected: (zero value)\nActual: %v" + shouldNotHaveBeenZeroValue = "'%+v' should NOT have been the zero value" + + shouldHaveBeenGreater = "Expected '%v' to be greater than '%v' (but it wasn't)!" + shouldHaveBeenGreaterOrEqual = "Expected '%v' to be greater than or equal to '%v' (but it wasn't)!" + + shouldHaveBeenLess = "Expected '%v' to be less than '%v' (but it wasn't)!" + shouldHaveBeenLessOrEqual = "Expected '%v' to be less than or equal to '%v' (but it wasn't)!" + + shouldHaveBeenBetween = "Expected '%v' to be between '%v' and '%v' (but it wasn't)!" + shouldNotHaveBeenBetween = "Expected '%v' NOT to be between '%v' and '%v' (but it was)!" + shouldHaveDifferentUpperAndLower = "The lower and upper bounds must be different values (they were both '%v')." + + shouldHaveBeenBetweenOrEqual = "Expected '%v' to be between '%v' and '%v' or equal to one of them (but it wasn't)!" + shouldNotHaveBeenBetweenOrEqual = "Expected '%v' NOT to be between '%v' and '%v' or equal to one of them (but it was)!" + + shouldHaveContained = "Expected the container (%v) to contain: '%v' (but it didn't)!" + shouldNotHaveContained = "Expected the container (%v) NOT to contain: '%v' (but it did)!" + shouldHaveBeenAValidCollection = "You must provide a valid container (was %v)!" + + shouldHaveContainedKey = "Expected the %v to contain the key: %v (but it didn't)!" 
+ shouldNotHaveContainedKey = "Expected the %v NOT to contain the key: %v (but it did)!" + shouldHaveBeenAValidMap = "You must provide a valid map type (was %v)!" + + shouldHaveBeenIn = "Expected '%v' to be in the container (%v), but it wasn't!" + shouldNotHaveBeenIn = "Expected '%v' NOT to be in the container (%v), but it was!" + + shouldHaveBeenEmpty = "Expected %+v to be empty (but it wasn't)!" + shouldNotHaveBeenEmpty = "Expected %+v to NOT be empty (but it was)!" + + shouldHaveBeenAValidInteger = "You must provide a valid integer (was %v)!" + shouldHaveBeenAValidLength = "You must provide a valid positive integer (was %v)!" + shouldHaveHadLength = "Expected collection to have length equal to [%v], but it's length was [%v] instead! contents: %+v" + + shouldHaveStartedWith = "Expected '%v'\nto start with '%v'\n(but it didn't)!" + shouldNotHaveStartedWith = "Expected '%v'\nNOT to start with '%v'\n(but it did)!" + + shouldHaveEndedWith = "Expected '%v'\nto end with '%v'\n(but it didn't)!" + shouldNotHaveEndedWith = "Expected '%v'\nNOT to end with '%v'\n(but it did)!" + + shouldAllBeStrings = "All arguments to this assertion must be strings (you provided: %v)." + shouldBothBeStrings = "Both arguments to this assertion must be strings (you provided %v and %v)." + + shouldHaveContainedSubstring = "Expected '%s' to contain substring '%s' (but it didn't)!" + shouldNotHaveContainedSubstring = "Expected '%s' NOT to contain substring '%s' (but it did)!" + + shouldBeString = "The argument to this assertion must be a string (you provided %v)." + shouldHaveBeenBlank = "Expected '%s' to be blank (but it wasn't)!" + shouldNotHaveBeenBlank = "Expected value to NOT be blank (but it was)!" + + shouldUseVoidNiladicFunction = "You must provide a void, niladic function as the first argument!" + shouldHavePanicked = "Expected func() to panic (but it didn't)!" + shouldNotHavePanicked = "Expected func() NOT to panic (error: '%+v')!" + + shouldHavePanickedWith = "Expected func() to panic with '%v' (but it panicked with '%v')!" + shouldNotHavePanickedWith = "Expected func() NOT to panic with '%v' (but it did)!" + + shouldHaveBeenA = "Expected '%v' to be: '%v' (but was: '%v')!" + shouldNotHaveBeenA = "Expected '%v' to NOT be: '%v' (but it was)!" + + shouldHaveImplemented = "Expected: '%v interface support'\nActual: '%v' does not implement the interface!" + shouldNotHaveImplemented = "Expected '%v'\nto NOT implement '%v'\n(but it did)!" + shouldCompareWithInterfacePointer = "The expected value must be a pointer to an interface type (eg. *fmt.Stringer)" + shouldNotBeNilActual = "The actual value was 'nil' and should be a value or a pointer to a value!" + + shouldBeError = "Expected an error value (but was '%v' instead)!" + shouldBeErrorInvalidComparisonValue = "The final argument to this assertion must be a string or an error value (you provided: '%v')." + + shouldUseTimes = "You must provide time instances as arguments to this assertion." + shouldUseTimeSlice = "You must provide a slice of time instances as the first argument to this assertion." + shouldUseDurationAndTime = "You must provide a duration and a time as arguments to this assertion." + + shouldHaveHappenedBefore = "Expected '%v' to happen before '%v' (it happened '%v' after)!" + shouldHaveHappenedAfter = "Expected '%v' to happen after '%v' (it happened '%v' before)!" + shouldHaveHappenedBetween = "Expected '%v' to happen between '%v' and '%v' (it happened '%v' outside threshold)!" 
+ shouldNotHaveHappenedOnOrBetween = "Expected '%v' to NOT happen on or between '%v' and '%v' (but it did)!" + + // format params: incorrect-index, previous-index, previous-time, incorrect-index, incorrect-time + shouldHaveBeenChronological = "The 'Time' at index [%d] should have happened after the previous one (but it didn't!):\n [%d]: %s\n [%d]: %s (see, it happened before!)" + shouldNotHaveBeenchronological = "The provided times should NOT be chronological, but they were." +) diff --git a/backend/vendor/github.com/smartystreets/assertions/panic.go b/backend/vendor/github.com/smartystreets/assertions/panic.go new file mode 100644 index 00000000..7e75db17 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/panic.go @@ -0,0 +1,115 @@ +package assertions + +import "fmt" + +// ShouldPanic receives a void, niladic function and expects to recover a panic. +func ShouldPanic(actual interface{}, expected ...interface{}) (message string) { + if fail := need(0, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = shouldHavePanicked + } else { + message = success + } + }() + action() + + return +} + +// ShouldNotPanic receives a void, niladic function and expects to execute the function without any panic. +func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) { + if fail := need(0, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered != nil { + message = fmt.Sprintf(shouldNotHavePanicked, recovered) + } else { + message = success + } + }() + action() + + return +} + +// ShouldPanicWith receives a void, niladic function and expects to recover a panic with the second argument as the content. +func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) { + if fail := need(1, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = shouldHavePanicked + } else { + if equal := ShouldEqual(recovered, expected[0]); equal != success { + message = serializer.serialize(expected[0], recovered, fmt.Sprintf(shouldHavePanickedWith, expected[0], recovered)) + } else { + message = success + } + } + }() + action() + + return +} + +// ShouldNotPanicWith receives a void, niladic function and expects to recover a panic whose content differs from the second argument. 
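Moving from matchers to the exported assertion helpers: a small illustrative program (not part of the diff) showing how the panic assertions above are called directly; an empty return string means the assertion passed.

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	boom := func() { panic("boom") }

	// Each assertion returns "" on success and a failure message otherwise.
	fmt.Println(assertions.ShouldPanic(boom) == "")             // true
	fmt.Println(assertions.ShouldPanicWith(boom, "boom") == "") // true
	fmt.Println(assertions.ShouldNotPanic(func() {}) == "")     // true
}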
+func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) { + if fail := need(1, expected); fail != success { + return fail + } + + action, _ := actual.(func()) + + if action == nil { + message = shouldUseVoidNiladicFunction + return + } + + defer func() { + recovered := recover() + if recovered == nil { + message = success + } else { + if equal := ShouldEqual(recovered, expected[0]); equal == success { + message = fmt.Sprintf(shouldNotHavePanickedWith, expected[0]) + } else { + message = success + } + } + }() + action() + + return +} diff --git a/backend/vendor/github.com/smartystreets/assertions/quantity.go b/backend/vendor/github.com/smartystreets/assertions/quantity.go new file mode 100644 index 00000000..f28b0a06 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/quantity.go @@ -0,0 +1,141 @@ +package assertions + +import ( + "fmt" + + "github.com/smartystreets/assertions/internal/oglematchers" +) + +// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second. +func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0]) + } + return success +} + +// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second. +func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0]) + } + return success +} + +// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second. +func ShouldBeLessThan(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0]) + } + return success +} + +// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than or equal to the second. +func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } else if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil { + return fmt.Sprintf(shouldHaveBeenLessOrEqual, actual, expected[0]) + } + return success +} + +// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is between both bounds (but not equal to either of them). +func ShouldBeBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if !isBetween(actual, lower, upper) { + return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper) + } + return success +} + +// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is NOT between both bounds. 
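Likewise for the quantity assertions declared here, a brief sketch with illustrative values; note that deriveBounds (further down) reorders the bounds, so they may be passed in either order.

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	fmt.Println(assertions.ShouldBeGreaterThan(3, 2) == "")     // true
	fmt.Println(assertions.ShouldBeBetween(5, 1, 10) == "")     // true
	fmt.Println(assertions.ShouldBeBetween(5, 10, 1) == "")     // true: bounds reordered
	fmt.Println(assertions.ShouldNotBeBetween(11, 1, 10) == "") // true
}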
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if isBetween(actual, lower, upper) { + return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper) + } + return success +} +func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) { + lower = values[0] + upper = values[1] + + if ShouldNotEqual(lower, upper) != success { + return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower) + } else if ShouldBeLessThan(lower, upper) != success { + lower, upper = upper, lower + } + return lower, upper, success +} +func isBetween(value, lower, upper interface{}) bool { + if ShouldBeGreaterThan(value, lower) != success { + return false + } else if ShouldBeLessThan(value, upper) != success { + return false + } + return true +} + +// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is between both bounds or equal to one of them. +func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if !isBetweenOrEqual(actual, lower, upper) { + return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper) + } + return success +} + +// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound. +// It ensures that the actual value is nopt between the bounds nor equal to either of them. +func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + lower, upper, fail := deriveBounds(expected) + + if fail != success { + return fail + } else if isBetweenOrEqual(actual, lower, upper) { + return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper) + } + return success +} + +func isBetweenOrEqual(value, lower, upper interface{}) bool { + if ShouldBeGreaterThanOrEqualTo(value, lower) != success { + return false + } else if ShouldBeLessThanOrEqualTo(value, upper) != success { + return false + } + return true +} diff --git a/backend/vendor/github.com/smartystreets/assertions/serializer.go b/backend/vendor/github.com/smartystreets/assertions/serializer.go new file mode 100644 index 00000000..f1e3570e --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/serializer.go @@ -0,0 +1,70 @@ +package assertions + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/smartystreets/assertions/internal/go-render/render" +) + +type Serializer interface { + serialize(expected, actual interface{}, message string) string + serializeDetailed(expected, actual interface{}, message string) string +} + +type failureSerializer struct{} + +func (self *failureSerializer) serializeDetailed(expected, actual interface{}, message string) string { + if index := strings.Index(message, " Diff:"); index > 0 { + message = message[:index] + } + view := FailureView{ + Message: message, + Expected: render.Render(expected), + Actual: render.Render(actual), + } + serialized, _ := json.Marshal(view) + return string(serialized) +} + +func (self *failureSerializer) serialize(expected, actual interface{}, message string) string { + if index := 
strings.Index(message, " Diff:"); index > 0 { + message = message[:index] + } + view := FailureView{ + Message: message, + Expected: fmt.Sprintf("%+v", expected), + Actual: fmt.Sprintf("%+v", actual), + } + serialized, _ := json.Marshal(view) + return string(serialized) +} + +func newSerializer() *failureSerializer { + return &failureSerializer{} +} + +/////////////////////////////////////////////////////////////////////////////// + +// This struct is also declared in github.com/smartystreets/goconvey/convey/reporting. +// The json struct tags should be equal in both declarations. +type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} + +/////////////////////////////////////////////////////// + +// noopSerializer just gives back the original message. This is useful when we are using +// the assertions from a context other than the GoConvey Web UI, that requires the JSON +// structure provided by the failureSerializer. +type noopSerializer struct{} + +func (self *noopSerializer) serialize(expected, actual interface{}, message string) string { + return message +} +func (self *noopSerializer) serializeDetailed(expected, actual interface{}, message string) string { + return message +} diff --git a/backend/vendor/github.com/smartystreets/assertions/strings.go b/backend/vendor/github.com/smartystreets/assertions/strings.go new file mode 100644 index 00000000..dbc3f047 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/strings.go @@ -0,0 +1,227 @@ +package assertions + +import ( + "fmt" + "reflect" + "strings" +) + +// ShouldStartWith receives exactly 2 string parameters and ensures that the first starts with the second. +func ShouldStartWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + prefix, prefixIsString := expected[0].(string) + + if !valueIsString || !prefixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldStartWith(value, prefix) +} +func shouldStartWith(value, prefix string) string { + if !strings.HasPrefix(value, prefix) { + shortval := value + if len(shortval) > len(prefix) { + shortval = shortval[:len(prefix)] + "..." + } + return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix)) + } + return success +} + +// ShouldNotStartWith receives exactly 2 string parameters and ensures that the first does not start with the second. +func ShouldNotStartWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + prefix, prefixIsString := expected[0].(string) + + if !valueIsString || !prefixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldNotStartWith(value, prefix) +} +func shouldNotStartWith(value, prefix string) string { + if strings.HasPrefix(value, prefix) { + if value == "" { + value = "" + } + if prefix == "" { + prefix = "" + } + return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix) + } + return success +} + +// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second. 
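The string assertions that follow work the same way; a short illustrative sketch (values chosen arbitrarily):

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	fmt.Println(assertions.ShouldStartWith("crawlab", "crawl") == "")            // true
	fmt.Println(assertions.ShouldEndWith("config.yml", ".yml") == "")            // true
	fmt.Println(assertions.ShouldContainSubstring("hello world", "lo wo") == "") // true
	fmt.Println(assertions.ShouldBeBlank("") == "")                              // true
}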
+func ShouldEndWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + suffix, suffixIsString := expected[0].(string) + + if !valueIsString || !suffixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldEndWith(value, suffix) +} +func shouldEndWith(value, suffix string) string { + if !strings.HasSuffix(value, suffix) { + shortval := value + if len(shortval) > len(suffix) { + shortval = "..." + shortval[len(shortval)-len(suffix):] + } + return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix)) + } + return success +} + +// ShouldEndWith receives exactly 2 string parameters and ensures that the first does not end with the second. +func ShouldNotEndWith(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + value, valueIsString := actual.(string) + suffix, suffixIsString := expected[0].(string) + + if !valueIsString || !suffixIsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + return shouldNotEndWith(value, suffix) +} +func shouldNotEndWith(value, suffix string) string { + if strings.HasSuffix(value, suffix) { + if value == "" { + value = "" + } + if suffix == "" { + suffix = "" + } + return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix) + } + return success +} + +// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring. +func ShouldContainSubstring(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + long, longOk := actual.(string) + short, shortOk := expected[0].(string) + + if !longOk || !shortOk { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + if !strings.Contains(long, short) { + return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, long, short)) + } + return success +} + +// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring. +func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + long, longOk := actual.(string) + short, shortOk := expected[0].(string) + + if !longOk || !shortOk { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + if strings.Contains(long, short) { + return fmt.Sprintf(shouldNotHaveContainedSubstring, long, short) + } + return success +} + +// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "". +func ShouldBeBlank(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + value, ok := actual.(string) + if !ok { + return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) + } + if value != "" { + return serializer.serialize("", value, fmt.Sprintf(shouldHaveBeenBlank, value)) + } + return success +} + +// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is equal to "". 
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + value, ok := actual.(string) + if !ok { + return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual)) + } + if value == "" { + return shouldNotHaveBeenBlank + } + return success +} + +// ShouldEqualWithout receives exactly 3 string parameters and ensures that the first is equal to the second +// after removing all instances of the third from the first using strings.Replace(first, third, "", -1). +func ShouldEqualWithout(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualString, ok1 := actual.(string) + expectedString, ok2 := expected[0].(string) + replace, ok3 := expected[1].(string) + + if !ok1 || !ok2 || !ok3 { + return fmt.Sprintf(shouldAllBeStrings, []reflect.Type{ + reflect.TypeOf(actual), + reflect.TypeOf(expected[0]), + reflect.TypeOf(expected[1]), + }) + } + + replaced := strings.Replace(actualString, replace, "", -1) + if replaced == expectedString { + return "" + } + + return fmt.Sprintf("Expected '%s' to equal '%s' but without any '%s' (but it didn't).", actualString, expectedString, replace) +} + +// ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the first is equal to the second +// after removing all leading and trailing whitespace using strings.TrimSpace(first). +func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + actualString, valueIsString := actual.(string) + _, value2IsString := expected[0].(string) + + if !valueIsString || !value2IsString { + return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0])) + } + + actualString = strings.TrimSpace(actualString) + return ShouldEqual(actualString, expected[0]) +} diff --git a/backend/vendor/github.com/smartystreets/assertions/time.go b/backend/vendor/github.com/smartystreets/assertions/time.go new file mode 100644 index 00000000..918ee284 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/time.go @@ -0,0 +1,218 @@ +package assertions + +import ( + "fmt" + "time" +) + +// ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the first happens before the second. +func ShouldHappenBefore(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + + if !actualTime.Before(expectedTime) { + return fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime)) + } + + return success +} + +// ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that the first happens on or before the second. +func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + + if actualTime.Equal(expectedTime) { + return success + } + return ShouldHappenBefore(actualTime, expectedTime) +} + +// ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the first happens after the second. 
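Before the time assertions, one more sketch covering the normalizing string comparisons defined just above (again illustrative, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// Equal once every "-" is stripped from the actual value.
	fmt.Println(assertions.ShouldEqualWithout("a-b-c", "abc", "-") == "") // true

	// Equal once leading and trailing whitespace is trimmed from the actual value.
	fmt.Println(assertions.ShouldEqualTrimSpace("  crawlab  ", "crawlab") == "") // true
}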
+func ShouldHappenAfter(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + if !actualTime.After(expectedTime) { + return fmt.Sprintf(shouldHaveHappenedAfter, actualTime, expectedTime, expectedTime.Sub(actualTime)) + } + return success +} + +// ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that the first happens on or after the second. +func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + expectedTime, secondOk := expected[0].(time.Time) + + if !firstOk || !secondOk { + return shouldUseTimes + } + if actualTime.Equal(expectedTime) { + return success + } + return ShouldHappenAfter(actualTime, expectedTime) +} + +// ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the first happens between (not on) the second and third. +func ShouldHappenBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + + if !actualTime.After(min) { + return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, min.Sub(actualTime)) + } + if !actualTime.Before(max) { + return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, actualTime.Sub(max)) + } + return success +} + +// ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first happens between or on the second and third. +func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + if actualTime.Equal(min) || actualTime.Equal(max) { + return success + } + return ShouldHappenBetween(actualTime, min, max) +} + +// ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first +// does NOT happen between or on the second or third. +func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + min, secondOk := expected[0].(time.Time) + max, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseTimes + } + if actualTime.Equal(min) || actualTime.Equal(max) { + return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) + } + if actualTime.After(min) && actualTime.Before(max) { + return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max) + } + return success +} + +// ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) +// and asserts that the first time.Time happens within or on the duration specified relative to +// the other time.Time. 
+func ShouldHappenWithin(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + tolerance, secondOk := expected[0].(time.Duration) + threshold, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseDurationAndTime + } + + min := threshold.Add(-tolerance) + max := threshold.Add(tolerance) + return ShouldHappenOnOrBetween(actualTime, min, max) +} + +// ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments) +// and asserts that the first time.Time does NOT happen within or on the duration specified relative to +// the other time.Time. +func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string { + if fail := need(2, expected); fail != success { + return fail + } + actualTime, firstOk := actual.(time.Time) + tolerance, secondOk := expected[0].(time.Duration) + threshold, thirdOk := expected[1].(time.Time) + + if !firstOk || !secondOk || !thirdOk { + return shouldUseDurationAndTime + } + + min := threshold.Add(-tolerance) + max := threshold.Add(tolerance) + return ShouldNotHappenOnOrBetween(actualTime, min, max) +} + +// ShouldBeChronological receives a []time.Time slice and asserts that they are +// in chronological order starting with the first time.Time as the earliest. +func ShouldBeChronological(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + + times, ok := actual.([]time.Time) + if !ok { + return shouldUseTimeSlice + } + + var previous time.Time + for i, current := range times { + if i > 0 && current.Before(previous) { + return fmt.Sprintf(shouldHaveBeenChronological, + i, i-1, previous.String(), i, current.String()) + } + previous = current + } + return "" +} + +// ShouldNotBeChronological receives a []time.Time slice and asserts that they are +// NOT in chronological order. +func ShouldNotBeChronological(actual interface{}, expected ...interface{}) string { + if fail := need(0, expected); fail != success { + return fail + } + if _, ok := actual.([]time.Time); !ok { + return shouldUseTimeSlice + } + result := ShouldBeChronological(actual, expected...) + if result != "" { + return "" + } + return shouldNotHaveBeenchronological +} diff --git a/backend/vendor/github.com/smartystreets/assertions/type.go b/backend/vendor/github.com/smartystreets/assertions/type.go new file mode 100644 index 00000000..d2d1dc86 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/assertions/type.go @@ -0,0 +1,134 @@ +package assertions + +import ( + "fmt" + "reflect" +) + +// ShouldHaveSameTypeAs receives exactly two parameters and compares their underlying types for equality. +func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + first := reflect.TypeOf(actual) + second := reflect.TypeOf(expected[0]) + + if first != second { + return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first)) + } + + return success +} + +// ShouldNotHaveSameTypeAs receives exactly two parameters and compares their underlying types for inequality. 
+func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string { + if fail := need(1, expected); fail != success { + return fail + } + + first := reflect.TypeOf(actual) + second := reflect.TypeOf(expected[0]) + + if (actual == nil && expected[0] == nil) || first == second { + return fmt.Sprintf(shouldNotHaveBeenA, actual, second) + } + return success +} + +// ShouldImplement receives exactly two parameters and ensures +// that the first implements the interface type of the second. +func ShouldImplement(actual interface{}, expectedList ...interface{}) string { + if fail := need(1, expectedList); fail != success { + return fail + } + + expected := expectedList[0] + if fail := ShouldBeNil(expected); fail != success { + return shouldCompareWithInterfacePointer + } + + if fail := ShouldNotBeNil(actual); fail != success { + return shouldNotBeNilActual + } + + var actualType reflect.Type + if reflect.TypeOf(actual).Kind() != reflect.Ptr { + actualType = reflect.PtrTo(reflect.TypeOf(actual)) + } else { + actualType = reflect.TypeOf(actual) + } + + expectedType := reflect.TypeOf(expected) + if fail := ShouldNotBeNil(expectedType); fail != success { + return shouldCompareWithInterfacePointer + } + + expectedInterface := expectedType.Elem() + + if !actualType.Implements(expectedInterface) { + return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actualType) + } + return success +} + +// ShouldNotImplement receives exactly two parameters and ensures +// that the first does NOT implement the interface type of the second. +func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string { + if fail := need(1, expectedList); fail != success { + return fail + } + + expected := expectedList[0] + if fail := ShouldBeNil(expected); fail != success { + return shouldCompareWithInterfacePointer + } + + if fail := ShouldNotBeNil(actual); fail != success { + return shouldNotBeNilActual + } + + var actualType reflect.Type + if reflect.TypeOf(actual).Kind() != reflect.Ptr { + actualType = reflect.PtrTo(reflect.TypeOf(actual)) + } else { + actualType = reflect.TypeOf(actual) + } + + expectedType := reflect.TypeOf(expected) + if fail := ShouldNotBeNil(expectedType); fail != success { + return shouldCompareWithInterfacePointer + } + + expectedInterface := expectedType.Elem() + + if actualType.Implements(expectedInterface) { + return fmt.Sprintf(shouldNotHaveImplemented, actualType, expectedInterface) + } + return success +} + +// ShouldBeError asserts that the first argument implements the error interface. +// It also compares the first argument against the second argument if provided +// (which must be an error message string or another error value). 
+func ShouldBeError(actual interface{}, expected ...interface{}) string { + if fail := atMost(1, expected); fail != success { + return fail + } + + if !isError(actual) { + return fmt.Sprintf(shouldBeError, reflect.TypeOf(actual)) + } + + if len(expected) == 0 { + return success + } + + if expected := expected[0]; !isString(expected) && !isError(expected) { + return fmt.Sprintf(shouldBeErrorInvalidComparisonValue, reflect.TypeOf(expected)) + } + return ShouldEqual(fmt.Sprint(actual), fmt.Sprint(expected[0])) +} + +func isString(value interface{}) bool { _, ok := value.(string); return ok } +func isError(value interface{}) bool { _, ok := value.(error); return ok } diff --git a/backend/vendor/github.com/smartystreets/goconvey/LICENSE.md b/backend/vendor/github.com/smartystreets/goconvey/LICENSE.md new file mode 100644 index 00000000..3f87a40e --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/LICENSE.md @@ -0,0 +1,23 @@ +Copyright (c) 2016 SmartyStreets, LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +NOTE: Various optional and subordinate components carry their own licensing +requirements and restrictions. Use of those components is subject to the terms +and conditions outlined the respective license of each component. 
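For context on how the vendored assertion helpers above (the string helpers plus time.go and type.go) are consumed, here is a minimal sketch of a GoConvey-style test. The package name, test name, and sample values are illustrative assumptions and are not part of this changeset; the helpers are reached through the dot-imported convey package, whose re-exports are added in convey/assertions.go below.

package example

import (
	"errors"
	"testing"
	"time"

	. "github.com/smartystreets/goconvey/convey"
)

// TestVendoredAssertions sketches how the Should* helpers compose with So.
// The values asserted on are arbitrary examples, not taken from the Crawlab codebase.
func TestVendoredAssertions(t *testing.T) {
	Convey("the vendored assertion helpers work through So", t, func() {
		So("config.yml", ShouldEndWith, ".yml")                // suffix check (ShouldEndWith)
		So("config_spider", ShouldContainSubstring, "spider")  // substring check
		So("", ShouldBeBlank)                                  // empty-string check
		So(errors.New("boom"), ShouldBeError, "boom")          // type.go: error value and message

		now := time.Now()
		So(now, ShouldHappenBefore, now.Add(time.Second))                  // time.go: ordering
		So(now, ShouldHappenWithin, time.Minute, now.Add(30*time.Second))  // time.go: tolerance window
	})
}

A failing assertion is reported through the configured reporter and, under the default FailureHalts mode described further below, stops the enclosing Convey arm.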
diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/assertions.go b/backend/vendor/github.com/smartystreets/goconvey/convey/assertions.go new file mode 100644 index 00000000..97e3bec8 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/assertions.go @@ -0,0 +1,71 @@ +package convey + +import "github.com/smartystreets/assertions" + +var ( + ShouldEqual = assertions.ShouldEqual + ShouldNotEqual = assertions.ShouldNotEqual + ShouldAlmostEqual = assertions.ShouldAlmostEqual + ShouldNotAlmostEqual = assertions.ShouldNotAlmostEqual + ShouldResemble = assertions.ShouldResemble + ShouldNotResemble = assertions.ShouldNotResemble + ShouldPointTo = assertions.ShouldPointTo + ShouldNotPointTo = assertions.ShouldNotPointTo + ShouldBeNil = assertions.ShouldBeNil + ShouldNotBeNil = assertions.ShouldNotBeNil + ShouldBeTrue = assertions.ShouldBeTrue + ShouldBeFalse = assertions.ShouldBeFalse + ShouldBeZeroValue = assertions.ShouldBeZeroValue + ShouldNotBeZeroValue = assertions.ShouldNotBeZeroValue + + ShouldBeGreaterThan = assertions.ShouldBeGreaterThan + ShouldBeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo + ShouldBeLessThan = assertions.ShouldBeLessThan + ShouldBeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo + ShouldBeBetween = assertions.ShouldBeBetween + ShouldNotBeBetween = assertions.ShouldNotBeBetween + ShouldBeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual + ShouldNotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual + + ShouldContain = assertions.ShouldContain + ShouldNotContain = assertions.ShouldNotContain + ShouldContainKey = assertions.ShouldContainKey + ShouldNotContainKey = assertions.ShouldNotContainKey + ShouldBeIn = assertions.ShouldBeIn + ShouldNotBeIn = assertions.ShouldNotBeIn + ShouldBeEmpty = assertions.ShouldBeEmpty + ShouldNotBeEmpty = assertions.ShouldNotBeEmpty + ShouldHaveLength = assertions.ShouldHaveLength + + ShouldStartWith = assertions.ShouldStartWith + ShouldNotStartWith = assertions.ShouldNotStartWith + ShouldEndWith = assertions.ShouldEndWith + ShouldNotEndWith = assertions.ShouldNotEndWith + ShouldBeBlank = assertions.ShouldBeBlank + ShouldNotBeBlank = assertions.ShouldNotBeBlank + ShouldContainSubstring = assertions.ShouldContainSubstring + ShouldNotContainSubstring = assertions.ShouldNotContainSubstring + + ShouldPanic = assertions.ShouldPanic + ShouldNotPanic = assertions.ShouldNotPanic + ShouldPanicWith = assertions.ShouldPanicWith + ShouldNotPanicWith = assertions.ShouldNotPanicWith + + ShouldHaveSameTypeAs = assertions.ShouldHaveSameTypeAs + ShouldNotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs + ShouldImplement = assertions.ShouldImplement + ShouldNotImplement = assertions.ShouldNotImplement + + ShouldHappenBefore = assertions.ShouldHappenBefore + ShouldHappenOnOrBefore = assertions.ShouldHappenOnOrBefore + ShouldHappenAfter = assertions.ShouldHappenAfter + ShouldHappenOnOrAfter = assertions.ShouldHappenOnOrAfter + ShouldHappenBetween = assertions.ShouldHappenBetween + ShouldHappenOnOrBetween = assertions.ShouldHappenOnOrBetween + ShouldNotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween + ShouldHappenWithin = assertions.ShouldHappenWithin + ShouldNotHappenWithin = assertions.ShouldNotHappenWithin + ShouldBeChronological = assertions.ShouldBeChronological + + ShouldBeError = assertions.ShouldBeError +) diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/context.go b/backend/vendor/github.com/smartystreets/goconvey/convey/context.go new file mode 100644 
index 00000000..2c75c2d7 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/context.go @@ -0,0 +1,272 @@ +package convey + +import ( + "fmt" + + "github.com/jtolds/gls" + "github.com/smartystreets/goconvey/convey/reporting" +) + +type conveyErr struct { + fmt string + params []interface{} +} + +func (e *conveyErr) Error() string { + return fmt.Sprintf(e.fmt, e.params...) +} + +func conveyPanic(fmt string, params ...interface{}) { + panic(&conveyErr{fmt, params}) +} + +const ( + missingGoTest = `Top-level calls to Convey(...) need a reference to the *testing.T. + Hint: Convey("description here", t, func() { /* notice that the second argument was the *testing.T (t)! */ }) ` + extraGoTest = `Only the top-level call to Convey(...) needs a reference to the *testing.T.` + noStackContext = "Convey operation made without context on goroutine stack.\n" + + "Hint: Perhaps you meant to use `Convey(..., func(c C){...})` ?" + differentConveySituations = "Different set of Convey statements on subsequent pass!\nDid not expect %#v." + multipleIdenticalConvey = "Multiple convey suites with identical names: %#v" +) + +const ( + failureHalt = "___FAILURE_HALT___" + + nodeKey = "node" +) + +///////////////////////////////// Stack Context ///////////////////////////////// + +func getCurrentContext() *context { + ctx, ok := ctxMgr.GetValue(nodeKey) + if ok { + return ctx.(*context) + } + return nil +} + +func mustGetCurrentContext() *context { + ctx := getCurrentContext() + if ctx == nil { + conveyPanic(noStackContext) + } + return ctx +} + +//////////////////////////////////// Context //////////////////////////////////// + +// context magically handles all coordination of Convey's and So assertions. +// +// It is tracked on the stack as goroutine-local-storage with the gls package, +// or explicitly if the user decides to call convey like: +// +// Convey(..., func(c C) { +// c.So(...) +// }) +// +// This implements the `C` interface. +type context struct { + reporter reporting.Reporter + + children map[string]*context + + resets []func() + + executedOnce bool + expectChildRun *bool + complete bool + + focus bool + failureMode FailureMode +} + +// rootConvey is the main entry point to a test suite. This is called when +// there's no context in the stack already, and items must contain a `t` object, +// or this panics. 
+func rootConvey(items ...interface{}) { + entry := discover(items) + + if entry.Test == nil { + conveyPanic(missingGoTest) + } + + expectChildRun := true + ctx := &context{ + reporter: buildReporter(), + + children: make(map[string]*context), + + expectChildRun: &expectChildRun, + + focus: entry.Focus, + failureMode: defaultFailureMode.combine(entry.FailMode), + } + ctxMgr.SetValues(gls.Values{nodeKey: ctx}, func() { + ctx.reporter.BeginStory(reporting.NewStoryReport(entry.Test)) + defer ctx.reporter.EndStory() + + for ctx.shouldVisit() { + ctx.conveyInner(entry.Situation, entry.Func) + expectChildRun = true + } + }) +} + +//////////////////////////////////// Methods //////////////////////////////////// + +func (ctx *context) SkipConvey(items ...interface{}) { + ctx.Convey(items, skipConvey) +} + +func (ctx *context) FocusConvey(items ...interface{}) { + ctx.Convey(items, focusConvey) +} + +func (ctx *context) Convey(items ...interface{}) { + entry := discover(items) + + // we're a branch, or leaf (on the wind) + if entry.Test != nil { + conveyPanic(extraGoTest) + } + if ctx.focus && !entry.Focus { + return + } + + var inner_ctx *context + if ctx.executedOnce { + var ok bool + inner_ctx, ok = ctx.children[entry.Situation] + if !ok { + conveyPanic(differentConveySituations, entry.Situation) + } + } else { + if _, ok := ctx.children[entry.Situation]; ok { + conveyPanic(multipleIdenticalConvey, entry.Situation) + } + inner_ctx = &context{ + reporter: ctx.reporter, + + children: make(map[string]*context), + + expectChildRun: ctx.expectChildRun, + + focus: entry.Focus, + failureMode: ctx.failureMode.combine(entry.FailMode), + } + ctx.children[entry.Situation] = inner_ctx + } + + if inner_ctx.shouldVisit() { + ctxMgr.SetValues(gls.Values{nodeKey: inner_ctx}, func() { + inner_ctx.conveyInner(entry.Situation, entry.Func) + }) + } +} + +func (ctx *context) SkipSo(stuff ...interface{}) { + ctx.assertionReport(reporting.NewSkipReport()) +} + +func (ctx *context) So(actual interface{}, assert assertion, expected ...interface{}) { + if result := assert(actual, expected...); result == assertionSuccess { + ctx.assertionReport(reporting.NewSuccessReport()) + } else { + ctx.assertionReport(reporting.NewFailureReport(result)) + } +} + +func (ctx *context) Reset(action func()) { + /* TODO: Failure mode configuration */ + ctx.resets = append(ctx.resets, action) +} + +func (ctx *context) Print(items ...interface{}) (int, error) { + fmt.Fprint(ctx.reporter, items...) + return fmt.Print(items...) +} + +func (ctx *context) Println(items ...interface{}) (int, error) { + fmt.Fprintln(ctx.reporter, items...) + return fmt.Println(items...) +} + +func (ctx *context) Printf(format string, items ...interface{}) (int, error) { + fmt.Fprintf(ctx.reporter, format, items...) + return fmt.Printf(format, items...) +} + +//////////////////////////////////// Private //////////////////////////////////// + +// shouldVisit returns true iff we should traverse down into a Convey. Note +// that just because we don't traverse a Convey this time, doesn't mean that +// we may not traverse it on a subsequent pass. +func (c *context) shouldVisit() bool { + return !c.complete && *c.expectChildRun +} + +// conveyInner is the function which actually executes the user's anonymous test +// function body. At this point, Convey or RootConvey has decided that this +// function should actually run. +func (ctx *context) conveyInner(situation string, f func(C)) { + // Record/Reset state for next time. 
+ defer func() { + ctx.executedOnce = true + + // This is only needed at the leaves, but there's no harm in also setting it + // when returning from branch Convey's + *ctx.expectChildRun = false + }() + + // Set up+tear down our scope for the reporter + ctx.reporter.Enter(reporting.NewScopeReport(situation)) + defer ctx.reporter.Exit() + + // Recover from any panics in f, and assign the `complete` status for this + // node of the tree. + defer func() { + ctx.complete = true + if problem := recover(); problem != nil { + if problem, ok := problem.(*conveyErr); ok { + panic(problem) + } + if problem != failureHalt { + ctx.reporter.Report(reporting.NewErrorReport(problem)) + } + } else { + for _, child := range ctx.children { + if !child.complete { + ctx.complete = false + return + } + } + } + }() + + // Resets are registered as the `f` function executes, so nil them here. + // All resets are run in registration order (FIFO). + ctx.resets = []func(){} + defer func() { + for _, r := range ctx.resets { + // panics handled by the previous defer + r() + } + }() + + if f == nil { + // if f is nil, this was either a Convey(..., nil), or a SkipConvey + ctx.reporter.Report(reporting.NewSkipReport()) + } else { + f(ctx) + } +} + +// assertionReport is a helper for So and SkipSo which makes the report and +// then possibly panics, depending on the current context's failureMode. +func (ctx *context) assertionReport(r *reporting.AssertionResult) { + ctx.reporter.Report(r) + if r.Failure != "" && ctx.failureMode == FailureHalts { + panic(failureHalt) + } +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey b/backend/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey new file mode 100644 index 00000000..a2d9327d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/convey.goconvey @@ -0,0 +1,4 @@ +#ignore +-timeout=1s +#-covermode=count +#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting \ No newline at end of file diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/discovery.go b/backend/vendor/github.com/smartystreets/goconvey/convey/discovery.go new file mode 100644 index 00000000..eb8d4cb2 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/discovery.go @@ -0,0 +1,103 @@ +package convey + +type actionSpecifier uint8 + +const ( + noSpecifier actionSpecifier = iota + skipConvey + focusConvey +) + +type suite struct { + Situation string + Test t + Focus bool + Func func(C) // nil means skipped + FailMode FailureMode +} + +func newSuite(situation string, failureMode FailureMode, f func(C), test t, specifier actionSpecifier) *suite { + ret := &suite{ + Situation: situation, + Test: test, + Func: f, + FailMode: failureMode, + } + switch specifier { + case skipConvey: + ret.Func = nil + case focusConvey: + ret.Focus = true + } + return ret +} + +func discover(items []interface{}) *suite { + name, items := parseName(items) + test, items := parseGoTest(items) + failure, items := parseFailureMode(items) + action, items := parseAction(items) + specifier, items := parseSpecifier(items) + + if len(items) != 0 { + conveyPanic(parseError) + } + + return newSuite(name, failure, action, test, specifier) +} +func item(items []interface{}) interface{} { + if len(items) == 0 { + conveyPanic(parseError) + } + return items[0] +} +func parseName(items []interface{}) (string, []interface{}) { + if name, parsed := 
item(items).(string); parsed { + return name, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} +func parseGoTest(items []interface{}) (t, []interface{}) { + if test, parsed := item(items).(t); parsed { + return test, items[1:] + } + return nil, items +} +func parseFailureMode(items []interface{}) (FailureMode, []interface{}) { + if mode, parsed := item(items).(FailureMode); parsed { + return mode, items[1:] + } + return FailureInherits, items +} +func parseAction(items []interface{}) (func(C), []interface{}) { + switch x := item(items).(type) { + case nil: + return nil, items[1:] + case func(C): + return x, items[1:] + case func(): + return func(C) { x() }, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} +func parseSpecifier(items []interface{}) (actionSpecifier, []interface{}) { + if len(items) == 0 { + return noSpecifier, items + } + if spec, ok := items[0].(actionSpecifier); ok { + return spec, items[1:] + } + conveyPanic(parseError) + panic("never get here") +} + +// This interface allows us to pass the *testing.T struct +// throughout the internals of this package without ever +// having to import the "testing" package. +type t interface { + Fail() +} + +const parseError = "You must provide a name (string), then a *testing.T (if in outermost scope), an optional FailureMode, and then an action (func())." diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/doc.go b/backend/vendor/github.com/smartystreets/goconvey/convey/doc.go new file mode 100644 index 00000000..e4f7b51a --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/doc.go @@ -0,0 +1,218 @@ +// Package convey contains all of the public-facing entry points to this project. +// This means that it should never be required of the user to import any other +// packages from this project as they serve internal purposes. +package convey + +import "github.com/smartystreets/goconvey/convey/reporting" + +////////////////////////////////// suite ////////////////////////////////// + +// C is the Convey context which you can optionally obtain in your action +// by calling Convey like: +// +// Convey(..., func(c C) { +// ... +// }) +// +// See the documentation on Convey for more details. +// +// All methods in this context behave identically to the global functions of the +// same name in this package. +type C interface { + Convey(items ...interface{}) + SkipConvey(items ...interface{}) + FocusConvey(items ...interface{}) + + So(actual interface{}, assert assertion, expected ...interface{}) + SkipSo(stuff ...interface{}) + + Reset(action func()) + + Println(items ...interface{}) (int, error) + Print(items ...interface{}) (int, error) + Printf(format string, items ...interface{}) (int, error) +} + +// Convey is the method intended for use when declaring the scopes of +// a specification. Each scope has a description and a func() which may contain +// other calls to Convey(), Reset() or Should-style assertions. Convey calls can +// be nested as far as you see fit. +// +// IMPORTANT NOTE: The top-level Convey() within a Test method +// must conform to the following signature: +// +// Convey(description string, t *testing.T, action func()) +// +// All other calls should look like this (no need to pass in *testing.T): +// +// Convey(description string, action func()) +// +// Don't worry, goconvey will panic if you get it wrong so you can fix it. 
+// +// Additionally, you may explicitly obtain access to the Convey context by doing: +// +// Convey(description string, action func(c C)) +// +// You may need to do this if you want to pass the context through to a +// goroutine, or to close over the context in a handler to a library which +// calls your handler in a goroutine (httptest comes to mind). +// +// All Convey()-blocks also accept an optional parameter of FailureMode which sets +// how goconvey should treat failures for So()-assertions in the block and +// nested blocks. See the constants in this file for the available options. +// +// By default it will inherit from its parent block and the top-level blocks +// default to the FailureHalts setting. +// +// This parameter is inserted before the block itself: +// +// Convey(description string, t *testing.T, mode FailureMode, action func()) +// Convey(description string, mode FailureMode, action func()) +// +// See the examples package for, well, examples. +func Convey(items ...interface{}) { + if ctx := getCurrentContext(); ctx == nil { + rootConvey(items...) + } else { + ctx.Convey(items...) + } +} + +// SkipConvey is analogous to Convey except that the scope is not executed +// (which means that child scopes defined within this scope are not run either). +// The reporter will be notified that this step was skipped. +func SkipConvey(items ...interface{}) { + Convey(append(items, skipConvey)...) +} + +// FocusConvey has the inverse effect of SkipConvey. If the top-level +// Convey is changed to `FocusConvey`, only nested scopes that are defined +// with FocusConvey will be run. The rest will be ignored completely. This +// is handy when debugging a large suite that runs a misbehaving function +// repeatedly as you can disable all but one of that function +// without swaths of `SkipConvey` calls, just a targeted chain of calls +// to FocusConvey. +func FocusConvey(items ...interface{}) { + Convey(append(items, focusConvey)...) +} + +// Reset registers a cleanup function to be run after each Convey() +// in the same scope. See the examples package for a simple use case. +func Reset(action func()) { + mustGetCurrentContext().Reset(action) +} + +/////////////////////////////////// Assertions /////////////////////////////////// + +// assertion is an alias for a function with a signature that the convey.So() +// method can handle. Any future or custom assertions should conform to this +// method signature. The return value should be an empty string if the assertion +// passes and a well-formed failure message if not. +type assertion func(actual interface{}, expected ...interface{}) string + +const assertionSuccess = "" + +// So is the means by which assertions are made against the system under test. +// The majority of exported names in the assertions package begin with the word +// 'Should' and describe how the first argument (actual) should compare with any +// of the final (expected) arguments. How many final arguments are accepted +// depends on the particular assertion that is passed in as the assert argument. +// See the examples package for use cases and the assertions package for +// documentation on specific assertion methods. A failing assertion will +// cause t.Fail() to be invoked--you should never call this method (or other +// failure-inducing methods) in your test code. Leave that to GoConvey. +func So(actual interface{}, assert assertion, expected ...interface{}) { + mustGetCurrentContext().So(actual, assert, expected...)
+} + +// SkipSo is analogous to So except that the assertion that would have been passed +// to So is not executed and the reporter is notified that the assertion was skipped. +func SkipSo(stuff ...interface{}) { + mustGetCurrentContext().SkipSo() +} + +// FailureMode is a type which determines how the So() blocks should fail +// if their assertion fails. See constants further down for acceptable values. +type FailureMode string + +const ( + + // FailureContinues is a failure mode which prevents failing + // So()-assertions from halting Convey-block execution, instead + // allowing the test to continue past failing So()-assertions. + FailureContinues FailureMode = "continue" + + // FailureHalts is the default setting for a top-level Convey()-block + // and will cause all failing So()-assertions to halt further execution + // in that test-arm and continue on to the next arm. + FailureHalts FailureMode = "halt" + + // FailureInherits is the default setting for failure-mode; it will + // default to the failure-mode of the parent block. You should never + // need to specify this mode in your tests. + FailureInherits FailureMode = "inherits" +) + +func (f FailureMode) combine(other FailureMode) FailureMode { + if other == FailureInherits { + return f + } + return other +} + +var defaultFailureMode FailureMode = FailureHalts + +// SetDefaultFailureMode allows you to specify the default failure mode +// for all Convey blocks. It is meant to be used in an init function to +// allow the default mode to be changed across all tests for an entire package, +// but it can be used anywhere. +func SetDefaultFailureMode(mode FailureMode) { + if mode == FailureContinues || mode == FailureHalts { + defaultFailureMode = mode + } else { + panic("You may only use the constants named 'FailureContinues' and 'FailureHalts' as default failure modes.") + } +} + +//////////////////////////////////// Print functions //////////////////////////////////// + +// Print is analogous to fmt.Print (and it even calls fmt.Print). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Print(items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Print(items...) +} + +// Println is analogous to fmt.Println (and it even calls fmt.Println). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Println(items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Println(items...) +} + +// Printf is analogous to fmt.Printf (and it even calls fmt.Printf). It ensures that +// output is aligned with the corresponding scopes in the web UI. +func Printf(format string, items ...interface{}) (written int, err error) { + return mustGetCurrentContext().Printf(format, items...) +} + +/////////////////////////////////////////////////////////////////////////////// + +// SuppressConsoleStatistics prevents automatic printing of console statistics. +// Calling PrintConsoleStatistics explicitly will force printing of statistics. +func SuppressConsoleStatistics() { + reporting.SuppressConsoleStatistics() +} + +// PrintConsoleStatistics may be called at any time to print assertion statistics. +// Generally, the best place to do this would be in a TestMain function, +// after all tests have been run.
Something like this: +// +// func TestMain(m *testing.M) { +// convey.SuppressConsoleStatistics() +// result := m.Run() +// convey.PrintConsoleStatistics() +// os.Exit(result) +// } +// +func PrintConsoleStatistics() { + reporting.PrintConsoleStatistics() +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go b/backend/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go new file mode 100644 index 00000000..167c8fb7 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/gotest/utils.go @@ -0,0 +1,28 @@ +// Package gotest contains internal functionality. Although this package +// contains one or more exported names it is not intended for public +// consumption. See the examples package for how to use this project. +package gotest + +import ( + "runtime" + "strings" +) + +func ResolveExternalCaller() (file string, line int, name string) { + var caller_id uintptr + callers := runtime.Callers(0, callStack) + + for x := 0; x < callers; x++ { + caller_id, file, line, _ = runtime.Caller(x) + if strings.HasSuffix(file, "_test.go") || strings.HasSuffix(file, "_tests.go") { + name = runtime.FuncForPC(caller_id).Name() + return + } + } + file, line, name = "", -1, "" + return // panic? +} + +const maxStackDepth = 100 // This had better be enough... + +var callStack []uintptr = make([]uintptr, maxStackDepth, maxStackDepth) diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/init.go b/backend/vendor/github.com/smartystreets/goconvey/convey/init.go new file mode 100644 index 00000000..cb930a0d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/init.go @@ -0,0 +1,81 @@ +package convey + +import ( + "flag" + "os" + + "github.com/jtolds/gls" + "github.com/smartystreets/assertions" + "github.com/smartystreets/goconvey/convey/reporting" +) + +func init() { + assertions.GoConveyMode(true) + + declareFlags() + + ctxMgr = gls.NewContextManager() +} + +func declareFlags() { + flag.BoolVar(&json, "convey-json", false, "When true, emits results in JSON blocks. Default: 'false'") + flag.BoolVar(&silent, "convey-silent", false, "When true, all output from GoConvey is suppressed.") + flag.BoolVar(&story, "convey-story", false, "When true, emits story output, otherwise emits dot output. When not provided, this flag mirrors the value of the '-test.v' flag") + + if noStoryFlagProvided() { + story = verboseEnabled + } + + // FYI: flag.Parse() is called from the testing package. +} + +func noStoryFlagProvided() bool { + return !story && !storyDisabled +} + +func buildReporter() reporting.Reporter { + selectReporter := os.Getenv("GOCONVEY_REPORTER") + + switch { + case testReporter != nil: + return testReporter + case json || selectReporter == "json": + return reporting.BuildJsonReporter() + case silent || selectReporter == "silent": + return reporting.BuildSilentReporter() + case selectReporter == "dot": + // Story is turned on when verbose is set, so we need to check for dot reporter first. + return reporting.BuildDotReporter() + case story || selectReporter == "story": + return reporting.BuildStoryReporter() + default: + return reporting.BuildDotReporter() + } +} + +var ( + ctxMgr *gls.ContextManager + + // only set by internal tests + testReporter reporting.Reporter +) + +var ( + json bool + silent bool + story bool + + verboseEnabled = flagFound("-test.v=true") + storyDisabled = flagFound("-story=false") +) + +// flagFound parses the command line args manually for flags defined in other +// packages. 
Like the '-v' flag from the "testing" package, for instance. +func flagFound(flagValue string) bool { + for _, arg := range os.Args { + if arg == flagValue { + return true + } + } + return false +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go b/backend/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go new file mode 100644 index 00000000..777b2a51 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/nilReporter.go @@ -0,0 +1,15 @@ +package convey + +import ( + "github.com/smartystreets/goconvey/convey/reporting" +) + +type nilReporter struct{} + +func (self *nilReporter) BeginStory(story *reporting.StoryReport) {} +func (self *nilReporter) Enter(scope *reporting.ScopeReport) {} +func (self *nilReporter) Report(report *reporting.AssertionResult) {} +func (self *nilReporter) Exit() {} +func (self *nilReporter) EndStory() {} +func (self *nilReporter) Write(p []byte) (int, error) { return len(p), nil } +func newNilReporter() *nilReporter { return &nilReporter{} } diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go new file mode 100644 index 00000000..7bf67dbb --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/console.go @@ -0,0 +1,16 @@ +package reporting + +import ( + "fmt" + "io" +) + +type console struct{} + +func (self *console) Write(p []byte) (n int, err error) { + return fmt.Print(string(p)) +} + +func NewConsole() io.Writer { + return new(console) +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go new file mode 100644 index 00000000..a37d0019 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/doc.go @@ -0,0 +1,5 @@ +// Package reporting contains internal functionality related +// to console reporting and output. Although this package has +// exported names is not intended for public consumption. See the +// examples package for how to use this project. 
+package reporting diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go new file mode 100644 index 00000000..47d57c6b --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/dot.go @@ -0,0 +1,40 @@ +package reporting + +import "fmt" + +type dot struct{ out *Printer } + +func (self *dot) BeginStory(story *StoryReport) {} + +func (self *dot) Enter(scope *ScopeReport) {} + +func (self *dot) Report(report *AssertionResult) { + if report.Error != nil { + fmt.Print(redColor) + self.out.Insert(dotError) + } else if report.Failure != "" { + fmt.Print(yellowColor) + self.out.Insert(dotFailure) + } else if report.Skipped { + fmt.Print(yellowColor) + self.out.Insert(dotSkip) + } else { + fmt.Print(greenColor) + self.out.Insert(dotSuccess) + } + fmt.Print(resetColor) +} + +func (self *dot) Exit() {} + +func (self *dot) EndStory() {} + +func (self *dot) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewDotReporter(out *Printer) *dot { + self := new(dot) + self.out = out + return self +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go new file mode 100644 index 00000000..c396e16b --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest.go @@ -0,0 +1,33 @@ +package reporting + +type gotestReporter struct{ test T } + +func (self *gotestReporter) BeginStory(story *StoryReport) { + self.test = story.Test +} + +func (self *gotestReporter) Enter(scope *ScopeReport) {} + +func (self *gotestReporter) Report(r *AssertionResult) { + if !passed(r) { + self.test.Fail() + } +} + +func (self *gotestReporter) Exit() {} + +func (self *gotestReporter) EndStory() { + self.test = nil +} + +func (self *gotestReporter) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewGoTestReporter() *gotestReporter { + return new(gotestReporter) +} + +func passed(r *AssertionResult) bool { + return r.Error == nil && r.Failure == "" +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go new file mode 100644 index 00000000..99c3bd6d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/init.go @@ -0,0 +1,94 @@ +package reporting + +import ( + "os" + "runtime" + "strings" +) + +func init() { + if !isColorableTerminal() { + monochrome() + } + + if runtime.GOOS == "windows" { + success, failure, error_ = dotSuccess, dotFailure, dotError + } +} + +func BuildJsonReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewJsonReporter(out)) +} +func BuildDotReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewDotReporter(out), + NewProblemReporter(out), + consoleStatistics) +} +func BuildStoryReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewStoryReporter(out), + NewProblemReporter(out), + consoleStatistics) +} +func BuildSilentReporter() Reporter { + out := NewPrinter(NewConsole()) + return NewReporters( + NewGoTestReporter(), + NewSilentProblemReporter(out)) +} + +var ( + newline = "\n" + success = "✔" + failure = "✘" + error_ = "🔥" + skip = "⚠" + dotSuccess = "." 
+ dotFailure = "x" + dotError = "E" + dotSkip = "S" + errorTemplate = "* %s \nLine %d: - %v \n%s\n" + failureTemplate = "* %s \nLine %d:\n%s\n%s\n" +) + +var ( + greenColor = "\033[32m" + yellowColor = "\033[33m" + redColor = "\033[31m" + resetColor = "\033[0m" +) + +var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole())) + +func SuppressConsoleStatistics() { consoleStatistics.Suppress() } +func PrintConsoleStatistics() { consoleStatistics.PrintSummary() } + +// QuietMode disables all console output symbols. This is only meant to be used +// for tests that are internal to goconvey where the output is distracting or +// otherwise not needed in the test output. +func QuietMode() { + success, failure, error_, skip, dotSuccess, dotFailure, dotError, dotSkip = "", "", "", "", "", "", "", "" +} + +func monochrome() { + greenColor, yellowColor, redColor, resetColor = "", "", "", "" +} + +func isColorableTerminal() bool { + return strings.Contains(os.Getenv("TERM"), "color") +} + +// This interface allows us to pass the *testing.T struct +// throughout the internals of this tool without ever +// having to import the "testing" package. +type T interface { + Fail() +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go new file mode 100644 index 00000000..f8526979 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/json.go @@ -0,0 +1,88 @@ +// TODO: under unit test + +package reporting + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type JsonReporter struct { + out *Printer + currentKey []string + current *ScopeResult + index map[string]*ScopeResult + scopes []*ScopeResult +} + +func (self *JsonReporter) depth() int { return len(self.currentKey) } + +func (self *JsonReporter) BeginStory(story *StoryReport) {} + +func (self *JsonReporter) Enter(scope *ScopeReport) { + self.currentKey = append(self.currentKey, scope.Title) + ID := strings.Join(self.currentKey, "|") + if _, found := self.index[ID]; !found { + next := newScopeResult(scope.Title, self.depth(), scope.File, scope.Line) + self.scopes = append(self.scopes, next) + self.index[ID] = next + } + self.current = self.index[ID] +} + +func (self *JsonReporter) Report(report *AssertionResult) { + self.current.Assertions = append(self.current.Assertions, report) +} + +func (self *JsonReporter) Exit() { + self.currentKey = self.currentKey[:len(self.currentKey)-1] +} + +func (self *JsonReporter) EndStory() { + self.report() + self.reset() +} +func (self *JsonReporter) report() { + scopes := []string{} + for _, scope := range self.scopes { + serialized, err := json.Marshal(scope) + if err != nil { + self.out.Println(jsonMarshalFailure) + panic(err) + } + var buffer bytes.Buffer + json.Indent(&buffer, serialized, "", " ") + scopes = append(scopes, buffer.String()) + } + self.out.Print(fmt.Sprintf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson)) +} +func (self *JsonReporter) reset() { + self.scopes = []*ScopeResult{} + self.index = map[string]*ScopeResult{} + self.currentKey = nil +} + +func (self *JsonReporter) Write(content []byte) (written int, err error) { + self.current.Output += string(content) + return len(content), nil +} + +func NewJsonReporter(out *Printer) *JsonReporter { + self := new(JsonReporter) + self.out = out + self.reset() + return self +} + +const OpenJson = ">->->OPEN-JSON->->->" // "⌦" +const CloseJson = "<-<-<-CLOSE-JSON<-<-<" // "⌫" +const 
jsonMarshalFailure = ` + +GOCONVEY_JSON_MARSHALL_FAILURE: There was an error when attempting to convert test results to JSON. +Please file a bug report and reference the code that caused this failure if possible. + +Here's the panic: + +` diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go new file mode 100644 index 00000000..3dac0d4d --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/printer.go @@ -0,0 +1,60 @@ +package reporting + +import ( + "fmt" + "io" + "strings" +) + +type Printer struct { + out io.Writer + prefix string +} + +func (self *Printer) Println(message string, values ...interface{}) { + formatted := self.format(message, values...) + newline + self.out.Write([]byte(formatted)) +} + +func (self *Printer) Print(message string, values ...interface{}) { + formatted := self.format(message, values...) + self.out.Write([]byte(formatted)) +} + +func (self *Printer) Insert(text string) { + self.out.Write([]byte(text)) +} + +func (self *Printer) format(message string, values ...interface{}) string { + var formatted string + if len(values) == 0 { + formatted = self.prefix + message + } else { + formatted = self.prefix + fmt_Sprintf(message, values...) + } + indented := strings.Replace(formatted, newline, newline+self.prefix, -1) + return strings.TrimRight(indented, space) +} + +// Extracting fmt.Sprintf to a separate variable circumvents go vet, which, as of go 1.10 is run with go test. +var fmt_Sprintf = fmt.Sprintf + +func (self *Printer) Indent() { + self.prefix += pad +} + +func (self *Printer) Dedent() { + if len(self.prefix) >= padLength { + self.prefix = self.prefix[:len(self.prefix)-padLength] + } +} + +func NewPrinter(out io.Writer) *Printer { + self := new(Printer) + self.out = out + return self +} + +const space = " " +const pad = space + space +const padLength = len(pad) diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go new file mode 100644 index 00000000..33d5e147 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/problems.go @@ -0,0 +1,80 @@ +package reporting + +import "fmt" + +type problem struct { + silent bool + out *Printer + errors []*AssertionResult + failures []*AssertionResult +} + +func (self *problem) BeginStory(story *StoryReport) {} + +func (self *problem) Enter(scope *ScopeReport) {} + +func (self *problem) Report(report *AssertionResult) { + if report.Error != nil { + self.errors = append(self.errors, report) + } else if report.Failure != "" { + self.failures = append(self.failures, report) + } +} + +func (self *problem) Exit() {} + +func (self *problem) EndStory() { + self.show(self.showErrors, redColor) + self.show(self.showFailures, yellowColor) + self.prepareForNextStory() +} +func (self *problem) show(display func(), color string) { + if !self.silent { + fmt.Print(color) + } + display() + if !self.silent { + fmt.Print(resetColor) + } + self.out.Dedent() +} +func (self *problem) showErrors() { + for i, e := range self.errors { + if i == 0 { + self.out.Println("\nErrors:\n") + self.out.Indent() + } + self.out.Println(errorTemplate, e.File, e.Line, e.Error, e.StackTrace) + } +} +func (self *problem) showFailures() { + for i, f := range self.failures { + if i == 0 { + self.out.Println("\nFailures:\n") + self.out.Indent() + } + 
self.out.Println(failureTemplate, f.File, f.Line, f.Failure, f.StackTrace) + } +} + +func (self *problem) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewProblemReporter(out *Printer) *problem { + self := new(problem) + self.out = out + self.prepareForNextStory() + return self +} + +func NewSilentProblemReporter(out *Printer) *problem { + self := NewProblemReporter(out) + self.silent = true + return self +} + +func (self *problem) prepareForNextStory() { + self.errors = []*AssertionResult{} + self.failures = []*AssertionResult{} +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go new file mode 100644 index 00000000..cce6c5e4 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter.go @@ -0,0 +1,39 @@ +package reporting + +import "io" + +type Reporter interface { + BeginStory(story *StoryReport) + Enter(scope *ScopeReport) + Report(r *AssertionResult) + Exit() + EndStory() + io.Writer +} + +type reporters struct{ collection []Reporter } + +func (self *reporters) BeginStory(s *StoryReport) { self.foreach(func(r Reporter) { r.BeginStory(s) }) } +func (self *reporters) Enter(s *ScopeReport) { self.foreach(func(r Reporter) { r.Enter(s) }) } +func (self *reporters) Report(a *AssertionResult) { self.foreach(func(r Reporter) { r.Report(a) }) } +func (self *reporters) Exit() { self.foreach(func(r Reporter) { r.Exit() }) } +func (self *reporters) EndStory() { self.foreach(func(r Reporter) { r.EndStory() }) } + +func (self *reporters) Write(contents []byte) (written int, err error) { + self.foreach(func(r Reporter) { + written, err = r.Write(contents) + }) + return written, err +} + +func (self *reporters) foreach(action func(Reporter)) { + for _, r := range self.collection { + action(r) + } +} + +func NewReporters(collection ...Reporter) *reporters { + self := new(reporters) + self.collection = collection + return self +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey new file mode 100644 index 00000000..79982854 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey @@ -0,0 +1,2 @@ +#ignore +-timeout=1s diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go new file mode 100644 index 00000000..712e6ade --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/reports.go @@ -0,0 +1,179 @@ +package reporting + +import ( + "encoding/json" + "fmt" + "runtime" + "strings" + + "github.com/smartystreets/goconvey/convey/gotest" +) + +////////////////// ScopeReport //////////////////// + +type ScopeReport struct { + Title string + File string + Line int +} + +func NewScopeReport(title string) *ScopeReport { + file, line, _ := gotest.ResolveExternalCaller() + self := new(ScopeReport) + self.Title = title + self.File = file + self.Line = line + return self +} + +////////////////// ScopeResult //////////////////// + +type ScopeResult struct { + Title string + File string + Line int + Depth int + Assertions []*AssertionResult + Output string +} + +func newScopeResult(title string, depth int, file string, line int) *ScopeResult { + self := new(ScopeResult) + self.Title = title + 
self.Depth = depth + self.File = file + self.Line = line + self.Assertions = []*AssertionResult{} + return self +} + +/////////////////// StoryReport ///////////////////// + +type StoryReport struct { + Test T + Name string + File string + Line int +} + +func NewStoryReport(test T) *StoryReport { + file, line, name := gotest.ResolveExternalCaller() + name = removePackagePath(name) + self := new(StoryReport) + self.Test = test + self.Name = name + self.File = file + self.Line = line + return self +} + +// name comes in looking like "github.com/smartystreets/goconvey/examples.TestName". +// We only want the stuff after the last '.', which is the name of the test function. +func removePackagePath(name string) string { + parts := strings.Split(name, ".") + return parts[len(parts)-1] +} + +/////////////////// FailureView //////////////////////// + +// This struct is also declared in github.com/smartystreets/assertions. +// The json struct tags should be equal in both declarations. +type FailureView struct { + Message string `json:"Message"` + Expected string `json:"Expected"` + Actual string `json:"Actual"` +} + +////////////////////AssertionResult ////////////////////// + +type AssertionResult struct { + File string + Line int + Expected string + Actual string + Failure string + Error interface{} + StackTrace string + Skipped bool +} + +func NewFailureReport(failure string) *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = stackTrace() + parseFailure(failure, report) + return report +} +func parseFailure(failure string, report *AssertionResult) { + view := new(FailureView) + err := json.Unmarshal([]byte(failure), view) + if err == nil { + report.Failure = view.Message + report.Expected = view.Expected + report.Actual = view.Actual + } else { + report.Failure = failure + } +} +func NewErrorReport(err interface{}) *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = fullStackTrace() + report.Error = fmt.Sprintf("%v", err) + return report +} +func NewSuccessReport() *AssertionResult { + return new(AssertionResult) +} +func NewSkipReport() *AssertionResult { + report := new(AssertionResult) + report.File, report.Line = caller() + report.StackTrace = fullStackTrace() + report.Skipped = true + return report +} + +func caller() (file string, line int) { + file, line, _ = gotest.ResolveExternalCaller() + return +} + +func stackTrace() string { + buffer := make([]byte, 1024*64) + n := runtime.Stack(buffer, false) + return removeInternalEntries(string(buffer[:n])) +} +func fullStackTrace() string { + buffer := make([]byte, 1024*64) + n := runtime.Stack(buffer, true) + return removeInternalEntries(string(buffer[:n])) +} +func removeInternalEntries(stack string) string { + lines := strings.Split(stack, newline) + filtered := []string{} + for _, line := range lines { + if !isExternal(line) { + filtered = append(filtered, line) + } + } + return strings.Join(filtered, newline) +} +func isExternal(line string) bool { + for _, p := range internalPackages { + if strings.Contains(line, p) { + return true + } + } + return false +} + +// NOTE: any new packages that host goconvey packages will need to be added here! +// An alternative is to scan the goconvey directory and then exclude stuff like +// the examples package but that's nasty too. 
+var internalPackages = []string{ + "goconvey/assertions", + "goconvey/convey", + "goconvey/execution", + "goconvey/gotest", + "goconvey/reporting", +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go new file mode 100644 index 00000000..c3ccd056 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/statistics.go @@ -0,0 +1,108 @@ +package reporting + +import ( + "fmt" + "sync" +) + +func (self *statistics) BeginStory(story *StoryReport) {} + +func (self *statistics) Enter(scope *ScopeReport) {} + +func (self *statistics) Report(report *AssertionResult) { + self.Lock() + defer self.Unlock() + + if !self.failing && report.Failure != "" { + self.failing = true + } + if !self.erroring && report.Error != nil { + self.erroring = true + } + if report.Skipped { + self.skipped += 1 + } else { + self.total++ + } +} + +func (self *statistics) Exit() {} + +func (self *statistics) EndStory() { + self.Lock() + defer self.Unlock() + + if !self.suppressed { + self.printSummaryLocked() + } +} + +func (self *statistics) Suppress() { + self.Lock() + defer self.Unlock() + self.suppressed = true +} + +func (self *statistics) PrintSummary() { + self.Lock() + defer self.Unlock() + self.printSummaryLocked() +} + +func (self *statistics) printSummaryLocked() { + self.reportAssertionsLocked() + self.reportSkippedSectionsLocked() + self.completeReportLocked() +} +func (self *statistics) reportAssertionsLocked() { + self.decideColorLocked() + self.out.Print("\n%d total %s", self.total, plural("assertion", self.total)) +} +func (self *statistics) decideColorLocked() { + if self.failing && !self.erroring { + fmt.Print(yellowColor) + } else if self.erroring { + fmt.Print(redColor) + } else { + fmt.Print(greenColor) + } +} +func (self *statistics) reportSkippedSectionsLocked() { + if self.skipped > 0 { + fmt.Print(yellowColor) + self.out.Print(" (one or more sections skipped)") + } +} +func (self *statistics) completeReportLocked() { + fmt.Print(resetColor) + self.out.Print("\n") + self.out.Print("\n") +} + +func (self *statistics) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewStatisticsReporter(out *Printer) *statistics { + self := statistics{} + self.out = out + return &self +} + +type statistics struct { + sync.Mutex + + out *Printer + total int + failing bool + erroring bool + skipped int + suppressed bool +} + +func plural(word string, count int) string { + if count == 1 { + return word + } + return word + "s" +} diff --git a/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go new file mode 100644 index 00000000..9e73c971 --- /dev/null +++ b/backend/vendor/github.com/smartystreets/goconvey/convey/reporting/story.go @@ -0,0 +1,73 @@ +// TODO: in order for this reporter to be completely honest +// we need to retrofit to be more like the json reporter such that: +// 1. it maintains ScopeResult collections, which count assertions +// 2. it reports only after EndStory(), so that all tick marks +// are placed near the appropriate title. +// 3. 
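A usage sketch for the statistics reporter in the statistics.go hunk above (not part of the diff). It assumes the package's NewPrinter constructor from printer.go, which is not shown in this excerpt; everything else uses names visible in the diff.

package main

import (
	"os"

	"github.com/smartystreets/goconvey/convey/reporting"
)

func main() {
	// NewPrinter wrapping os.Stdout is assumed here (defined in printer.go).
	out := reporting.NewPrinter(os.Stdout)
	stats := reporting.NewStatisticsReporter(out)

	// Report is mutex-guarded, so reporters running in parallel stay consistent.
	stats.Report(reporting.NewSuccessReport())
	stats.Report(reporting.NewSkipReport())
	stats.Report(reporting.NewFailureReport("expected true, got false"))

	// Prints something like "2 total assertions (one or more sections skipped)".
	stats.PrintSummary()
}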
Under unit test + +package reporting + +import ( + "fmt" + "strings" +) + +type story struct { + out *Printer + titlesById map[string]string + currentKey []string +} + +func (self *story) BeginStory(story *StoryReport) {} + +func (self *story) Enter(scope *ScopeReport) { + self.out.Indent() + + self.currentKey = append(self.currentKey, scope.Title) + ID := strings.Join(self.currentKey, "|") + + if _, found := self.titlesById[ID]; !found { + self.out.Println("") + self.out.Print(scope.Title) + self.out.Insert(" ") + self.titlesById[ID] = scope.Title + } +} + +func (self *story) Report(report *AssertionResult) { + if report.Error != nil { + fmt.Print(redColor) + self.out.Insert(error_) + } else if report.Failure != "" { + fmt.Print(yellowColor) + self.out.Insert(failure) + } else if report.Skipped { + fmt.Print(yellowColor) + self.out.Insert(skip) + } else { + fmt.Print(greenColor) + self.out.Insert(success) + } + fmt.Print(resetColor) +} + +func (self *story) Exit() { + self.out.Dedent() + self.currentKey = self.currentKey[:len(self.currentKey)-1] +} + +func (self *story) EndStory() { + self.titlesById = make(map[string]string) + self.out.Println("\n") +} + +func (self *story) Write(content []byte) (written int, err error) { + return len(content), nil // no-op +} + +func NewStoryReporter(out *Printer) *story { + self := new(story) + self.out = out + self.titlesById = make(map[string]string) + return self +} diff --git a/backend/vendor/github.com/ugorji/go/codec/xml.go b/backend/vendor/github.com/ugorji/go/codec/xml.go deleted file mode 100644 index 19fc36ca..00000000 --- a/backend/vendor/github.com/ugorji/go/codec/xml.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. -// Use of this source code is governed by a MIT license found in the LICENSE file. - -// +build ignore - -package codec - -import "reflect" - -/* - -A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder. - -We are attempting this due to perceived issues with encoding/xml: - - Complicated. It tried to do too much, and is not as simple to use as json. - - Due to over-engineering, reflection is over-used AND performance suffers: - java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/ - even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html - -codec framework will offer the following benefits - - VASTLY improved performance (when using reflection-mode or codecgen) - - simplicity and consistency: with the rest of the supported formats - - all other benefits of codec framework (streaming, codegeneration, etc) - -codec is not a drop-in replacement for encoding/xml. -It is a replacement, based on the simplicity and performance of codec. -Look at it like JAXB for Go. - -Challenges: - - Need to output XML preamble, with all namespaces at the right location in the output. - - Each "end" block is dynamic, so we need to maintain a context-aware stack - - How to decide when to use an attribute VS an element - - How to handle chardata, attr, comment EXPLICITLY. - - Should it output fragments? - e.g. encoding a bool should just output true OR false, which is not well-formed XML. - -Extend the struct tag. See representative example: - type X struct { - ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"` - // format: [namespace-uri ][namespace-prefix ]local-name, ... 
- } - -Based on this, we encode - - fields as elements, BUT - encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string) - - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". - -To handle namespaces: - - XMLHandle is denoted as being namespace-aware. - Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. - - *Encoder and *Decoder know whether the Handle "prefers" namespaces. - - add *Encoder.getEncName(*structFieldInfo). - No one calls *structFieldInfo.indexForEncName directly anymore - - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware - indexForEncName takes a parameter of the form namespace:local-name OR local-name - - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc - by being a method on *Decoder, or maybe a method on the Handle itself. - No one accesses .encName anymore - - let encode.go and decode.go use these (for consistency) - - only problem exists for gen.go, where we create a big switch on encName. - Now, we also have to add a switch on strings.endsWith(kName, encNsName) - - gen.go will need to have many more methods, and then double-on the 2 switch loops like: - switch k { - case "abc" : x.abc() - case "def" : x.def() - default { - switch { - case !nsAware: panic(...) - case strings.endsWith(":abc"): x.abc() - case strings.endsWith(":def"): x.def() - default: panic(...) - } - } - } - -The structure below accommodates this: - - type typeInfo struct { - sfi []*structFieldInfo // sorted by encName - sfins // sorted by namespace - sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately. - sfip // unsorted - } - type structFieldInfo struct { - encName - nsEncName - ns string - attr bool - cdata bool - } - -indexForEncName is now an internal helper function that takes a sorted array -(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) - -There will be a separate parser from the builder. -The parser will have a method: next() xmlToken method. It has lookahead support, -so you can pop multiple tokens, make a determination, and push them back in the order popped. -This will be needed to determine whether we are "nakedly" decoding a container or not. -The stack will be implemented using a slice and push/pop happens at the [0] element. - -xmlToken has fields: - - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text - - value string - - ns string - -SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL - -The following are skipped when parsing: - - External Entities (from external file) - - Notation Declaration e.g. - - Entity Declarations & References - - XML Declaration (assume UTF-8) - - XML Directive i.e. - - Other Declarations: Notation, etc. - - Comment - - Processing Instruction - - schema / DTD for validation: - We are not a VALIDATING parser. Validation is done elsewhere. - However, some parts of the DTD internal subset are used (SEE BELOW). - For Attribute List Declarations e.g. - - We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED) - -The following XML features are supported - - Namespace - - Element - - Attribute - - cdata - - Unicode escape - -The following DTD (when as an internal sub-set) features are supported: - - Internal Entities e.g. - AND entities for the set: [<>&"'] - - Parameter entities e.g. 
- - -At decode time, a structure containing the following is kept - - namespace mapping - - default attribute values - - all internal entities (<>&"' and others written in the document) - -When decode starts, it parses XML namespace declarations and creates a map in the -xmlDecDriver. While parsing, that map continuously gets updated. -The only problem happens when a namespace declaration happens on the node that it defines. -e.g. -To handle this, each Element must be fully parsed at a time, -even if it amounts to multiple tokens which are returned one at a time on request. - -xmlns is a special attribute name. - - It is used to define namespaces, including the default - - It is never returned as an AttrKey or AttrVal. - *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* - -Number, bool, null, mapKey, etc can all be decoded from any xmlToken. -This accommodates map[int]string for example. - -It should be possible to create a schema from the types, -or vice versa (generate types from schema with appropriate tags). -This is however out-of-scope from this parsing project. - -We should write all namespace information at the first point that it is referenced in the tree, -and use the mapping for all child nodes and attributes. This means that state is maintained -at a point in the tree. This also means that calls to Decode or MustDecode will reset some state. - -When decoding, it is important to keep track of entity references and default attribute values. -It seems these can only be stored in the DTD components. We should honor them when decoding. - -Configuration for XMLHandle will look like this: - - XMLHandle - DefaultNS string - // Encoding: - NS map[string]string // ns URI to key, used for encoding - // Decoding: in case ENTITY declared in external schema or dtd, store info needed here - Entities map[string]string // map of entity rep to character - - -During encode, if a namespace mapping is not defined for a namespace found on a struct, -then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict -with any other namespace mapping). - -Note that different fields in a struct can have different namespaces. -However, all fields will default to the namespace on the _struct field (if defined). - -An XML document is a name, a map of attributes and a list of children. -Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). -We have to "DecodeNaked" into something that resembles XML data. - -To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types: - type Name struct { // Preferred. Less allocations due to conversions. - Local string - Space string - } - type Element struct { - Name Name - Attrs map[Name]string - Children []interface{} // each child is either *Element or string - } -Only two "supporting" types are exposed for XML: Name and Element. - -// ------------------ - -We considered 'type Name string' where Name is like "Space Local" (space-separated). -We decided against it, because each creation of a name would lead to -double allocation (first convert []byte to string, then concatenate them into a string). -The benefit is that it is faster to read Attrs from a map. But given that Element is a value -object, we want to eschew methods and have public exposed variables. - -We also considered the following, where xml types were not value objects, and we used -intelligent accessor methods to extract information and for performance. 
-*** WE DECIDED AGAINST THIS. *** - type Attr struct { - Name Name - Value string - } - // Element is a ValueObject: There are no accessor methods. - // Make element self-contained. - type Element struct { - Name Name - attrsMap map[string]string // where key is "Space Local" - attrs []Attr - childrenT []string - childrenE []Element - childrenI []int // each child is a index into T or E. - } - func (x *Element) child(i) interface{} // returns string or *Element - -// ------------------ - -Per XML spec and our default handling, white space is always treated as -insignificant between elements, except in a text node. The xml:space='preserve' -attribute is ignored. - -**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** -**So treat them as just "directives" that should be interpreted to mean something**. - -On encoding, we support indenting aka prettifying markup in the same way we support it for json. - -A document or element can only be encoded/decoded from/to a struct. In this mode: - - struct name maps to element name (or tag-info from _struct field) - - fields are mapped to child elements or attributes - -A map is either encoded as attributes on current element, or as a set of child elements. -Maps are encoded as attributes iff their keys and values are primitives (number, bool, string). - -A list is encoded as a set of child elements. - -Primitives (number, bool, string) are encoded as an element, attribute or text -depending on the context. - -Extensions must encode themselves as a text string. - -Encoding is tough, specifically when encoding mappings, because we need to encode -as either attribute or element. To do this, we need to default to encoding as attributes, -and then let Encoder inform the Handle when to start encoding as nodes. -i.e. Encoder does something like: - - h.EncodeMapStart() - h.Encode(), h.Encode(), ... - h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal - h.Encode(), h.Encode(), ... - h.EncodeEnd() - -Only XMLHandle understands this, and will set itself to start encoding as elements. - -This support extends to maps. For example, if a struct field is a map, and it has -the struct tag signifying it should be attr, then all its fields are encoded as attributes. -e.g. - - type X struct { - M map[string]int `codec:"m,attr"` // encode keys as attributes named - } - -Question: - - if encoding a map, what if map keys have spaces in them??? - Then they cannot be attributes or child elements. Error. - -Options to consider adding later: - - For attribute values, normalize by trimming beginning and ending white space, - and converting every white space sequence to a single space. - - ATTLIST restrictions are enforced. - e.g. default value of xml:space, skipping xml:XYZ style attributes, etc. - - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing). - Some elements e.g. br, hr, etc need not close and should be auto-closed - ... (see http://www.w3.org/TR/html4/loose.dtd) - An expansive set of entities are pre-defined. - - Have easy way to create a HTML parser: - add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose, - and add HTML Entities to the list. - - Support validating element/attribute XMLName before writing it. - Keep this behind a flag, which is set to false by default (for performance). - type XMLHandle struct { - CheckName bool - } - -Misc: - -ROADMAP (1 weeks): - - build encoder (1 day) - - build decoder (based off xmlParser) (1 day) - - implement xmlParser (2 days). 
- Look at encoding/xml for inspiration. - - integrate and TEST (1 days) - - write article and post it (1 day) - -// ---------- MORE NOTES FROM 2017-11-30 ------------ - -when parsing -- parse the attributes first -- then parse the nodes - -basically: -- if encoding a field: we use the field name for the wrapper -- if encoding a non-field, then just use the element type name - - map[string]string ==> abcval... or - val... OR - val1val2... <- PREFERED - []string ==> v1v2... - string v1 ==> v1 - bool true ==> true - float 1.0 ==> 1.0 - ... - - F1 map[string]string ==> abcval... OR - val... OR - val... <- PREFERED - F2 []string ==> v1v2... - F3 bool ==> true - ... - -- a scalar is encoded as: - (value) of type T ==> - (value) of field F ==> -- A kv-pair is encoded as: - (key,value) ==> OR - (key,value) of field F ==> OR -- A map or struct is just a list of kv-pairs -- A list is encoded as sequences of same node e.g. - - - value21 - value22 -- we may have to singularize the field name, when entering into xml, - and pluralize them when encoding. -- bi-directional encode->decode->encode is not a MUST. - even encoding/xml cannot decode correctly what was encoded: - - see https://play.golang.org/p/224V_nyhMS - func main() { - fmt.Println("Hello, playground") - v := []interface{}{"hello", 1, true, nil, time.Now()} - s, err := xml.Marshal(v) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v2 []interface{} - err = xml.Unmarshal(s, &v2) - fmt.Printf("err: %v, \nv2: %v\n", err, v2) - type T struct { - V []interface{} - } - v3 := T{V: v} - s, err = xml.Marshal(v3) - fmt.Printf("err: %v, \ns: %s\n", err, s) - var v4 T - err = xml.Unmarshal(s, &v4) - fmt.Printf("err: %v, \nv4: %v\n", err, v4) - } - Output: - err: , - s: hello1true - err: , - v2: [] - err: , - s: hello1true2009-11-10T23:00:00Z - err: , - v4: {[ ]} -- -*/ - -// ----------- PARSER ------------------- - -type xmlTokenType uint8 - -const ( - _ xmlTokenType = iota << 1 - xmlTokenElemStart - xmlTokenElemEnd - xmlTokenAttrKey - xmlTokenAttrVal - xmlTokenText -) - -type xmlToken struct { - Type xmlTokenType - Value string - Namespace string // blank for AttrVal and Text -} - -type xmlParser struct { - r decReader - toks []xmlToken // list of tokens. - ptr int // ptr into the toks slice - done bool // nothing else to parse. r now returns EOF. -} - -func (x *xmlParser) next() (t *xmlToken) { - // once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish) - if !x.done && len(x.toks) == 0 { - x.nextTag() - } - // parses one element at a time (into possible many tokens) - if x.ptr < len(x.toks) { - t = &(x.toks[x.ptr]) - x.ptr++ - if x.ptr == len(x.toks) { - x.ptr = 0 - x.toks = x.toks[:0] - } - } - return -} - -// nextTag will parses the next element and fill up toks. -// It set done flag if/once EOF is reached. -func (x *xmlParser) nextTag() { - // TODO: implement. -} - -// ----------- ENCODER ------------------- - -type xmlEncDriver struct { - e *Encoder - w encWriter - h *XMLHandle - b [64]byte // scratch - bs []byte // scratch - // s jsonStack - noBuiltInTypes -} - -// ----------- DECODER ------------------- - -type xmlDecDriver struct { - d *Decoder - h *XMLHandle - r decReader // *bytesDecReader decReader - ct valueType // container type. one of unset, array or map. 
- bstr [8]byte // scratch used for string \UXXX parsing - b [64]byte // scratch - - // wsSkipped bool // whitespace skipped - - // s jsonStack - - noBuiltInTypes -} - -// DecodeNaked will decode into an XMLNode - -// XMLName is a value object representing a namespace-aware NAME -type XMLName struct { - Local string - Space string -} - -// XMLNode represents a "union" of the different types of XML Nodes. -// Only one of fields (Text or *Element) is set. -type XMLNode struct { - Element *Element - Text string -} - -// XMLElement is a value object representing an fully-parsed XML element. -type XMLElement struct { - Name Name - Attrs map[XMLName]string - // Children is a list of child nodes, each being a *XMLElement or string - Children []XMLNode -} - -// ----------- HANDLE ------------------- - -type XMLHandle struct { - BasicHandle - textEncodingType - - DefaultNS string - NS map[string]string // ns URI to key, for encoding - Entities map[string]string // entity representation to string, for encoding. -} - -func (h *XMLHandle) newEncDriver(e *Encoder) encDriver { - return &xmlEncDriver{e: e, w: e.w, h: h} -} - -func (h *XMLHandle) newDecDriver(d *Decoder) decDriver { - // d := xmlDecDriver{r: r.(*bytesDecReader), h: h} - hd := xmlDecDriver{d: d, r: d.r, h: h} - hd.n.bytes = d.b[:] - return &hd -} - -func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { - return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) -} - -var _ decDriver = (*xmlDecDriver)(nil) -var _ encDriver = (*xmlEncDriver)(nil) diff --git a/backend/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/backend/vendor/golang.org/x/sys/unix/mkasm_darwin.go deleted file mode 100644 index 4548b993..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mkasm_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. -//This program must be run after mksyscall.go. 
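Stepping back to the deleted xml.go above (not part of the diff): its DecodeNaked plan hinges on the XMLName/XMLNode/XMLElement value objects declared near the end of that file. The standalone mirror below shows the shape of a naked-decoded document; the types are redeclared locally for illustration, and the element's Name field is typed as XMLName here, where the build-ignored original writes the undefined type Name.

package main

import "fmt"

type XMLName struct {
	Local string
	Space string
}

// XMLNode is a union: exactly one of Element or Text is meaningful.
type XMLNode struct {
	Element *XMLElement
	Text    string
}

type XMLElement struct {
	Name     XMLName
	Attrs    map[XMLName]string
	Children []XMLNode
}

func main() {
	// Hand-built tree standing in for a naked decode of a hypothetical
	// <book lang="en"><title>Go</title></book> document.
	doc := XMLElement{
		Name:  XMLName{Local: "book"},
		Attrs: map[XMLName]string{{Local: "lang"}: "en"},
		Children: []XMLNode{
			{Element: &XMLElement{
				Name:     XMLName{Local: "title"},
				Children: []XMLNode{{Text: "Go"}},
			}},
		},
	}
	fmt.Printf("%+v\n", doc)
}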
-package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "strings" -) - -func main() { - in1, err := ioutil.ReadFile("syscall_darwin.go") - if err != nil { - log.Fatalf("can't open syscall_darwin.go: %s", err) - } - arch := os.Args[1] - in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) - } - in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) - if err != nil { - log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) - } - in := string(in1) + string(in2) + string(in3) - - trampolines := map[string]bool{} - - var out bytes.Buffer - - fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) - fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "// +build go1.12\n") - fmt.Fprintf(&out, "\n") - fmt.Fprintf(&out, "#include \"textflag.h\"\n") - for _, line := range strings.Split(in, "\n") { - if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { - continue - } - fn := line[5 : len(line)-13] - if !trampolines[fn] { - trampolines[fn] = true - fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) - fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) - } - } - err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644) - if err != nil { - log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err) - } -} diff --git a/backend/vendor/golang.org/x/sys/unix/mkpost.go b/backend/vendor/golang.org/x/sys/unix/mkpost.go deleted file mode 100644 index 9feddd00..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mkpost.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// mkpost processes the output of cgo -godefs to -// modify the generated types. It is used to clean up -// the sys API in an architecture specific manner. -// -// mkpost is run after cgo -godefs; see README.md. -package main - -import ( - "bytes" - "fmt" - "go/format" - "io/ioutil" - "log" - "os" - "regexp" -) - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check that we are using the Docker-based build system if we should be. - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") - os.Stderr.WriteString("See README.md\n") - os.Exit(1) - } - } - - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Fatal(err) - } - - // Intentionally export __val fields in Fsid and Sigset_t - valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) - b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) - - // Intentionally export __fds_bits field in FdSet - fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) - b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) - - // If we have empty Ptrace structs, we should delete them. Only s390x emits - // nonempty Ptrace structs. 
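As a worked example for the deleted mkasm_darwin.go above (not part of the diff): the generator scans for lines of the form "func X_trampoline()" and emits one TEXT/JMP stub per symbol. The sketch below repeats that string surgery on a single hypothetical input line, using the same slice bounds and format strings as the deleted code.

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "func libc_getpid_trampoline()" // hypothetical declaration from zsyscall_darwin_*.go
	if strings.HasPrefix(line, "func ") && strings.HasSuffix(line, "_trampoline()") {
		fn := line[5 : len(line)-13] // strip "func " and "_trampoline()"
		fmt.Printf("TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
		fmt.Printf("\tJMP\t%s(SB)\n", fn)
	}
}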
- ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) - b = ptraceRexexp.ReplaceAll(b, nil) - - // Replace the control_regs union with a blank identifier for now. - controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) - b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) - - // Remove fields that are added by glibc - // Note that this is unstable as the identifers are private. - removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Convert [65]int8 to [65]byte in Utsname members to simplify - // conversion to string; see golang.org/issue/20753 - convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) - b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) - - // Convert [1024]int8 to [1024]byte in Ptmget members - convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) - b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) - - // Remove spare fields (e.g. in Statx_t) - spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) - b = spareFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove cgo padding fields - removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) - b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - - // Remove the first line of warning from cgo - b = b[bytes.IndexByte(b, '\n')+1:] - // Modify the command in the header to include: - // mkpost, our own warning, and a build tag. - replacement := fmt.Sprintf(`$1 | go run mkpost.go -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s,%s`, goarch, goos) - cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) - b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) - - // gofmt - b, err = format.Source(b) - if err != nil { - log.Fatal(err) - } - - os.Stdout.Write(b) -} diff --git a/backend/vendor/golang.org/x/sys/unix/mksyscall.go b/backend/vendor/golang.org/x/sys/unix/mksyscall.go deleted file mode 100644 index e4af9424..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mksyscall.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_darwin.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named errno. - -A line beginning with //sysnb is like //sys, except that the -goroutine will not be suspended during the execution of the system -call. This must only be used for system calls which can never -block, as otherwise the system call could cause all goroutines to -hang. 
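To make the //sys prototype format described above concrete (this aside is not part of the diff): the sketch below splits one hypothetical prototype with a regular expression equivalent to the one the deleted mksyscall.go applies after whitespace normalisation.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Equivalent to the declaration-splitting expression in the deleted mksyscall.go:
// it captures the nb marker, function name, input params, output params and an
// optional explicit SYS_ name.
var sysDecl = regexp.MustCompile(`^//sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`)

func main() {
	line := "//sys Mkdir(path string, mode uint32) (err error)" // hypothetical prototype
	line = regexp.MustCompile(`\s+`).ReplaceAllString(strings.TrimSpace(line), " ")

	m := sysDecl.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("malformed //sys declaration")
		return
	}
	funct, inps, outps := m[2], m[3], m[4]
	fmt.Printf("func %s(%s) (%s)\n", funct, inps, outps)
	// For this prototype the generator would go on to convert path with
	// BytePtrFromString, call Syscall(SYS_MKDIR, ...) padded to three
	// arguments, and turn a non-zero errno into err via errnoErr.
}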
-*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - plan9 = flag.Bool("plan9", false, "plan9") - openbsd = flag.Bool("openbsd", false, "openbsd") - netbsd = flag.Bool("netbsd", false, "netbsd") - dragonfly = flag.Bool("dragonfly", false, "dragonfly") - arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair - tags = flag.String("tags", "", "build tags") - filename = flag.String("output", "", "output file name (standard output if omitted)") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - // Get the OS and architecture (using GOARCH_TARGET if it exists) - goos := os.Getenv("GOOS") - if goos == "" { - fmt.Fprintln(os.Stderr, "GOOS not defined in environment") - os.Exit(1) - } - goarch := os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - - // Check that we are using the Docker-based build system if we should - if goos == "linux" { - if os.Getenv("GOLANG_SYS_BUILD") != "docker" { - fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") - fmt.Fprintf(os.Stderr, "See README.md\n") - os.Exit(1) - } - } - - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - libc := false - if goos == "darwin" && strings.Contains(buildTags(), ",go1.12") { - libc = true - } - trampolines := map[string]bool{} - - text := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, errno error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, sysname := f[2], f[3], f[4], f[5] - - // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. - if goos == "darwin" && !libc && funct == "ClockGettime" { - continue - } - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Go function header. - outDecl := "" - if len(out) > 0 { - outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - break - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d *byte\n", n) - text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass dummy pointer in that case. - // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
- text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) - text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && (*openbsd || *netbsd) { - args = append(args, "0") - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if p.Type == "int64" && *dragonfly { - if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else if endianness == "little-endian" { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { - if len(args)%2 == 1 && *arm { - // arm abi specifies 64-bit argument uses - // (even, odd) pair - args = append(args, "0") - } - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - - // Determine which form to use; pad args with zeros. - asm := "Syscall" - if nonblock != nil { - if errvar == "" && goos == "linux" { - asm = "RawSyscallNoError" - } else { - asm = "RawSyscall" - } - } else { - if errvar == "" && goos == "linux" { - asm = "SyscallNoError" - } - } - if len(args) <= 3 { - for len(args) < 3 { - args = append(args, "0") - } - } else if len(args) <= 6 { - asm += "6" - for len(args) < 6 { - args = append(args, "0") - } - } else if len(args) <= 9 { - asm += "9" - for len(args) < 9 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) - } - - // System call number. - if sysname == "" { - sysname = "SYS_" + funct - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToUpper(sysname) - } - - var libcFn string - if libc { - asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call - sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ - sysname = strings.ToLower(sysname) // lowercase - if sysname == "getdirentries64" { - // Special case - libSystem name and - // raw syscall name don't match. - sysname = "__getdirentries64" - } - libcFn = sysname - sysname = "funcPC(libc_" + sysname + "_trampoline)" - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) - - // Assign return values. 
- body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" && !*plan9 { - reg = "e1" - ret[2] = reg - doErrno = true - } else if p.Name == "err" && *plan9 { - ret[0] = "r0" - ret[2] = "e1" - break - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" || *plan9 { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - if errvar == "" && goos == "linux" { - // raw syscall without error on Linux, see golang.org/issue/22924 - text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - } - text += body - - if *plan9 && ret[2] == "e1" { - text += "\tif int32(r0) == -1 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } else if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = errnoErr(e1)\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n\n" - - if libc && !trampolines[libcFn] { - // some system calls share a trampoline, like read and readlen. - trampolines[libcFn] = true - // Declare assembly trampoline. - text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) - // Assembly trampoline calls the libc_* function, which this magic - // redirects to use the function from libSystem. - text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) - text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) - text += "\n" - } - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -import ( - "syscall" - "unsafe" -) - -var _ syscall.Errno - -%s -` diff --git a/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go deleted file mode 100644 index f2c58fb7..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. 
- * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - cExtern := "/*\n#include \n#include \n" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. 
- text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // Check if value return, err return available - errvar := "" - retvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - retvar = p.Name - rettype = p.Type - } - } - - // System call name. - if sysname == "" { - sysname = funct - } - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // Change p.Types to c - var cIn []string - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - cIn = append(cIn, "int") - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - - // So file name. - if *aix { - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - } - - strconvfunc := "C.CString" - - // Go function header. - if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if text != "" { - text += "\n" - } - - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments to Syscall. 
- var args []string - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) - n++ - text += fmt.Sprintf("\tvar _p%d int\n", n) - text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) - args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - n++ - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("_p%d", n)) - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "unsafe.Pointer" { - args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) - } else if p.Type == "int" { - if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { - args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) - } else if argN == 0 && funct == "fcntl" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := "" - if sysname == "exit" { - if errvar != "" { - call += "er :=" - } else { - call += "" - } - } else if errvar != "" { - call += "r0,er :=" - } else if retvar != "" { - call += "r0,_ :=" - } else { - call += "" - } - call += fmt.Sprintf("C.%s(%s)", sysname, arglist) - - // Assign return values. 
- body := "" - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - } else { - reg = "r0" - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - - // verify return - if sysname != "exit" && errvar != "" { - if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { - body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } else { - body += "\tif (r0 ==-1 && er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - } else if errvar != "" { - body += "\tif (er != nil) {\n" - body += fmt.Sprintf("\t\t%s = er\n", errvar) - body += "\t}\n" - } - - text += fmt.Sprintf("\t%s\n", call) - text += body - - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - - -%s -*/ -import "C" -import ( - "unsafe" -) - - -%s - -%s -` diff --git a/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go deleted file mode 100644 index 45b44290..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go +++ /dev/null @@ -1,602 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -This program reads a file containing function prototypes -(like syscall_aix.go) and generates system call bodies. -The prototypes are marked by lines beginning with "//sys" -and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt - - -This program will generate three files and handle both gc and gccgo implementation: - - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) - - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 - - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. - - The generated code looks like this - -zsyscall_aix_ppc64.go -func asyscall(...) (n int, err error) { - // Pointer Creation - r1, e1 := callasyscall(...) - // Type Conversion - // Error Handler - return -} - -zsyscall_aix_ppc64_gc.go -//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" -//go:linkname libc_asyscall libc_asyscall -var asyscall syscallFunc - -func callasyscall(...) (r1 uintptr, e1 Errno) { - r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) - return -} - -zsyscall_aix_ppc64_ggcgo.go - -// int asyscall(...) - -import "C" - -func callasyscall(...) 
(r1 uintptr, e1 Errno) { - r1 = uintptr(C.asyscall(...)) - e1 = syscall.GetErrno() - return -} -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "io/ioutil" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - aix = flag.Bool("aix", false, "aix") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - // GCCGO - textgccgo := "" - cExtern := "/*\n#include \n" - // GC - textgc := "" - dynimports := "" - linknames := "" - var vars []string - // COMMON - textcommon := "" - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. - in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - if sysname == "" { - sysname = funct - } - - onlyCommon := false - if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { - // This function call another syscall which is already implemented. - // Therefore, the gc and gccgo part must not be generated. - onlyCommon = true - } - - // Try in vain to keep people from editing this file. 
- // The theory is that they jump into the middle of the file - // without reading the header. - - textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - if !onlyCommon { - textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - } - - // Check if value return, err return available - errvar := "" - rettype := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - } else { - rettype = p.Type - } - } - - sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // GCCGO Prototype return type - cRettype := "" - if rettype == "unsafe.Pointer" { - cRettype = "uintptr_t" - } else if rettype == "uintptr" { - cRettype = "uintptr_t" - } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { - cRettype = "uintptr_t" - } else if rettype == "int" { - cRettype = "int" - } else if rettype == "int32" { - cRettype = "int" - } else if rettype == "int64" { - cRettype = "long long" - } else if rettype == "uint32" { - cRettype = "unsigned int" - } else if rettype == "uint64" { - cRettype = "unsigned long long" - } else { - cRettype = "int" - } - if sysname == "exit" { - cRettype = "void" - } - - // GCCGO Prototype arguments type - var cIn []string - for i, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "string" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t", "size_t") - } else if p.Type == "unsafe.Pointer" { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "uintptr" { - cIn = append(cIn, "uintptr_t") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { - cIn = append(cIn, "uintptr_t") - } else if p.Type == "int" { - if (i == 0 || i == 2) && funct == "fcntl" { - // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock - cIn = append(cIn, "uintptr_t") - } else { - cIn = append(cIn, "int") - } - - } else if p.Type == "int32" { - cIn = append(cIn, "int") - } else if p.Type == "int64" { - cIn = append(cIn, "long long") - } else if p.Type == "uint32" { - cIn = append(cIn, "unsigned int") - } else if p.Type == "uint64" { - cIn = append(cIn, "unsigned long long") - } else { - cIn = append(cIn, "int") - } - } - - if !onlyCommon { - // GCCGO Prototype Generation - // Imports of system calls from libc - cExtern += fmt.Sprintf("%s %s", cRettype, sysname) - cIn := strings.Join(cIn, ", ") - cExtern += fmt.Sprintf("(%s);\n", cIn) - } - // GC Library name - if modname == "" { - modname = "libc.a/shr_64.o" - } else { - fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) - os.Exit(1) - } - sysvarname := fmt.Sprintf("libc_%s", sysname) - - if !onlyCommon { - // GC Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) - // GC Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) - // GC Library proc address variable. - vars = append(vars, sysvarname) - } - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - // Go function header. 
- if outps != "" { - outps = fmt.Sprintf(" (%s)", outps) - } - if textcommon != "" { - textcommon += "\n" - } - - textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) - - // Prepare arguments tocall. - var argscommon []string // Arguments in the common part - var argscall []string // Arguments for call prototype - var argsgc []string // Arguments for gc call (with syscall6) - var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) - n := 0 - argN := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "string" && errvar != "" { - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) - n++ - } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. - textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) - textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) - argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) - n++ - } else if p.Type == "int64" && endianness != "" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") - } else if p.Type == "bool" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") - } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else if p.Type == "int" { - if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { - // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock - argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - - } else { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - } else if p.Type == "int32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } else if p.Type == "int64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) - } else if p.Type == "uint32" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) - } else if p.Type == "uint64" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) - } else if p.Type == "uintptr" { - argscommon = append(argscommon, p.Name) - argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) - argsgc = append(argsgc, p.Name) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) - } else { - argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) - argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) - argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) - argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) - } - argN++ - } - nargs := len(argsgc) - - // COMMON function generation - argscommonlist := strings.Join(argscommon, ", ") - callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) - ret := []string{"_", "_"} - body := "" - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[1] = reg - doErrno = true - } else { - reg = "r0" - ret[0] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%s != 0", reg) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" { - textcommon += fmt.Sprintf("\t%s\n", callcommon) - } else { - textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) - } - textcommon += body - - if doErrno { - textcommon += "\tif e1 != 0 {\n" - textcommon += "\t\terr = 
errnoErr(e1)\n" - textcommon += "\t}\n" - } - textcommon += "\treturn\n" - textcommon += "}\n" - - if onlyCommon { - continue - } - - // CALL Prototype - callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) - - // GC function generation - asm := "syscall6" - if nonblock != nil { - asm = "rawSyscall6" - } - - if len(argsgc) <= 6 { - for len(argsgc) < 6 { - argsgc = append(argsgc, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) - os.Exit(1) - } - argsgclist := strings.Join(argsgc, ", ") - callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) - - textgc += callProto - textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) - textgc += "\treturn\n}\n" - - // GCCGO function generation - argsgccgolist := strings.Join(argsgccgo, ", ") - callgccgo := fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) - textgccgo += callProto - textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) - textgccgo += "\te1 = syscall.GetErrno()\n" - textgccgo += "\treturn\n}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - - // Print zsyscall_aix_ppc64.go - err := ioutil.WriteFile("zsyscall_aix_ppc64.go", - []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gc.go - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", - []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - - // Print zsyscall_aix_ppc64_gccgo.go - err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", - []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), - 0644) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } -} - -const srcTemplate1 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "unsafe" -) - - -%s - -%s -` -const srcTemplate2 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build !gccgo - -package %s - -import ( - "unsafe" -) -%s -%s -%s -type syscallFunc uintptr - -var ( -%s -) - -// Implemented in runtime/syscall_aix.go. -func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - -%s -` -const srcTemplate3 = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s -// +build gccgo - -package %s - -%s -*/ -import "C" -import ( - "syscall" -) - - -%s - -%s -` diff --git a/backend/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/backend/vendor/golang.org/x/sys/unix/mksyscall_solaris.go deleted file mode 100644 index 3d864738..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mksyscall_solaris.go +++ /dev/null @@ -1,335 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build ignore - -/* - This program reads a file containing function prototypes - (like syscall_solaris.go) and generates system call bodies. - The prototypes are marked by lines beginning with "//sys" - and read like func declarations if //sys is replaced by func, but: - * The parameter lists must give a name for each argument. - This includes return parameters. - * The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - * If the return parameter is an error number, it must be named err. - * If go func name needs to be different than its libc name, - * or the function is not in libc, name could be specified - * at the end, after "=" sign, like - //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt -*/ - -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - b32 = flag.Bool("b32", false, "32bit big-endian") - l32 = flag.Bool("l32", false, "32bit little-endian") - tags = flag.String("tags", "", "build tags") -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return *tags -} - -// Param is function parameter -type Param struct { - Name string - Type string -} - -// usage prints the program usage -func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") - os.Exit(1) -} - -// parseParamList parses parameter list and returns a slice of parameters -func parseParamList(list string) []string { - list = strings.TrimSpace(list) - if list == "" { - return []string{} - } - return regexp.MustCompile(`\s*,\s*`).Split(list, -1) -} - -// parseParam splits a parameter into name and type -func parseParam(p string) Param { - ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) - if ps == nil { - fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) - os.Exit(1) - } - return Param{ps[1], ps[2]} -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - endianness := "" - if *b32 { - endianness = "big-endian" - } else if *l32 { - endianness = "little-endian" - } - - pack := "" - text := "" - dynimports := "" - linknames := "" - var vars []string - for _, path := range flag.Args() { - file, err := os.Open(path) - if err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - s := bufio.NewScanner(file) - for s.Scan() { - t := s.Text() - t = strings.TrimSpace(t) - t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) - if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { - pack = p[1] - } - nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) - if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { - continue - } - - // Line must be of the form - // func Open(path string, mode int, perm int) (fd int, err error) - // Split into name, in params, out params. - f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) - if f == nil { - fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) - os.Exit(1) - } - funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] - - // Split argument lists on comma. 
- in := parseParamList(inps) - out := parseParamList(outps) - - inps = strings.Join(in, ", ") - outps = strings.Join(out, ", ") - - // Try in vain to keep people from editing this file. - // The theory is that they jump into the middle of the file - // without reading the header. - text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" - - // So file name. - if modname == "" { - modname = "libc" - } - - // System call name. - if sysname == "" { - sysname = funct - } - - // System call pointer variable name. - sysvarname := fmt.Sprintf("proc%s", sysname) - - strconvfunc := "BytePtrFromString" - strconvtype := "*byte" - - sysname = strings.ToLower(sysname) // All libc functions are lowercase. - - // Runtime import of function to allow cross-platform builds. - dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) - // Link symbol to proc address variable. - linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) - // Library proc address variable. - vars = append(vars, sysvarname) - - // Go function header. - outlist := strings.Join(out, ", ") - if outlist != "" { - outlist = fmt.Sprintf(" (%s)", outlist) - } - if text != "" { - text += "\n" - } - text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) - - // Check if err return available - errvar := "" - for _, param := range out { - p := parseParam(param) - if p.Type == "error" { - errvar = p.Name - continue - } - } - - // Prepare arguments to Syscall. - var args []string - n := 0 - for _, param := range in { - p := parseParam(param) - if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { - args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") - } else if p.Type == "string" && errvar != "" { - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) - text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if p.Type == "string" { - fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") - text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) - text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) - n++ - } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { - // Convert slice into pointer, length. - // Have to be careful not to take address of &a[0] if len == 0: - // pass nil in that case. 
- text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) - text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) - args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) - n++ - } else if p.Type == "int64" && endianness != "" { - if endianness == "big-endian" { - args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) - } - } else if p.Type == "bool" { - text += fmt.Sprintf("\tvar _p%d uint32\n", n) - text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) - args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) - n++ - } else { - args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) - } - } - nargs := len(args) - - // Determine which form to use; pad args with zeros. - asm := "sysvicall6" - if nonblock != nil { - asm = "rawSysvicall6" - } - if len(args) <= 6 { - for len(args) < 6 { - args = append(args, "0") - } - } else { - fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) - os.Exit(1) - } - - // Actual call. - arglist := strings.Join(args, ", ") - call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) - - // Assign return values. - body := "" - ret := []string{"_", "_", "_"} - doErrno := false - for i := 0; i < len(out); i++ { - p := parseParam(out[i]) - reg := "" - if p.Name == "err" { - reg = "e1" - ret[2] = reg - doErrno = true - } else { - reg = fmt.Sprintf("r%d", i) - ret[i] = reg - } - if p.Type == "bool" { - reg = fmt.Sprintf("%d != 0", reg) - } - if p.Type == "int64" && endianness != "" { - // 64-bit number in r1:r0 or r0:r1. - if i+2 > len(out) { - fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) - os.Exit(1) - } - if endianness == "big-endian" { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) - } else { - reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) - } - ret[i] = fmt.Sprintf("r%d", i) - ret[i+1] = fmt.Sprintf("r%d", i+1) - } - if reg != "e1" { - body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) - } - } - if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { - text += fmt.Sprintf("\t%s\n", call) - } else { - text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) - } - text += body - - if doErrno { - text += "\tif e1 != 0 {\n" - text += "\t\terr = e1\n" - text += "\t}\n" - } - text += "\treturn\n" - text += "}\n" - } - if err := s.Err(); err != nil { - fmt.Fprintf(os.Stderr, err.Error()) - os.Exit(1) - } - file.Close() - } - imp := "" - if pack != "unix" { - imp = "import \"golang.org/x/sys/unix\"\n" - - } - vardecls := "\t" + strings.Join(vars, ",\n\t") - vardecls += " syscallFunc" - fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) -} - -const srcTemplate = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package %s - -import ( - "syscall" - "unsafe" -) -%s -%s -%s -var ( -%s -) - -%s -` diff --git a/backend/vendor/golang.org/x/sys/unix/mksysnum.go b/backend/vendor/golang.org/x/sys/unix/mksysnum.go deleted file mode 100644 index 07f8960f..00000000 --- a/backend/vendor/golang.org/x/sys/unix/mksysnum.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Generate system call table for DragonFly, NetBSD, -// FreeBSD, OpenBSD or Darwin from master list -// (for example, /usr/src/sys/kern/syscalls.master or -// sys/syscall.h). -package main - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "regexp" - "strings" -) - -var ( - goos, goarch string -) - -// cmdLine returns this programs's commandline arguments -func cmdLine() string { - return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") -} - -// buildTags returns build tags -func buildTags() string { - return fmt.Sprintf("%s,%s", goarch, goos) -} - -func checkErr(err error) { - if err != nil { - fmt.Fprintf(os.Stderr, "%v\n", err) - os.Exit(1) - } -} - -// source string and substring slice for regexp -type re struct { - str string // source string - sub []string // matched sub-string -} - -// Match performs regular expression match -func (r *re) Match(exp string) bool { - r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) - if r.sub != nil { - return true - } - return false -} - -// fetchFile fetches a text file from URL -func fetchFile(URL string) io.Reader { - resp, err := http.Get(URL) - checkErr(err) - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - checkErr(err) - return strings.NewReader(string(body)) -} - -// readFile reads a text file from path -func readFile(path string) io.Reader { - file, err := os.Open(os.Args[1]) - checkErr(err) - return file -} - -func format(name, num, proto string) string { - name = strings.ToUpper(name) - // There are multiple entries for enosys and nosys, so comment them out. - nm := re{str: name} - if nm.Match(`^SYS_E?NOSYS$`) { - name = fmt.Sprintf("// %s", name) - } - if name == `SYS_SYS_EXIT` { - name = `SYS_EXIT` - } - return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) -} - -func main() { - // Get the OS (using GOOS_TARGET if it exist) - goos = os.Getenv("GOOS_TARGET") - if goos == "" { - goos = os.Getenv("GOOS") - } - // Get the architecture (using GOARCH_TARGET if it exists) - goarch = os.Getenv("GOARCH_TARGET") - if goarch == "" { - goarch = os.Getenv("GOARCH") - } - // Check if GOOS and GOARCH environment variables are defined - if goarch == "" || goos == "" { - fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") - os.Exit(1) - } - - file := strings.TrimSpace(os.Args[1]) - var syscalls io.Reader - if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { - // Download syscalls.master file - syscalls = fetchFile(file) - } else { - syscalls = readFile(file) - } - - var text, line string - s := bufio.NewScanner(syscalls) - for s.Scan() { - t := re{str: line} - if t.Match(`^(.*)\\$`) { - // Handle continuation - line = t.sub[1] - line += strings.TrimLeft(s.Text(), " \t") - } else { - // New line - line = s.Text() - } - t = re{str: line} - if t.Match(`\\$`) { - continue - } - t = re{str: line} - - switch goos { - case "dragonfly": - if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "freebsd": - if t.Match(`^([0-9]+)\s+\S+\s+(?:NO)?STD\s+({ \S+\s+(\w+).*)$`) { - num, proto := t.sub[1], t.sub[2] - name := fmt.Sprintf("SYS_%s", t.sub[3]) - text += format(name, num, proto) - } - case "openbsd": - if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { - num, proto, name := t.sub[1], t.sub[3], 
t.sub[4] - text += format(name, num, proto) - } - case "netbsd": - if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { - num, proto, compat := t.sub[1], t.sub[6], t.sub[8] - name := t.sub[7] + "_" + t.sub[9] - if t.sub[11] != "" { - name = t.sub[7] + "_" + t.sub[11] - } - name = strings.ToUpper(name) - if compat == "" || compat == "13" || compat == "30" || compat == "50" { - text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) - } - } - case "darwin": - if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { - name, num := t.sub[1], t.sub[2] - name = strings.ToUpper(name) - text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) - } - default: - fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) - os.Exit(1) - - } - } - err := s.Err() - checkErr(err) - - fmt.Printf(template, cmdLine(), buildTags(), text) -} - -const template = `// %s -// Code generated by the command above; see README.md. DO NOT EDIT. - -// +build %s - -package unix - -const( -%s)` diff --git a/backend/vendor/golang.org/x/sys/unix/types_aix.go b/backend/vendor/golang.org/x/sys/unix/types_aix.go deleted file mode 100644 index 25e83494..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_aix.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore -// +build aix - -/* -Input to cgo -godefs. See also mkerrors.sh and mkall.sh -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - - -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -type off64 C.off64_t -type off C.off_t -type Mode_t C.mode_t - -// Time - -type Timespec C.struct_timespec - -type StTimespec C.struct_st_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Timex C.struct_timex - -type Time_t C.time_t - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -type Timezone C.struct_timezone - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit64 - -type Pid_t C.pid_t - -type _Gid_t C.gid_t - -type dev_t C.dev_t - -// Files - -type Stat_t C.struct_stat - -type StatxTimestamp C.struct_statx_timestamp - -type Statx_t C.struct_statx - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Cmsghdr C.struct_cmsghdr - -type 
ICMPv6Filter C.struct_icmp6_filter - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type Linger C.struct_linger - -type Msghdr C.struct_msghdr - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr -) - -type IfMsgHdr C.struct_if_msghdr - -// Misc - -type FdSet C.fd_set - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -type Sigset_t C.sigset_t - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -//poll - -type PollFd struct { - Fd int32 - Events uint16 - Revents uint16 -} - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -//flock_t - -type Flock_t C.struct_flock64 - -// Statfs - -type Fsid_t C.struct_fsid_t -type Fsid64_t C.struct_fsid64_t - -type Statfs_t C.struct_statfs - -const RNDGETENTCNT = 0x80045200 diff --git a/backend/vendor/golang.org/x/sys/unix/types_darwin.go b/backend/vendor/golang.org/x/sys/unix/types_darwin.go deleted file mode 100644 index 155c2e69..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_darwin.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define __DARWIN_UNIX03 0 -#define KERNEL -#define _DARWIN_USE_64_BIT_INODE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat64 - -type Statfs_t C.struct_statfs64 - -type Flock_t C.struct_flock - -type Fstore_t C.struct_fstore - -type Radvisory_t C.struct_radvisory - -type Fbootstraptransfer_t C.struct_fbootstraptransfer - -type Log2phys_t C.struct_log2phys - -type Fsid C.struct_fsid - -type Dirent C.struct_dirent - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet4Pktinfo C.struct_in_pktinfo - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - 
SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfmaMsghdr2 C.struct_ifma_msghdr2 - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/backend/vendor/golang.org/x/sys/unix/types_dragonfly.go b/backend/vendor/golang.org/x/sys/unix/types_dragonfly.go deleted file mode 100644 index 3365dd79..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_dragonfly.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr 
C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Uname - -type Utsname C.struct_utsname diff --git a/backend/vendor/golang.org/x/sys/unix/types_freebsd.go b/backend/vendor/golang.org/x/sys/unix/types_freebsd.go deleted file mode 100644 index 74707989..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_freebsd.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define _WANT_FREEBSD11_STAT 1 -#define _WANT_FREEBSD11_STATFS 1 -#define _WANT_FREEBSD11_DIRENT 1 -#define _WANT_FREEBSD11_KEVENT 1 - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -// This structure is a duplicate of if_data on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. -struct if_data8 { - u_char ifi_type; - u_char ifi_physical; - u_char ifi_addrlen; - u_char ifi_hdrlen; - u_char ifi_link_state; - u_char ifi_spare_char1; - u_char ifi_spare_char2; - u_char ifi_datalen; - u_long ifi_mtu; - u_long ifi_metric; - u_long ifi_baudrate; - u_long ifi_ipackets; - u_long ifi_ierrors; - u_long ifi_opackets; - u_long ifi_oerrors; - u_long ifi_collisions; - u_long ifi_ibytes; - u_long ifi_obytes; - u_long ifi_imcasts; - u_long ifi_omcasts; - u_long ifi_iqdrops; - u_long ifi_noproto; - u_long ifi_hwassist; -// FIXME: these are now unions, so maybe need to change definitions? -#undef ifi_epoch - time_t ifi_epoch; -#undef ifi_lastchange - struct timeval ifi_lastchange; -}; - -// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. -// See /usr/include/net/if.h. 
-struct if_msghdr8 { - u_short ifm_msglen; - u_char ifm_version; - u_char ifm_type; - int ifm_addrs; - int ifm_flags; - u_short ifm_index; - struct if_data8 ifm_data; -}; -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -const ( - _statfsVersion = C.STATFS_VERSION - _dirblksiz = C.DIRBLKSIZ -) - -type Stat_t C.struct_stat - -type stat_freebsd11_t C.struct_freebsd11_stat - -type Statfs_t C.struct_statfs - -type statfs_freebsd11_t C.struct_freebsd11_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type dirent_freebsd11 C.struct_freebsd11_dirent - -type Fsid C.struct_fsid - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPMreqn C.struct_ip_mreqn - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPMreqn = C.sizeof_struct_ip_mreqn - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent_freebsd11 - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - sizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 - sizeofIfData = C.sizeof_struct_if_data - SizeofIfData = C.sizeof_struct_if_data8 - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type ifMsghdr C.struct_if_msghdr - -type IfMsghdr C.struct_if_msghdr8 - -type ifData C.struct_if_data 
- -type IfData C.struct_if_data8 - -type IfaMsghdr C.struct_ifa_msghdr - -type IfmaMsghdr C.struct_ifma_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr - SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfZbuf C.struct_bpf_zbuf - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfZbufHeader C.struct_bpf_zbuf_header - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLINIGNEOF = C.POLLINIGNEOF - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Capabilities - -type CapRights C.struct_cap_rights - -// Uname - -type Utsname C.struct_utsname diff --git a/backend/vendor/golang.org/x/sys/unix/types_netbsd.go b/backend/vendor/golang.org/x/sys/unix/types_netbsd.go deleted file mode 100644 index 2dd4f954..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_netbsd.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Advice to Fadvise - -const ( - FADV_NORMAL = C.POSIX_FADV_NORMAL - FADV_RANDOM = C.POSIX_FADV_RANDOM - FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL - FADV_WILLNEED = C.POSIX_FADV_WILLNEED - FADV_DONTNEED = C.POSIX_FADV_DONTNEED - FADV_NOREUSE = C.POSIX_FADV_NOREUSE -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -type Ptmget C.struct_ptmget - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Sysctl - -type Sysctlnode C.struct_sysctlnode - -// Uname - -type Utsname C.struct_utsname - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/backend/vendor/golang.org/x/sys/unix/types_openbsd.go b/backend/vendor/golang.org/x/sys/unix/types_openbsd.go deleted file mode 100644 index 8aafbe44..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_openbsd.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. 
See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Statfs_t C.struct_statfs - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -type Fsid C.fsid_t - -// File system limits - -const ( - PathMax = C.PATH_MAX -) - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Ptrace requests - -const ( - PTRACE_TRACEME = C.PT_TRACE_ME - PTRACE_CONT = C.PT_CONTINUE - PTRACE_KILL = C.PT_KILL -) - -// Events (kqueue, kevent) - -type Kevent_t C.struct_kevent - -// Select - -type FdSet C.fd_set - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type IfaMsghdr C.struct_ifa_msghdr - -type IfAnnounceMsghdr C.struct_if_announcemsghdr - 
-type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -type Mclpool C.struct_mclpool - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfHdr C.struct_bpf_hdr - -type BpfTimeval C.struct_bpf_timeval - -// Terminal handling - -type Termios C.struct_termios - -type Winsize C.struct_winsize - -// fchmodat-like syscalls. - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW -) - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) - -// Signal Sets - -type Sigset_t C.sigset_t - -// Uname - -type Utsname C.struct_utsname - -// Uvmexp - -const SizeofUvmexp = C.sizeof_struct_uvmexp - -type Uvmexp C.struct_uvmexp - -// Clockinfo - -const SizeofClockinfo = C.sizeof_struct_clockinfo - -type Clockinfo C.struct_clockinfo diff --git a/backend/vendor/golang.org/x/sys/unix/types_solaris.go b/backend/vendor/golang.org/x/sys/unix/types_solaris.go deleted file mode 100644 index 2b716f93..00000000 --- a/backend/vendor/golang.org/x/sys/unix/types_solaris.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -Input to cgo -godefs. See README.md -*/ - -// +godefs map struct_in_addr [4]byte /* in_addr */ -// +godefs map struct_in6_addr [16]byte /* in6_addr */ - -package unix - -/* -#define KERNEL -// These defines ensure that builds done on newer versions of Solaris are -// backwards-compatible with older versions of Solaris and -// OpenSolaris-based derivatives. 
-#define __USE_SUNOS_SOCKETS__ // msghdr -#define __USE_LEGACY_PROTOTYPES__ // iovec -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -enum { - sizeofPtr = sizeof(void*), -}; - -union sockaddr_all { - struct sockaddr s1; // this one gets used for fields - struct sockaddr_in s2; // these pad it out - struct sockaddr_in6 s3; - struct sockaddr_un s4; - struct sockaddr_dl s5; -}; - -struct sockaddr_any { - struct sockaddr addr; - char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; -}; - -*/ -import "C" - -// Machine characteristics - -const ( - SizeofPtr = C.sizeofPtr - SizeofShort = C.sizeof_short - SizeofInt = C.sizeof_int - SizeofLong = C.sizeof_long - SizeofLongLong = C.sizeof_longlong - PathMax = C.PATH_MAX - MaxHostNameLen = C.MAXHOSTNAMELEN -) - -// Basic types - -type ( - _C_short C.short - _C_int C.int - _C_long C.long - _C_long_long C.longlong -) - -// Time - -type Timespec C.struct_timespec - -type Timeval C.struct_timeval - -type Timeval32 C.struct_timeval32 - -type Tms C.struct_tms - -type Utimbuf C.struct_utimbuf - -// Processes - -type Rusage C.struct_rusage - -type Rlimit C.struct_rlimit - -type _Gid_t C.gid_t - -// Files - -type Stat_t C.struct_stat - -type Flock_t C.struct_flock - -type Dirent C.struct_dirent - -// Filesystems - -type _Fsblkcnt_t C.fsblkcnt_t - -type Statvfs_t C.struct_statvfs - -// Sockets - -type RawSockaddrInet4 C.struct_sockaddr_in - -type RawSockaddrInet6 C.struct_sockaddr_in6 - -type RawSockaddrUnix C.struct_sockaddr_un - -type RawSockaddrDatalink C.struct_sockaddr_dl - -type RawSockaddr C.struct_sockaddr - -type RawSockaddrAny C.struct_sockaddr_any - -type _Socklen C.socklen_t - -type Linger C.struct_linger - -type Iovec C.struct_iovec - -type IPMreq C.struct_ip_mreq - -type IPv6Mreq C.struct_ipv6_mreq - -type Msghdr C.struct_msghdr - -type Cmsghdr C.struct_cmsghdr - -type Inet6Pktinfo C.struct_in6_pktinfo - -type IPv6MTUInfo C.struct_ip6_mtuinfo - -type ICMPv6Filter C.struct_icmp6_filter - -const ( - SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in - SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 - SizeofSockaddrAny = C.sizeof_struct_sockaddr_any - SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un - SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl - SizeofLinger = C.sizeof_struct_linger - SizeofIPMreq = C.sizeof_struct_ip_mreq - SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq - SizeofMsghdr = C.sizeof_struct_msghdr - SizeofCmsghdr = C.sizeof_struct_cmsghdr - SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo - SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo - SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter -) - -// Select - -type FdSet C.fd_set - -// Misc - -type Utsname C.struct_utsname - -type Ustat_t C.struct_ustat - -const ( - AT_FDCWD = C.AT_FDCWD - AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW - AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW - AT_REMOVEDIR = C.AT_REMOVEDIR - AT_EACCESS = C.AT_EACCESS -) - -// Routing and interface messages - -const ( - SizeofIfMsghdr = C.sizeof_struct_if_msghdr - SizeofIfData = C.sizeof_struct_if_data - SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr - SizeofRtMsghdr = C.sizeof_struct_rt_msghdr - SizeofRtMetrics = C.sizeof_struct_rt_metrics -) - -type IfMsghdr C.struct_if_msghdr - -type IfData C.struct_if_data - -type 
IfaMsghdr C.struct_ifa_msghdr - -type RtMsghdr C.struct_rt_msghdr - -type RtMetrics C.struct_rt_metrics - -// Berkeley packet filter - -const ( - SizeofBpfVersion = C.sizeof_struct_bpf_version - SizeofBpfStat = C.sizeof_struct_bpf_stat - SizeofBpfProgram = C.sizeof_struct_bpf_program - SizeofBpfInsn = C.sizeof_struct_bpf_insn - SizeofBpfHdr = C.sizeof_struct_bpf_hdr -) - -type BpfVersion C.struct_bpf_version - -type BpfStat C.struct_bpf_stat - -type BpfProgram C.struct_bpf_program - -type BpfInsn C.struct_bpf_insn - -type BpfTimeval C.struct_bpf_timeval - -type BpfHdr C.struct_bpf_hdr - -// Terminal handling - -type Termios C.struct_termios - -type Termio C.struct_termio - -type Winsize C.struct_winsize - -// poll - -type PollFd C.struct_pollfd - -const ( - POLLERR = C.POLLERR - POLLHUP = C.POLLHUP - POLLIN = C.POLLIN - POLLNVAL = C.POLLNVAL - POLLOUT = C.POLLOUT - POLLPRI = C.POLLPRI - POLLRDBAND = C.POLLRDBAND - POLLRDNORM = C.POLLRDNORM - POLLWRBAND = C.POLLWRBAND - POLLWRNORM = C.POLLWRNORM -) diff --git a/backend/vendor/golang.org/x/text/unicode/norm/maketables.go b/backend/vendor/golang.org/x/text/unicode/norm/maketables.go deleted file mode 100644 index 338c395e..00000000 --- a/backend/vendor/golang.org/x/text/unicode/norm/maketables.go +++ /dev/null @@ -1,976 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Normalization table generator. -// Data read from the web. -// See forminfo.go for a description of the trie values associated with each rune. - -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "log" - "sort" - "strconv" - "strings" - - "golang.org/x/text/internal/gen" - "golang.org/x/text/internal/triegen" - "golang.org/x/text/internal/ucd" -) - -func main() { - gen.Init() - loadUnicodeData() - compactCCC() - loadCompositionExclusions() - completeCharFields(FCanonical) - completeCharFields(FCompatibility) - computeNonStarterCounts() - verifyComputed() - printChars() - testDerived() - printTestdata() - makeTables() -} - -var ( - tablelist = flag.String("tables", - "all", - "comma-separated list of which tables to generate; "+ - "can be 'decomp', 'recomp', 'info' and 'all'") - test = flag.Bool("test", - false, - "test existing tables against DerivedNormalizationProps and generate test data for regression testing") - verbose = flag.Bool("verbose", - false, - "write data to stdout as it is parsed") -) - -const MaxChar = 0x10FFFF // anything above this shouldn't exist - -// Quick Check properties of runes allow us to quickly -// determine whether a rune may occur in a normal form. -// For a given normal form, a rune may be guaranteed to occur -// verbatim (QC=Yes), may or may not combine with another -// rune (QC=Maybe), or may not occur (QC=No). -type QCResult int - -const ( - QCUnknown QCResult = iota - QCYes - QCNo - QCMaybe -) - -func (r QCResult) String() string { - switch r { - case QCYes: - return "Yes" - case QCNo: - return "No" - case QCMaybe: - return "Maybe" - } - return "***UNKNOWN***" -} - -const ( - FCanonical = iota // NFC or NFD - FCompatibility // NFKC or NFKD - FNumberOfFormTypes -) - -const ( - MComposed = iota // NFC or NFKC - MDecomposed // NFD or NFKD - MNumberOfModes -) - -// This contains only the properties we're interested in. -type Char struct { - name string - codePoint rune // if zero, this index is not a valid code point. 
- ccc uint8 // canonical combining class - origCCC uint8 - excludeInComp bool // from CompositionExclusions.txt - compatDecomp bool // it has a compatibility expansion - - nTrailingNonStarters uint8 - nLeadingNonStarters uint8 // must be equal to trailing if non-zero - - forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility - - state State -} - -var chars = make([]Char, MaxChar+1) -var cccMap = make(map[uint8]uint8) - -func (c Char) String() string { - buf := new(bytes.Buffer) - - fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) - fmt.Fprintf(buf, " ccc: %v\n", c.ccc) - fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) - fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) - fmt.Fprintf(buf, " state: %v\n", c.state) - fmt.Fprintf(buf, " NFC:\n") - fmt.Fprint(buf, c.forms[FCanonical]) - fmt.Fprintf(buf, " NFKC:\n") - fmt.Fprint(buf, c.forms[FCompatibility]) - - return buf.String() -} - -// In UnicodeData.txt, some ranges are marked like this: -// 3400;;Lo;0;L;;;;;N;;;;; -// 4DB5;;Lo;0;L;;;;;N;;;;; -// parseCharacter keeps a state variable indicating the weirdness. -type State int - -const ( - SNormal State = iota // known to be zero for the type - SFirst - SLast - SMissing -) - -var lastChar = rune('\u0000') - -func (c Char) isValid() bool { - return c.codePoint != 0 && c.state != SMissing -} - -type FormInfo struct { - quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed - verified [MNumberOfModes]bool // index: MComposed or MDecomposed - - combinesForward bool // May combine with rune on the right - combinesBackward bool // May combine with rune on the left - isOneWay bool // Never appears in result - inDecomp bool // Some decompositions result in this char. - decomp Decomposition - expandedDecomp Decomposition -} - -func (f FormInfo) String() string { - buf := bytes.NewBuffer(make([]byte, 0)) - - fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) - fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) - fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) - fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) - fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) - fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) - fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) - fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) - - return buf.String() -} - -type Decomposition []rune - -func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { - decomp := strings.Split(s, " ") - if len(decomp) > 0 && skipfirst { - decomp = decomp[1:] - } - for _, d := range decomp { - point, err := strconv.ParseUint(d, 16, 64) - if err != nil { - return a, err - } - a = append(a, rune(point)) - } - return a, nil -} - -func loadUnicodeData() { - f := gen.OpenUCDFile("UnicodeData.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(ucd.CodePoint) - char := &chars[r] - - char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) - decmap := p.String(ucd.DecompMapping) - - exp, err := parseDecomposition(decmap, false) - isCompat := false - if err != nil { - if len(decmap) > 0 { - exp, err = parseDecomposition(decmap, true) - if err != nil { - log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) - } - isCompat = true - } - } - - char.name = p.String(ucd.Name) - char.codePoint = r - char.forms[FCompatibility].decomp = exp - if !isCompat { - char.forms[FCanonical].decomp = exp - } else { - char.compatDecomp = true - } - if len(decmap) > 0 { - char.forms[FCompatibility].decomp = exp - } - } - if err 
:= p.Err(); err != nil { - log.Fatal(err) - } -} - -// compactCCC converts the sparse set of CCC values to a continguous one, -// reducing the number of bits needed from 8 to 6. -func compactCCC() { - m := make(map[uint8]uint8) - for i := range chars { - c := &chars[i] - m[c.ccc] = 0 - } - cccs := []int{} - for v, _ := range m { - cccs = append(cccs, int(v)) - } - sort.Ints(cccs) - for i, c := range cccs { - cccMap[uint8(i)] = uint8(c) - m[uint8(c)] = uint8(i) - } - for i := range chars { - c := &chars[i] - c.origCCC = c.ccc - c.ccc = m[c.ccc] - } - if len(m) >= 1<<6 { - log.Fatalf("too many difference CCC values: %d >= 64", len(m)) - } -} - -// CompositionExclusions.txt has form: -// 0958 # ... -// See http://unicode.org/reports/tr44/ for full explanation -func loadCompositionExclusions() { - f := gen.OpenUCDFile("CompositionExclusions.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - c := &chars[p.Rune(0)] - if c.excludeInComp { - log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) - } - c.excludeInComp = true - } - if e := p.Err(); e != nil { - log.Fatal(e) - } -} - -// hasCompatDecomp returns true if any of the recursive -// decompositions contains a compatibility expansion. -// In this case, the character may not occur in NFK*. -func hasCompatDecomp(r rune) bool { - c := &chars[r] - if c.compatDecomp { - return true - } - for _, d := range c.forms[FCompatibility].decomp { - if hasCompatDecomp(d) { - return true - } - } - return false -} - -// Hangul related constants. -const ( - HangulBase = 0xAC00 - HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) - - JamoLBase = 0x1100 - JamoLEnd = 0x1113 - JamoVBase = 0x1161 - JamoVEnd = 0x1176 - JamoTBase = 0x11A8 - JamoTEnd = 0x11C3 - - JamoLVTCount = 19 * 21 * 28 - JamoTCount = 28 -) - -func isHangul(r rune) bool { - return HangulBase <= r && r < HangulEnd -} - -func isHangulWithoutJamoT(r rune) bool { - if !isHangul(r) { - return false - } - r -= HangulBase - return r < JamoLVTCount && r%JamoTCount == 0 -} - -func ccc(r rune) uint8 { - return chars[r].ccc -} - -// Insert a rune in a buffer, ordered by Canonical Combining Class. -func insertOrdered(b Decomposition, r rune) Decomposition { - n := len(b) - b = append(b, 0) - cc := ccc(r) - if cc > 0 { - // Use bubble sort. - for ; n > 0; n-- { - if ccc(b[n-1]) <= cc { - break - } - b[n] = b[n-1] - } - } - b[n] = r - return b -} - -// Recursively decompose. -func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { - dcomp := chars[r].forms[form].decomp - if len(dcomp) == 0 { - return insertOrdered(d, r) - } - for _, c := range dcomp { - d = decomposeRecursive(form, c, d) - } - return d -} - -func completeCharFields(form int) { - // Phase 0: pre-expand decomposition. - for i := range chars { - f := &chars[i].forms[form] - if len(f.decomp) == 0 { - continue - } - exp := make(Decomposition, 0) - for _, c := range f.decomp { - exp = decomposeRecursive(form, c, exp) - } - f.expandedDecomp = exp - } - - // Phase 1: composition exclusion, mark decomposition. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - // Marks script-specific exclusions and version restricted. - f.isOneWay = c.excludeInComp - - // Singletons - f.isOneWay = f.isOneWay || len(f.decomp) == 1 - - // Non-starter decompositions - if len(f.decomp) > 1 { - chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 - f.isOneWay = f.isOneWay || chk - } - - // Runes that decompose into more than two runes. 
- f.isOneWay = f.isOneWay || len(f.decomp) > 2 - - if form == FCompatibility { - f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) - } - - for _, r := range f.decomp { - chars[r].forms[form].inDecomp = true - } - } - - // Phase 2: forward and backward combining. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - if !f.isOneWay && len(f.decomp) == 2 { - f0 := &chars[f.decomp[0]].forms[form] - f1 := &chars[f.decomp[1]].forms[form] - if !f0.isOneWay { - f0.combinesForward = true - } - if !f1.isOneWay { - f1.combinesBackward = true - } - } - if isHangulWithoutJamoT(rune(i)) { - f.combinesForward = true - } - } - - // Phase 3: quick check values. - for i := range chars { - c := &chars[i] - f := &c.forms[form] - - switch { - case len(f.decomp) > 0: - f.quickCheck[MDecomposed] = QCNo - case isHangul(rune(i)): - f.quickCheck[MDecomposed] = QCNo - default: - f.quickCheck[MDecomposed] = QCYes - } - switch { - case f.isOneWay: - f.quickCheck[MComposed] = QCNo - case (i & 0xffff00) == JamoLBase: - f.quickCheck[MComposed] = QCYes - if JamoLBase <= i && i < JamoLEnd { - f.combinesForward = true - } - if JamoVBase <= i && i < JamoVEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - f.combinesForward = true - } - if JamoTBase <= i && i < JamoTEnd { - f.quickCheck[MComposed] = QCMaybe - f.combinesBackward = true - } - case !f.combinesBackward: - f.quickCheck[MComposed] = QCYes - default: - f.quickCheck[MComposed] = QCMaybe - } - } -} - -func computeNonStarterCounts() { - // Phase 4: leading and trailing non-starter count - for i := range chars { - c := &chars[i] - - runes := []rune{rune(i)} - // We always use FCompatibility so that the CGJ insertion points do not - // change for repeated normalizations with different forms. - if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { - runes = exp - } - // We consider runes that combine backwards to be non-starters for the - // purpose of Stream-Safe Text Processing. - for _, r := range runes { - if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nLeadingNonStarters++ - } - for i := len(runes) - 1; i >= 0; i-- { - if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { - break - } - c.nTrailingNonStarters++ - } - if c.nTrailingNonStarters > 3 { - log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) - } - - if isHangul(rune(i)) { - c.nTrailingNonStarters = 2 - if isHangulWithoutJamoT(rune(i)) { - c.nTrailingNonStarters = 1 - } - } - - if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { - log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) - } - if t := c.nTrailingNonStarters; t > 3 { - log.Fatalf("%U: number of trailing non-starters is %d > 3", t) - } - } -} - -func printBytes(w io.Writer, b []byte, name string) { - fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) - fmt.Fprintf(w, "var %s = [...]byte {", name) - for i, c := range b { - switch { - case i%64 == 0: - fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) - case i%8 == 0: - fmt.Fprintf(w, "\n") - } - fmt.Fprintf(w, "0x%.2X, ", c) - } - fmt.Fprint(w, "\n}\n\n") -} - -// See forminfo.go for format. 
-func makeEntry(f *FormInfo, c *Char) uint16 { - e := uint16(0) - if r := c.codePoint; HangulBase <= r && r < HangulEnd { - e |= 0x40 - } - if f.combinesForward { - e |= 0x20 - } - if f.quickCheck[MDecomposed] == QCNo { - e |= 0x4 - } - switch f.quickCheck[MComposed] { - case QCYes: - case QCNo: - e |= 0x10 - case QCMaybe: - e |= 0x18 - default: - log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) - } - e |= uint16(c.nTrailingNonStarters) - return e -} - -// decompSet keeps track of unique decompositions, grouped by whether -// the decomposition is followed by a trailing and/or leading CCC. -type decompSet [7]map[string]bool - -const ( - normalDecomp = iota - firstMulti - firstCCC - endMulti - firstLeadingCCC - firstCCCZeroExcept - firstStarterWithNLead - lastDecomp -) - -var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} - -func makeDecompSet() decompSet { - m := decompSet{} - for i := range m { - m[i] = make(map[string]bool) - } - return m -} -func (m *decompSet) insert(key int, s string) { - m[key][s] = true -} - -func printCharInfoTables(w io.Writer) int { - mkstr := func(r rune, f *FormInfo) (int, string) { - d := f.expandedDecomp - s := string([]rune(d)) - if max := 1 << 6; len(s) >= max { - const msg = "%U: too many bytes in decomposition: %d >= %d" - log.Fatalf(msg, r, len(s), max) - } - head := uint8(len(s)) - if f.quickCheck[MComposed] != QCYes { - head |= 0x40 - } - if f.combinesForward { - head |= 0x80 - } - s = string([]byte{head}) + s - - lccc := ccc(d[0]) - tccc := ccc(d[len(d)-1]) - cc := ccc(r) - if cc != 0 && lccc == 0 && tccc == 0 { - log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) - } - if tccc < lccc && lccc != 0 { - const msg = "%U: lccc (%d) must be <= tcc (%d)" - log.Fatalf(msg, r, lccc, tccc) - } - index := normalDecomp - nTrail := chars[r].nTrailingNonStarters - nLead := chars[r].nLeadingNonStarters - if tccc > 0 || lccc > 0 || nTrail > 0 { - tccc <<= 2 - tccc |= nTrail - s += string([]byte{tccc}) - index = endMulti - for _, r := range d[1:] { - if ccc(r) == 0 { - index = firstCCC - } - } - if lccc > 0 || nLead > 0 { - s += string([]byte{lccc}) - if index == firstCCC { - log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) - } - index = firstLeadingCCC - } - if cc != lccc { - if cc != 0 { - log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) - } - index = firstCCCZeroExcept - } - } else if len(d) > 1 { - index = firstMulti - } - return index, s - } - - decompSet := makeDecompSet() - const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. - decompSet.insert(firstStarterWithNLead, nLeadStr) - - // Store the uniqued decompositions in a byte buffer, - // preceded by their byte length. 
- for _, c := range chars { - for _, f := range c.forms { - if len(f.expandedDecomp) == 0 { - continue - } - if f.combinesBackward { - log.Fatalf("%U: combinesBackward and decompose", c.codePoint) - } - index, s := mkstr(c.codePoint, &f) - decompSet.insert(index, s) - } - } - - decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) - size := 0 - positionMap := make(map[string]uint16) - decompositions.WriteString("\000") - fmt.Fprintln(w, "const (") - for i, m := range decompSet { - sa := []string{} - for s := range m { - sa = append(sa, s) - } - sort.Strings(sa) - for _, s := range sa { - p := decompositions.Len() - decompositions.WriteString(s) - positionMap[s] = uint16(p) - } - if cname[i] != "" { - fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) - } - } - fmt.Fprintln(w, "maxDecomp = 0x8000") - fmt.Fprintln(w, ")") - b := decompositions.Bytes() - printBytes(w, b, "decomps") - size += len(b) - - varnames := []string{"nfc", "nfkc"} - for i := 0; i < FNumberOfFormTypes; i++ { - trie := triegen.NewTrie(varnames[i]) - - for r, c := range chars { - f := c.forms[i] - d := f.expandedDecomp - if len(d) != 0 { - _, key := mkstr(c.codePoint, &f) - trie.Insert(rune(r), uint64(positionMap[key])) - if c.ccc != ccc(d[0]) { - // We assume the lead ccc of a decomposition !=0 in this case. - if ccc(d[0]) == 0 { - log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) - } - } - } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { - // Handle cases where it can't be detected that the nLead should be equal - // to nTrail. - trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) - } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { - trie.Insert(c.codePoint, uint64(0x8000|v)) - } - } - sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) - if err != nil { - log.Fatal(err) - } - size += sz - } - return size -} - -func contains(sa []string, s string) bool { - for _, a := range sa { - if a == s { - return true - } - } - return false -} - -func makeTables() { - w := &bytes.Buffer{} - - size := 0 - if *tablelist == "" { - return - } - list := strings.Split(*tablelist, ",") - if *tablelist == "all" { - list = []string{"recomp", "info"} - } - - // Compute maximum decomposition size. - max := 0 - for _, c := range chars { - if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { - max = n - } - } - - fmt.Fprintln(w, "const (") - fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") - fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) - fmt.Fprintln(w) - fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") - fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") - fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") - fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") - fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) - fmt.Fprintln(w, ")\n") - - // Print the CCC remap table. 
- size += len(cccMap) - fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) - for i := 0; i < len(cccMap); i++ { - if i%8 == 0 { - fmt.Fprintln(w) - } - fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) - } - fmt.Fprintln(w, "\n}\n") - - if contains(list, "info") { - size += printCharInfoTables(w) - } - - if contains(list, "recomp") { - // Note that we use 32 bit keys, instead of 64 bit. - // This clips the bits of three entries, but we know - // this won't cause a collision. The compiler will catch - // any changes made to UnicodeData.txt that introduces - // a collision. - // Note that the recomposition map for NFC and NFKC - // are identical. - - // Recomposition map - nrentries := 0 - for _, c := range chars { - f := c.forms[FCanonical] - if !f.isOneWay && len(f.decomp) > 0 { - nrentries++ - } - } - sz := nrentries * 8 - size += sz - fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) - fmt.Fprintln(w, "var recompMap = map[uint32]rune{") - for i, c := range chars { - f := c.forms[FCanonical] - d := f.decomp - if !f.isOneWay && len(d) > 0 { - key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) - fmt.Fprintf(w, "0x%.8X: 0x%.4X,\n", key, i) - } - } - fmt.Fprintf(w, "}\n\n") - } - - fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) - gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) -} - -func printChars() { - if *verbose { - for _, c := range chars { - if !c.isValid() || c.state == SMissing { - continue - } - fmt.Println(c) - } - } -} - -// verifyComputed does various consistency tests. -func verifyComputed() { - for i, c := range chars { - for _, f := range c.forms { - isNo := (f.quickCheck[MDecomposed] == QCNo) - if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { - log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) - } - - isMaybe := f.quickCheck[MComposed] == QCMaybe - if f.combinesBackward != isMaybe { - log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) - } - if len(f.decomp) > 0 && f.combinesForward && isMaybe { - log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) - } - - if len(f.expandedDecomp) != 0 { - continue - } - if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { - // We accept these runes to be treated differently (it only affects - // segment breaking in iteration, most likely on improper use), but - // reconsider if more characters are added. - // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L; 3099;;;;N;;;;; - // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L; 309A;;;;N;;;;; - // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; - // U+318E HANGUL LETTER ARAEAE;Lo;0;L; 11A1;;;;N;HANGUL LETTER ALAE AE;;;; - // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L; 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; - // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L; 3163;;;;N;;;;; - if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { - log.Fatalf("%U: nLead was %v; want %v", i, a, b) - } - } - } - nfc := c.forms[FCanonical] - nfkc := c.forms[FCompatibility] - if nfc.combinesBackward != nfkc.combinesBackward { - log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) - } - } -} - -// Use values in DerivedNormalizationProps.txt to compare against the -// values we computed. -// DerivedNormalizationProps.txt has form: -// 00C0..00C5 ; NFD_QC; N # ... -// 0374 ; NFD_QC; N # ... 
-// See http://unicode.org/reports/tr44/ for full explanation -func testDerived() { - f := gen.OpenUCDFile("DerivedNormalizationProps.txt") - defer f.Close() - p := ucd.New(f) - for p.Next() { - r := p.Rune(0) - c := &chars[r] - - var ftype, mode int - qt := p.String(1) - switch qt { - case "NFC_QC": - ftype, mode = FCanonical, MComposed - case "NFD_QC": - ftype, mode = FCanonical, MDecomposed - case "NFKC_QC": - ftype, mode = FCompatibility, MComposed - case "NFKD_QC": - ftype, mode = FCompatibility, MDecomposed - default: - continue - } - var qr QCResult - switch p.String(2) { - case "Y": - qr = QCYes - case "N": - qr = QCNo - case "M": - qr = QCMaybe - default: - log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) - } - if got := c.forms[ftype].quickCheck[mode]; got != qr { - log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) - } - c.forms[ftype].verified[mode] = true - } - if err := p.Err(); err != nil { - log.Fatal(err) - } - // Any unspecified value must be QCYes. Verify this. - for i, c := range chars { - for j, fd := range c.forms { - for k, qr := range fd.quickCheck { - if !fd.verified[k] && qr != QCYes { - m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" - log.Printf(m, i, j, k, qr, c.name) - } - } - } - } -} - -var testHeader = `const ( - Yes = iota - No - Maybe -) - -type formData struct { - qc uint8 - combinesForward bool - decomposition string -} - -type runeData struct { - r rune - ccc uint8 - nLead uint8 - nTrail uint8 - f [2]formData // 0: canonical; 1: compatibility -} - -func f(qc uint8, cf bool, dec string) [2]formData { - return [2]formData{{qc, cf, dec}, {qc, cf, dec}} -} - -func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { - return [2]formData{{qc, cf, d}, {qck, cfk, dk}} -} - -var testData = []runeData{ -` - -func printTestdata() { - type lastInfo struct { - ccc uint8 - nLead uint8 - nTrail uint8 - f string - } - - last := lastInfo{} - w := &bytes.Buffer{} - fmt.Fprintf(w, testHeader) - for r, c := range chars { - f := c.forms[FCanonical] - qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - f = c.forms[FCompatibility] - qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) - s := "" - if d == dk && qc == qck && cf == cfk { - s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) - } else { - s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) - } - current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} - if last != current { - fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) - last = current - } - } - fmt.Fprintln(w, "}") - gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) -} diff --git a/backend/vendor/golang.org/x/text/unicode/norm/triegen.go b/backend/vendor/golang.org/x/text/unicode/norm/triegen.go deleted file mode 100644 index 45d71190..00000000 --- a/backend/vendor/golang.org/x/text/unicode/norm/triegen.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -// Trie table generator. -// Used by make*tables tools to generate a go file with trie data structures -// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte -// sequence are used to lookup offsets in the index table to be used for the -// next byte. The last byte is used to index into a table with 16-bit values. 
- -package main - -import ( - "fmt" - "io" -) - -const maxSparseEntries = 16 - -type normCompacter struct { - sparseBlocks [][]uint64 - sparseOffset []uint16 - sparseCount int - name string -} - -func mostFrequentStride(a []uint64) int { - counts := make(map[int]int) - var v int - for _, x := range a { - if stride := int(x) - v; v != 0 && stride >= 0 { - counts[stride]++ - } - v = int(x) - } - var maxs, maxc int - for stride, cnt := range counts { - if cnt > maxc || (cnt == maxc && stride < maxs) { - maxs, maxc = stride, cnt - } - } - return maxs -} - -func countSparseEntries(a []uint64) int { - stride := mostFrequentStride(a) - var v, count int - for _, tv := range a { - if int(tv)-v != stride { - if tv != 0 { - count++ - } - } - v = int(tv) - } - return count -} - -func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { - if n := countSparseEntries(v); n <= maxSparseEntries { - return (n+1)*4 + 2, true - } - return 0, false -} - -func (c *normCompacter) Store(v []uint64) uint32 { - h := uint32(len(c.sparseOffset)) - c.sparseBlocks = append(c.sparseBlocks, v) - c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) - c.sparseCount += countSparseEntries(v) + 1 - return h -} - -func (c *normCompacter) Handler() string { - return c.name + "Sparse.lookup" -} - -func (c *normCompacter) Print(w io.Writer) (retErr error) { - p := func(f string, x ...interface{}) { - if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { - retErr = err - } - } - - ls := len(c.sparseBlocks) - p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) - p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) - - ns := c.sparseCount - p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) - p("var %sSparseValues = [%d]valueRange {", c.name, ns) - for i, b := range c.sparseBlocks { - p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) - var v int - stride := mostFrequentStride(b) - n := countSparseEntries(b) - p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) - for i, nv := range b { - if int(nv)-v != stride { - if v != 0 { - p(",hi:%#02x},", 0x80+i-1) - } - if nv != 0 { - p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) - } - } - v = int(nv) - } - if v != 0 { - p(",hi:%#02x},", 0x80+len(b)-1) - } - } - p("\n}\n\n") - return -} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/.gitignore b/backend/vendor/gopkg.in/go-playground/validator.v9/.gitignore new file mode 100644 index 00000000..792ca00d --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/.gitignore @@ -0,0 +1,29 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.test +*.out +*.txt +cover.html +README.html \ No newline at end of file diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/LICENSE b/backend/vendor/gopkg.in/go-playground/validator.v9/LICENSE new file mode 100644 index 00000000..6a2ae9aa --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, 
sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/Makefile b/backend/vendor/gopkg.in/go-playground/validator.v9/Makefile new file mode 100644 index 00000000..aeeee9da --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/Makefile @@ -0,0 +1,16 @@ +GOCMD=go + +linters-install: + $(GOCMD) get -u github.com/alecthomas/gometalinter + gometalinter --install + +lint: linters-install + gometalinter --vendor --disable-all --enable=vet --enable=vetshadow --enable=golint --enable=maligned --enable=megacheck --enable=ineffassign --enable=misspell --enable=errcheck --enable=goconst ./... + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/README.md b/backend/vendor/gopkg.in/go-playground/validator.v9/README.md new file mode 100644 index 00000000..ba6a74e6 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/README.md @@ -0,0 +1,153 @@ +Package validator +================ +[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-9.29.1-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/validator/branches/v9/badge.svg)](https://semaphoreci.com/joeybloggs/validator) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=v9&service=github)](https://coveralls.io/github/go-playground/validator?branch=v9) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) +[![GoDoc](https://godoc.org/gopkg.in/go-playground/validator.v9?status.svg)](https://godoc.org/gopkg.in/go-playground/validator.v9) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. + +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. +- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. +- Ability to dive into both map keys and values for validation +- Handles type interface by determining it's underlying type prior to validation. 
+- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) +- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs +- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError +- Customizable i18n aware error messages. +- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) + +Installation +------------ + +Use go get. + + go get gopkg.in/go-playground/validator.v9 + +Then import the validator package into your own code. + + import "gopkg.in/go-playground/validator.v9" + +Error Return Value +------- + +Validation functions return type error + +They return type error to avoid the issue discussed in the following, where err is always != nil: + +* http://stackoverflow.com/a/29138676/3158232 +* https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: + +```go +err := validate.Struct(mystruct) +validationErrors := err.(validator.ValidationErrors) + ``` + +Usage and documentation +------ + +Please see http://godoc.org/gopkg.in/go-playground/validator.v9 for detailed usage docs. + +##### Examples: + +- [Simple](https://github.com/go-playground/validator/blob/v9/_examples/simple/main.go) +- [Custom Field Types](https://github.com/go-playground/validator/blob/v9/_examples/custom/main.go) +- [Struct Level](https://github.com/go-playground/validator/blob/v9/_examples/struct-level/main.go) +- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/v9/_examples/translations/main.go) +- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) +- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash) + +Benchmarks +------ +###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 +```go +goos: darwin +goarch: amd64 +pkg: github.com/go-playground/validator +BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op +BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op +BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op 
+BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op +BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op +BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op +BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op +BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op +BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op +BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op +BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op +BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op +BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op 
+BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op +BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op +BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +``` + +Complementary Software +---------------------- + +Here is a list of software that complements using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects + +How to Contribute +------ + +Make a pull request... + +License +------ +Distributed under MIT License, please see license file within the code for more details. diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/baked_in.go b/backend/vendor/gopkg.in/go-playground/validator.v9/baked_in.go new file mode 100644 index 00000000..338cddd7 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/baked_in.go @@ -0,0 +1,1999 @@ +package validator + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "net" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + urn "github.com/leodido/go-urn" +) + +// Func accepts a FieldLevel interface for all validation needs. The return +// value should be true when validation succeeds. +type Func func(fl FieldLevel) bool + +// FuncCtx accepts a context.Context and FieldLevel interface for all +// validation needs. The return value should be true when validation succeeds. +type FuncCtx func(ctx context.Context, fl FieldLevel) bool + +// wrapFunc wraps noramal Func makes it compatible with FuncCtx +func wrapFunc(fn Func) FuncCtx { + if fn == nil { + return nil // be sure not to wrap a bad function. + } + return func(ctx context.Context, fl FieldLevel) bool { + return fn(fl) + } +} + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + keysTag: {}, + endKeysTag: {}, + structOnlyTag: {}, + omitempty: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + requiredTag: {}, + isdefault: {}, + } + + // BakedInAliasValidators is a default mapping of a single validation tag that + // defines a common or complex set of validation(s) to simplify + // adding validation to structs. + bakedInAliases = map[string]string{ + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", + } + + // BakedInValidators is the default map of ValidationFunc + // you can add, remove or even replace items to suite your needs, + // or even disregard and use your own map if so desired. 
+ bakedInValidators = map[string]Func{ + "required": hasValue, + "required_with": requiredWith, + "required_with_all": requiredWithAll, + "required_without": requiredWithout, + "required_without_all": requiredWithoutAll, + "isdefault": isDefault, + "len": hasLengthOf, + "min": hasMinOf, + "max": hasMaxOf, + "eq": isEq, + "ne": isNe, + "lt": isLt, + "lte": isLte, + "gt": isGt, + "gte": isGte, + "eqfield": isEqField, + "eqcsfield": isEqCrossStructField, + "necsfield": isNeCrossStructField, + "gtcsfield": isGtCrossStructField, + "gtecsfield": isGteCrossStructField, + "ltcsfield": isLtCrossStructField, + "ltecsfield": isLteCrossStructField, + "nefield": isNeField, + "gtefield": isGteField, + "gtfield": isGtField, + "ltefield": isLteField, + "ltfield": isLtField, + "fieldcontains": fieldContains, + "fieldexcludes": fieldExcludes, + "alpha": isAlpha, + "alphanum": isAlphanum, + "alphaunicode": isAlphaUnicode, + "alphanumunicode": isAlphanumUnicode, + "numeric": isNumeric, + "number": isNumber, + "hexadecimal": isHexadecimal, + "hexcolor": isHEXColor, + "rgb": isRGB, + "rgba": isRGBA, + "hsl": isHSL, + "hsla": isHSLA, + "email": isEmail, + "url": isURL, + "uri": isURI, + "urn_rfc2141": isUrnRFC2141, // RFC 2141 + "file": isFile, + "base64": isBase64, + "base64url": isBase64URL, + "contains": contains, + "containsany": containsAny, + "containsrune": containsRune, + "excludes": excludes, + "excludesall": excludesAll, + "excludesrune": excludesRune, + "startswith": startsWith, + "endswith": endsWith, + "isbn": isISBN, + "isbn10": isISBN10, + "isbn13": isISBN13, + "eth_addr": isEthereumAddress, + "btc_addr": isBitcoinAddress, + "btc_addr_bech32": isBitcoinBech32Address, + "uuid": isUUID, + "uuid3": isUUID3, + "uuid4": isUUID4, + "uuid5": isUUID5, + "uuid_rfc4122": isUUIDRFC4122, + "uuid3_rfc4122": isUUID3RFC4122, + "uuid4_rfc4122": isUUID4RFC4122, + "uuid5_rfc4122": isUUID5RFC4122, + "ascii": isASCII, + "printascii": isPrintableASCII, + "multibyte": hasMultiByteCharacter, + "datauri": isDataURI, + "latitude": isLatitude, + "longitude": isLongitude, + "ssn": isSSN, + "ipv4": isIPv4, + "ipv6": isIPv6, + "ip": isIP, + "cidrv4": isCIDRv4, + "cidrv6": isCIDRv6, + "cidr": isCIDR, + "tcp4_addr": isTCP4AddrResolvable, + "tcp6_addr": isTCP6AddrResolvable, + "tcp_addr": isTCPAddrResolvable, + "udp4_addr": isUDP4AddrResolvable, + "udp6_addr": isUDP6AddrResolvable, + "udp_addr": isUDPAddrResolvable, + "ip4_addr": isIP4AddrResolvable, + "ip6_addr": isIP6AddrResolvable, + "ip_addr": isIPAddrResolvable, + "unix_addr": isUnixAddrResolvable, + "mac": isMAC, + "hostname": isHostnameRFC952, // RFC 952 + "hostname_rfc1123": isHostnameRFC1123, // RFC 1123 + "fqdn": isFQDN, + "unique": isUnique, + "oneof": isOneOf, + "html": isHTML, + "html_encoded": isHTMLEncoded, + "url_encoded": isURLEncoded, + "dir": isDir, + } +) + +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} + +func parseOneOfParam2(s string) []string { + oneofValsCacheRWLock.RLock() + vals, ok := oneofValsCache[s] + oneofValsCacheRWLock.RUnlock() + if !ok { + oneofValsCacheRWLock.Lock() + vals = strings.Fields(s) + oneofValsCache[s] = vals + oneofValsCacheRWLock.Unlock() + } + return vals +} + +func isURLEncoded(fl FieldLevel) bool { + return uRLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTMLEncoded(fl FieldLevel) bool { + return hTMLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTML(fl FieldLevel) bool { + return hTMLRegex.MatchString(fl.Field().String()) +} + +func isOneOf(fl FieldLevel) bool 
{ + vals := parseOneOfParam2(fl.Param()) + + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + for i := 0; i < len(vals); i++ { + if vals[i] == v { + return true + } + } + return false +} + +// isUnique is the validation function for validating if each array|slice|map value is unique +func isUnique(fl FieldLevel) bool { + + field := fl.Field() + v := reflect.ValueOf(struct{}{}) + + switch field.Kind() { + case reflect.Slice, reflect.Array: + m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type())) + + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(field.Index(i), v) + } + return field.Len() == m.Len() + case reflect.Map: + m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type())) + + for _, k := range field.MapKeys() { + m.SetMapIndex(field.MapIndex(k), v) + } + return field.Len() == m.Len() + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } +} + +// IsMAC is the validation function for validating if the field's value is a valid MAC address. +func isMAC(fl FieldLevel) bool { + + _, err := net.ParseMAC(fl.Field().String()) + + return err == nil +} + +// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +func isCIDRv4(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() != nil +} + +// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. +func isCIDRv6(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() == nil +} + +// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +func isCIDR(fl FieldLevel) bool { + + _, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil +} + +// IsIPv4 is the validation function for validating if a value is a valid v4 IP address. +func isIPv4(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() != nil +} + +// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +func isIPv6(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() == nil +} + +// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +func isIP(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil +} + +// IsSSN is the validation function for validating if the field's value is a valid SSN. +func isSSN(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() != 11 { + return false + } + + return sSNRegex.MatchString(field.String()) +} + +// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. 
+func isLongitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return longitudeRegex.MatchString(v) +} + +// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +func isLatitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return latitudeRegex.MatchString(v) +} + +// IsDataURI is the validation function for validating if the field's value is a valid data URI. +func isDataURI(fl FieldLevel) bool { + + uri := strings.SplitN(fl.Field().String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex.MatchString(uri[0]) { + return false + } + + return base64Regex.MatchString(uri[1]) +} + +// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. +func hasMultiByteCharacter(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() == 0 { + return true + } + + return multibyteRegex.MatchString(field.String()) +} + +// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +func isPrintableASCII(fl FieldLevel) bool { + return printableASCIIRegex.MatchString(fl.Field().String()) +} + +// IsASCII is the validation function for validating if the field's value is a valid ASCII character. +func isASCII(fl FieldLevel) bool { + return aSCIIRegex.MatchString(fl.Field().String()) +} + +// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +func isUUID5(fl FieldLevel) bool { + return uUID5Regex.MatchString(fl.Field().String()) +} + +// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +func isUUID4(fl FieldLevel) bool { + return uUID4Regex.MatchString(fl.Field().String()) +} + +// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +func isUUID3(fl FieldLevel) bool { + return uUID3Regex.MatchString(fl.Field().String()) +} + +// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. +func isUUID(fl FieldLevel) bool { + return uUIDRegex.MatchString(fl.Field().String()) +} + +// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. 
+func isUUID5RFC4122(fl FieldLevel) bool { + return uUID5RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +func isUUID4RFC4122(fl FieldLevel) bool { + return uUID4RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +func isUUID3RFC4122(fl FieldLevel) bool { + return uUID3RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +func isUUIDRFC4122(fl FieldLevel) bool { + return uUIDRFC4122Regex.MatchString(fl.Field().String()) +} + +// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +func isISBN(fl FieldLevel) bool { + return isISBN10(fl) || isISBN13(fl) +} + +// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +func isISBN13(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) + + if !iSBN13Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + factor := []int32{1, 3} + + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(s[i]-'0') + } + + return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 +} + +// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. +func isISBN10(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) + + if !iSBN10Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(s[i]-'0') + } + + if s[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(s[9]-'0') + } + + return checksum%11 == 0 +} + +// IsEthereumAddress is the validation function for validating if the field's value is a valid ethereum address based currently only on the format +func isEthereumAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !ethAddressRegex.MatchString(address) { + return false + } + + if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) { + return true + } + + // checksum validation is blocked by https://github.com/golang/crypto/pull/28 + + return true +} + +// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address +func isBitcoinAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !btcAddressRegex.MatchString(address) { + return false + } + + alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") + + decode := [25]byte{} + + for _, n := range []byte(address) { + d := bytes.IndexByte(alphabet, n) + + for i := 24; i >= 0; i-- { + d += 58 * int(decode[i]) + decode[i] = byte(d % 256) + d /= 256 + } + } + + h := sha256.New() + _, _ = h.Write(decode[:21]) + d := h.Sum([]byte{}) + h = sha256.New() + _, _ = h.Write(d) + + validchecksum := [4]byte{} + computedchecksum := [4]byte{} + + copy(computedchecksum[:], h.Sum(d[:0])) + copy(validchecksum[:], decode[21:]) + + return validchecksum == computedchecksum +} + +// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address +func isBitcoinBech32Address(fl FieldLevel) bool { + address := fl.Field().String() + + if 
!btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) { + return false + } + + am := len(address) % 8 + + if am == 0 || am == 3 || am == 5 { + return false + } + + address = strings.ToLower(address) + + alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l" + + hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc + addr := address[3:] + dp := make([]int, 0, len(addr)) + + for _, c := range addr { + dp = append(dp, strings.IndexRune(alphabet, c)) + } + + ver := dp[0] + + if ver < 0 || ver > 16 { + return false + } + + if ver == 0 { + if len(address) != 42 && len(address) != 62 { + return false + } + } + + values := append(hr, dp...) + + GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3} + + p := 1 + + for _, v := range values { + b := p >> 25 + p = (p&0x1ffffff)<<5 ^ v + + for i := 0; i < 5; i++ { + if (b>>uint(i))&1 == 1 { + p ^= GEN[i] + } + } + } + + if p != 1 { + return false + } + + b := uint(0) + acc := 0 + mv := (1 << 5) - 1 + var sw []int + + for _, v := range dp[1 : len(dp)-6] { + acc = (acc << 5) | v + b += 5 + for b >= 8 { + b -= 8 + sw = append(sw, (acc>>b)&mv) + } + } + + if len(sw) < 2 || len(sw) > 40 { + return false + } + + return true +} + +// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. +func excludesRune(fl FieldLevel) bool { + return !containsRune(fl) +} + +// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. +func excludesAll(fl FieldLevel) bool { + return !containsAny(fl) +} + +// Excludes is the validation function for validating that the field's value does not contain the text specified within the param. +func excludes(fl FieldLevel) bool { + return !contains(fl) +} + +// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param. +func containsRune(fl FieldLevel) bool { + + r, _ := utf8.DecodeRuneInString(fl.Param()) + + return strings.ContainsRune(fl.Field().String(), r) +} + +// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param. +func containsAny(fl FieldLevel) bool { + return strings.ContainsAny(fl.Field().String(), fl.Param()) +} + +// Contains is the validation function for validating that the field's value contains the text specified within the param. +func contains(fl FieldLevel) bool { + return strings.Contains(fl.Field().String(), fl.Param()) +} + +// StartsWith is the validation function for validating that the field's value starts with the text specified within the param. +func startsWith(fl FieldLevel) bool { + return strings.HasPrefix(fl.Field().String(), fl.Param()) +} + +// EndsWith is the validation function for validating that the field's value ends with the text specified within the param. +func endsWith(fl FieldLevel) bool { + return strings.HasSuffix(fl.Field().String(), fl.Param()) +} + +// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. 
+func fieldContains(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + + if !ok { + return false + } + + return strings.Contains(field.String(), currentField.String()) +} + +// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +func fieldExcludes(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + if !ok { + return true + } + + return !strings.Contains(field.String(), currentField.String()) +} + +// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. +func isNeField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// IsNe is the validation function for validating that the field's value does not equal the provided param value. +func isNe(fl FieldLevel) bool { + return !isEq(fl) +} + +// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. +func isLteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() <= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() <= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() <= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) <= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() <= topField.String() +} + +// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. 
+// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. +func isLtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() < topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() < topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() < topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) < int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) + } + } + + // default reflect.String: + return field.String() < topField.String() +} + +// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. +func isGteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() >= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() >= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() >= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) >= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() >= topField.String() +} + +// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. +func isGtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() > topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() > topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() > topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) > int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. 
struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) + } + } + + // default reflect.String: + return field.String() > topField.String() +} + +// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. +func isNeCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() != field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() != field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() != field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) != int64(field.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() != field.String() +} + +// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. +func isEqCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() == field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() == field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() == field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) == int64(field.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() == field.String() +} + +// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. 
+func isEqField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() == currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == int64(currentField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() == currentField.String() +} + +// IsEq is the validation function for validating if the current field's value is equal to the param's value. +func isEq(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + return field.String() == param + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsBase64 is the validation function for validating if the current field's value is a valid base 64. +func isBase64(fl FieldLevel) bool { + return base64Regex.MatchString(fl.Field().String()) +} + +// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. +func isBase64URL(fl FieldLevel) bool { + return base64URLRegex.MatchString(fl.Field().String()) +} + +// IsURI is the validation function for validating if the current field's value is a valid URI. +func isURI(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + _, err := url.ParseRequestURI(s) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsURL is the validation function for validating if the current field's value is a valid URL. +func isURL(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. 
see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == "" { + return false + } + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141. +func isUrnRFC2141(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + str := field.String() + + _, match := urn.Parse([]byte(str)) + + return match + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsFile is the validation function for validating if the current field's value is a valid file path. +func isFile(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + case reflect.String: + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return !fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsEmail is the validation function for validating if the current field's value is a valid email address. +func isEmail(fl FieldLevel) bool { + return emailRegex.MatchString(fl.Field().String()) +} + +// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. +func isHSLA(fl FieldLevel) bool { + return hslaRegex.MatchString(fl.Field().String()) +} + +// IsHSL is the validation function for validating if the current field's value is a valid HSL color. +func isHSL(fl FieldLevel) bool { + return hslRegex.MatchString(fl.Field().String()) +} + +// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. +func isRGBA(fl FieldLevel) bool { + return rgbaRegex.MatchString(fl.Field().String()) +} + +// IsRGB is the validation function for validating if the current field's value is a valid RGB color. +func isRGB(fl FieldLevel) bool { + return rgbRegex.MatchString(fl.Field().String()) +} + +// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. +func isHEXColor(fl FieldLevel) bool { + return hexcolorRegex.MatchString(fl.Field().String()) +} + +// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +func isHexadecimal(fl FieldLevel) bool { + return hexadecimalRegex.MatchString(fl.Field().String()) +} + +// IsNumber is the validation function for validating if the current field's value is a valid number. +func isNumber(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numberRegex.MatchString(fl.Field().String()) + } +} + +// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. +func isNumeric(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numericRegex.MatchString(fl.Field().String()) + } +} + +// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. 
+func isAlphanum(fl FieldLevel) bool { + return alphaNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. +func isAlpha(fl FieldLevel) bool { + return alphaRegex.MatchString(fl.Field().String()) +} + +// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +func isAlphanumUnicode(fl FieldLevel) bool { + return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. +func isAlphaUnicode(fl FieldLevel) bool { + return alphaUnicodeRegex.MatchString(fl.Field().String()) +} + +// isDefault is the opposite of required aka hasValue +func isDefault(fl FieldLevel) bool { + return !hasValue(fl) +} + +// HasValue is the validation function for validating if the current field's value is not the default static value. +func hasValue(fl FieldLevel) bool { + return requireCheckFieldKind(fl, "") +} + +// requireCheckField is a func for check field kind +func requireCheckFieldKind(fl FieldLevel, param string) bool { + field := fl.Field() + if len(param) > 0 { + if fl.Parent().Kind() == reflect.Ptr { + field = fl.Parent().Elem().FieldByName(param) + } else { + field = fl.Parent().FieldByName(param) + } + } + switch field.Kind() { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return !field.IsNil() + default: + if fl.(*validate).fldIsPointer && field.Interface() != nil { + return true + } + return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface() + } +} + +// RequiredWith is the validation function +// The field under validation must be present and not empty only if any of the other specified fields are present. +func requiredWith(fl FieldLevel) bool { + + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + + if requireCheckFieldKind(fl, param) { + return requireCheckFieldKind(fl, "") + } + } + + return true +} + +// RequiredWithAll is the validation function +// The field under validation must be present and not empty only if all of the other specified fields are present. +func requiredWithAll(fl FieldLevel) bool { + + isValidateCurrentField := true + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + + if !requireCheckFieldKind(fl, param) { + isValidateCurrentField = false + } + } + + if isValidateCurrentField { + return requireCheckFieldKind(fl, "") + } + + return true +} + +// RequiredWithout is the validation function +// The field under validation must be present and not empty only when any of the other specified fields are not present. +func requiredWithout(fl FieldLevel) bool { + + isValidateCurrentField := false + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + + if requireCheckFieldKind(fl, param) { + isValidateCurrentField = true + } + } + + if !isValidateCurrentField { + return requireCheckFieldKind(fl, "") + } + + return true +} + +// RequiredWithoutAll is the validation function +// The field under validation must be present and not empty only when all of the other specified fields are not present. 
+func requiredWithoutAll(fl FieldLevel) bool { + + isValidateCurrentField := true + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + + if requireCheckFieldKind(fl, param) { + isValidateCurrentField = false + } + } + + if isValidateCurrentField { + return requireCheckFieldKind(fl, "") + } + + return true +} + +// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. +func isGteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() >= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() >= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() >= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) >= len(currentField.String()) +} + +// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. +func isGtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() > currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() > currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() > currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) + } + } + + // default reflect.String + return len(field.String()) > len(currentField.String()) +} + +// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. 
+func isGte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) >= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) >= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() >= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() >= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() >= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.After(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsGt is the validation function for validating if the current field's value is greater than the param's value. +func isGt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) > p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) > p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() > p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() > p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() > p + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).After(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +func hasLengthOf(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) == p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. +func hasMinOf(fl FieldLevel) bool { + return isGte(fl) +} + +// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. 
+func isLteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() <= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() <= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() <= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) <= len(currentField.String()) +} + +// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. +func isLtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() < currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() < currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() < currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) + } + } + + // default reflect.String + return len(field.String()) < len(currentField.String()) +} + +// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. +func isLte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) <= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) <= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() <= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() <= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() <= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.Before(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsLt is the validation function for validating if the current field's value is less than the param's value. 
+func isLt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) < p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) < p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asInt(param) + + return field.Int() < p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() < p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() < p + + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).Before(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +func hasMaxOf(fl FieldLevel) bool { + return isLte(fl) +} + +// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. +func isTCP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp4", fl.Field().String()) + return err == nil +} + +// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. +func isTCP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp6", fl.Field().String()) + + return err == nil +} + +// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. +func isTCPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp", fl.Field().String()) + + return err == nil +} + +// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +func isUDP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp4", fl.Field().String()) + + return err == nil +} + +// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +func isUDP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp6", fl.Field().String()) + + return err == nil +} + +// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +func isUDPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp", fl.Field().String()) + + return err == nil +} + +// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +func isIP4AddrResolvable(fl FieldLevel) bool { + + if !isIPv4(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip4", fl.Field().String()) + + return err == nil +} + +// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. 
+func isIP6AddrResolvable(fl FieldLevel) bool { + + if !isIPv6(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip6", fl.Field().String()) + + return err == nil +} + +// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. +func isIPAddrResolvable(fl FieldLevel) bool { + + if !isIP(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip", fl.Field().String()) + + return err == nil +} + +// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. +func isUnixAddrResolvable(fl FieldLevel) bool { + + _, err := net.ResolveUnixAddr("unix", fl.Field().String()) + + return err == nil +} + +func isIP4Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + val = val[0:idx] + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() != nil +} + +func isIP6Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + if idx != 0 && val[idx-1:idx] == "]" { + val = val[1 : idx-1] + } + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() == nil +} + +func isHostnameRFC952(fl FieldLevel) bool { + return hostnameRegexRFC952.MatchString(fl.Field().String()) +} + +func isHostnameRFC1123(fl FieldLevel) bool { + return hostnameRegexRFC1123.MatchString(fl.Field().String()) +} + +func isFQDN(fl FieldLevel) bool { + val := fl.Field().String() + + if val == "" { + return false + } + + if val[len(val)-1] == '.' { + val = val[0 : len(val)-1] + } + + return strings.ContainsAny(val, ".") && + hostnameRegexRFC952.MatchString(val) +} + +// IsDir is the validation function for validating if the current field's value is a valid directory. 
+func isDir(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/cache.go b/backend/vendor/gopkg.in/go-playground/validator.v9/cache.go new file mode 100644 index 00000000..a7a4202f --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/cache.go @@ -0,0 +1,337 @@ +package validator + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeOmitEmpty + typeIsDefault + typeNoStructLevel + typeStructOnly + typeDive + typeOr + typeKeys + typeEndKeys +) + +const ( + invalidValidation = "Invalid validation tag on field '%s'" + undefinedValidation = "Undefined validation function '%s' on field '%s'" + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + + m := sc.m.Load().(map[reflect.Type]*cStruct) + + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + +func (tc *tagCache) Set(key string, value *cTag) { + + m := tc.m.Load().(map[string]*cTag) + + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + name string + fields []*cField + fn StructLevelFuncCtx +} + +type cField struct { + idx int + name string + altName string + namesEqual bool + cTags *cTag +} + +type cTag struct { + tag string + aliasTag string + actualAliasTag string + param string + keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation + next *cTag + fn FuncCtx + typeof tagType + hasTag bool + hasAlias bool + hasParam bool // true if parameter used eg. eq= where the equal sign has been set + isBlockEnd bool // indicates the current tag represents the last validation in the block +} + +func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { + + v.structCache.lock.Lock() + defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures struct + // isn't parsed again. 
+ cs, ok := v.structCache.Get(typ) + if ok { + return cs + } + + cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} + + numFields := current.NumField() + + var ctag *cTag + var fld reflect.StructField + var tag string + var customName string + + for i := 0; i < numFields; i++ { + + fld = typ.Field(i) + + if !fld.Anonymous && len(fld.PkgPath) > 0 { + continue + } + + tag = fld.Tag.Get(v.tagName) + + if tag == skipValidationTag { + continue + } + + customName = fld.Name + + if v.hasTagNameFunc { + + name := v.tagNameFunc(fld) + + if len(name) > 0 { + customName = name + } + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + + if len(tag) > 0 { + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. + ctag = new(cTag) + } + + cs.fields = append(cs.fields, &cField{ + idx: i, + name: fld.Name, + altName: customName, + cTags: ctag, + namesEqual: fld.Name == customName, + }) + } + + v.structCache.Set(typ, cs) + + return cs +} + +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { + + var t string + var ok bool + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + + t = tags[i] + + if noAlias { + alias = t + } + + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliases[t]; found { + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr + + } + + continue + } + + var prevTag tagType + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + firstCtag = current + } else { + prevTag = current.typeof + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch t { + + case diveTag: + current.typeof = typeDive + continue + + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag)) + } + + current.typeof = typeKeys + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + + b = append(b, tags[i]...) 
+ b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) + continue + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + panic(keysTagNotDefined) + } + return + + case omitempty: + current.typeof = typeOmitEmpty + continue + + case structOnlyTag: + current.typeof = typeStructOnly + continue + + case noStructLevelTag: + current.typeof = typeNoStructLevel + continue + + default: + + if t == isdefault { + current.typeof = typeIsDefault + } + + // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" + orVals := strings.Split(t, orSeparator) + + for j := 0; j < len(orVals); j++ { + + vals := strings.SplitN(orVals[j], tagKeySeparator, 2) + + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = t + } + + if j > 0 { + current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} + current = current.next + } + current.hasParam = len(vals) > 1 + + current.tag = vals[0] + if len(current.tag) == 0 { + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) + } + + if current.fn, ok = v.validations[current.tag]; !ok { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) + } + + if len(orVals) > 1 { + current.typeof = typeOr + } + + if len(vals) > 1 { + current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) + } + } + current.isBlockEnd = true + } + } + return +} + +func (v *Validate) fetchCacheTag(tag string) *cTag { + // find cached tag + ctag, found := v.tagCache.Get(tag) + if !found { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, found = v.tagCache.Get(tag) + if !found { + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) + v.tagCache.Set(tag, ctag) + } + } + return ctag +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/doc.go b/backend/vendor/gopkg.in/go-playground/validator.v9/doc.go new file mode 100644 index 00000000..e0396cb4 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/doc.go @@ -0,0 +1,1101 @@ +/* +Package validator implements value validations for structs and individual fields +based on tags. + +It can also handle Cross-Field and Cross-Struct validation for nested structs +and has the ability to dive into arrays and maps of any type. + +see more examples https://github.com/go-playground/validator/tree/v9/_examples + +Validation Functions Return Type error + +Doing things this way is actually the way the standard library does, see the +file.Open method here: + + https://golang.org/pkg/os/#Open. + +The authors return type "error" to avoid the issue discussed in the following, +where err is always != nil: + + http://stackoverflow.com/a/29138676/3158232 + https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or +ValidationErrors as type error; so, in your code all you need to do is check +if the error returned is not nil, and if it's not check if error is +InvalidValidationError ( if necessary, most of the time it isn't ) type cast +it to type ValidationErrors like so err.(validator.ValidationErrors). 
+ +Custom Validation Functions + +Custom Validation functions can be added. Example: + + // Structure + func customFunc(fl validator.FieldLevel) bool { + + if fl.Field().String() == "invalid" { + return false + } + + return true + } + + validate.RegisterValidation("custom tag name", customFunc) + // NOTES: using the same tag name as an existing function + // will overwrite the existing one + +Cross-Field Validation + +Cross-Field Validation can be done via the following tags: + - eqfield + - nefield + - gtfield + - gtefield + - ltfield + - ltefield + - eqcsfield + - necsfield + - gtcsfield + - gtecsfield + - ltcsfield + - ltecsfield + +If, however, some custom cross-field validation is required, it can be done +using a custom validation. + +Why not just have cross-fields validation tags (i.e. only eqcsfield and not +eqfield)? + +The reason is efficiency. If you want to check a field within the same struct +"eqfield" only has to find the field on the same struct (1 level). But, if we +used "eqcsfield" it could be multiple levels down. Example: + + type Inner struct { + StartDate time.Time + } + + type Outer struct { + InnerStructField *Inner + CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` + } + + now := time.Now() + + inner := &Inner{ + StartDate: now, + } + + outer := &Outer{ + InnerStructField: inner, + CreatedAt: now, + } + + errs := validate.Struct(outer) + + // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed + // into the function + // when calling validate.VarWithValue(val, field, tag) val will be + // whatever you pass, struct, field... + // when calling validate.Field(field, tag) val will be nil + +Multiple Validators + +Multiple validators on a field will process in the order defined. Example: + + type Test struct { + Field `validate:"max=10,min=1"` + } + + // max will be checked then min + +Bad Validator definitions are not handled by the library. Example: + + type Test struct { + Field `validate:"min=10,max=0"` + } + + // this definition of min max will never succeed + +Using Validator Tags + +Baked In Cross-Field validation only compares fields on the same struct. +If Cross-Field + Cross-Struct validation is needed you should implement your +own custom validator. + +Comma (",") is the default separator of validation tags. If you wish to +have a comma included within the parameter (i.e. excludesall=,) you will need to +use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, +so the above will become excludesall=0x2C. + + type Test struct { + Field `validate:"excludesall=,"` // BAD! Do not include a comma. + Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. + } + +Pipe ("|") is the 'or' validation tags deparator. If you wish to +have a pipe included within the parameter i.e. excludesall=| you will need to +use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, +so the above will become excludesall=0x7C + + type Test struct { + Field `validate:"excludesall=|"` // BAD! Do not include a a pipe! + Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation. + } + + +Baked In Validators and Tags + +Here is a list of the current built in validators: + + +Skip Field + +Tells the validation to skip this struct field; this is particularly +handy in ignoring embedded structs from being validated. (Usage: -) + Usage: - + + +Or Operator + +This is the 'or' operator allowing multiple validators to be used and +accepted. 
(Usage: rbg|rgba) <-- this would allow either rgb or rgba +colors to be accepted. This can also be combined with 'and' for example +( Usage: omitempty,rgb|rgba) + + Usage: | + +StructOnly + +When a field that is a nested struct is encountered, and contains this flag +any validation on the nested struct will be run, but none of the nested +struct fields will be validated. This is useful if inside of your program +you know the struct will be valid, but need to verify it has been assigned. +NOTE: only "required" and "omitempty" can be used on a struct itself. + + Usage: structonly + +NoStructLevel + +Same as structonly tag except that any struct level validations will not run. + + Usage: nostructlevel + +Omit Empty + +Allows conditional validation, for example if a field is not set with +a value (Determined by the "required" validator) then other validation +such as min or max won't run, but if a value is set validation will run. + + Usage: omitempty + +Dive + +This tells the validator to dive into a slice, array or map and validate that +level of the slice, array or map with the validation tags that follow. +Multidimensional nesting is also supported, each level you wish to dive will +require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see +the Keys & EndKeys section just below. + + Usage: dive + +Example #1 + + [][]string with validation tag "gt=0,dive,len=1,dive,required" + // gt=0 will be applied to [] + // len=1 will be applied to []string + // required will be applied to string + +Example #2 + + [][]string with validation tag "gt=0,dive,dive,required" + // gt=0 will be applied to [] + // []string will be spared validation + // required will be applied to string + +Keys & EndKeys + +These are to be used together directly after the dive tag and tells the validator +that anything between 'keys' and 'endkeys' applies to the keys of a map and not the +values; think of it like the 'dive' tag, but for map keys instead of values. +Multidimensional nesting is also supported, each level you wish to validate will +require another 'keys' and 'endkeys' tag. These tags are only valid for maps. + + Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags + +Example #1 + + map[string]string with validation tag "gt=0,dive,keys,eg=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to the map keys + // required will be applied to map values + +Example #2 + + map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to each array element in the the map keys + // required will be applied to map values + +Required + +This validates that the value is not the data types default zero value. +For numbers ensures value is not zero. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required + +Required With + +The field under validation must be present and not empty only if any +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. 
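A self-contained sketch of the dive and keys/endkeys behaviour described above; the Payload struct, its field names, and the sample values are invented for illustration:

    package main

    import (
        "fmt"

        validator "gopkg.in/go-playground/validator.v9"
    )

    type Payload struct {
        // gt=0 applies to the outer slice, len=1 to each inner slice,
        // and required to every string element (Example #1 above).
        Matrix [][]string `validate:"gt=0,dive,len=1,dive,required"`

        // keys/endkeys validate the map keys, the trailing tag the values.
        Headers map[string]string `validate:"gt=0,dive,keys,required,endkeys,required"`
    }

    func main() {
        validate := validator.New()

        ok := Payload{
            Matrix:  [][]string{{"a"}, {"b"}},
            Headers: map[string]string{"x-node": "master"},
        }
        fmt.Println(validate.Struct(ok)) // <nil>

        bad := Payload{
            Matrix:  [][]string{{}},                // inner slice violates len=1
            Headers: map[string]string{"": "oops"}, // empty key violates keys,required
        }
        fmt.Println(validate.Struct(bad)) // reports both failures
    }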
+ + Usage: required_with + +Examples: + + // require the field if the Field1 is present: + Usage: required_with=Field1 + + // require the field if the Field1 or Field2 is present: + Usage: required_with=Field1 Field2 + +Required With All + +The field under validation must be present and not empty only if all +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with_all + +Example: + + // require the field if the Field1 and Field2 is present: + Usage: required_with_all=Field1 Field2 + +Required Without + +The field under validation must be present and not empty only when any +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without + +Examples: + + // require the field if the Field1 is not present: + Usage: required_without=Field1 + + // require the field if the Field1 or Field2 is not present: + Usage: required_without=Field1 Field2 + +Required Without All + +The field under validation must be present and not empty only when all +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without_all + +Example: + + // require the field if the Field1 and Field2 is not present: + Usage: required_without_all=Field1 Field2 + +Is Default + +This validates that the value is the default value and is almost the +opposite of required. + + Usage: isdefault + +Length + +For numbers, length will ensure that the value is +equal to the parameter given. For strings, it checks that +the string length is exactly that number of characters. For slices, +arrays, and maps, validates the number of items. + + Usage: len=10 + +Maximum + +For numbers, max will ensure that the value is +less than or equal to the parameter given. For strings, it checks +that the string length is at most that number of characters. For +slices, arrays, and maps, validates the number of items. + + Usage: max=10 + +Minimum + +For numbers, min will ensure that the value is +greater or equal to the parameter given. For strings, it checks that +the string length is at least that number of characters. For slices, +arrays, and maps, validates the number of items. + + Usage: min=10 + +Equals + +For strings & numbers, eq will ensure that the value is +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + + Usage: eq=10 + +Not Equal + +For strings & numbers, ne will ensure that the value is not +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + + Usage: ne=10 + +One Of + +For strings, ints, and uints, oneof will ensure that the value +is one of the values in the parameter. The parameter should be +a list of values separated by whitespace. Values may be +strings or numbers. + + Usage: oneof=red green + oneof=5 7 9 + +Greater Than + +For numbers, this will ensure that the value is greater than the +parameter given. For strings, it checks that the string length +is greater than that number of characters. For slices, arrays +and maps it validates the number of items. + +Example #1 + + Usage: gt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than time.Now.UTC(). 
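+
+For instance (the Event type below is illustrative only), Attendees must be
+greater than zero and ExpiresAt must be a time in the future:
+
+    type Event struct {
+        Attendees int       `validate:"gt=0"`
+        ExpiresAt time.Time `validate:"gt"`
+    }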
+ + Usage: gt + +Greater Than or Equal + +Same as 'min' above. Kept both to make terminology with 'len' easier. + + +Example #1 + + Usage: gte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than or equal to time.Now.UTC(). + + Usage: gte + +Less Than + +For numbers, this will ensure that the value is less than the parameter given. +For strings, it checks that the string length is less than that number of +characters. For slices, arrays, and maps it validates the number of items. + +Example #1 + + Usage: lt=10 + +Example #2 (time.Time) +For time.Time ensures the time value is less than time.Now.UTC(). + + Usage: lt + +Less Than or Equal + +Same as 'max' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: lte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than or equal to time.Now.UTC(). + + Usage: lte + +Field Equals Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Example #1: + + // Validation on Password field using: + Usage: eqfield=ConfirmPassword + +Example #2: + + // Validating by field: + validate.VarWithValue(password, confirmpassword, "eqfield") + +Field Equals Another Field (relative) + +This does the same as eqfield except that it validates the field provided relative +to the top level struct. + + Usage: eqcsfield=InnerStructField.Field) + +Field Does Not Equal Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Examples: + + // Confirm two colors are not the same: + // + // Validation on Color field: + Usage: nefield=Color2 + + // Validating by field: + validate.VarWithValue(color1, color2, "nefield") + +Field Does Not Equal Another Field (relative) + +This does the same as nefield except that it validates the field provided +relative to the top level struct. + + Usage: necsfield=InnerStructField.Field + +Field Greater Than Another Field + +Only valid for Numbers and time.Time types, this will validate the field value +against another fields value either within a struct or passed in field. +usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtfield") + + +Field Greater Than Another Relative Field + +This does the same as gtfield except that it validates the field provided +relative to the top level struct. + + Usage: gtcsfield=InnerStructField.Field + +Field Greater Than or Equal To Another Field + +Only valid for Numbers and time.Time types, this will validate the field value +against another fields value either within a struct or passed in field. +usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtefield") + +Field Greater Than or Equal To Another Relative Field + +This does the same as gtefield except that it validates the field provided relative +to the top level struct. + + Usage: gtecsfield=InnerStructField.Field + +Less Than Another Field + +Only valid for Numbers and time.Time types, this will validate the field value +against another fields value either within a struct or passed in field. 
+usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltfield") + +Less Than Another Relative Field + +This does the same as ltfield except that it validates the field provided relative +to the top level struct. + + Usage: ltcsfield=InnerStructField.Field + +Less Than or Equal To Another Field + +Only valid for Numbers and time.Time types, this will validate the field value +against another fields value either within a struct or passed in field. +usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltefield") + +Less Than or Equal To Another Relative Field + +This does the same as ltefield except that it validates the field provided relative +to the top level struct. + + Usage: ltecsfield=InnerStructField.Field + +Field Contains Another Field + +This does the same as contains except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: containsfield=InnerStructField.Field + +Field Excludes Another Field + +This does the same as excludes except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: excludesfield=InnerStructField.Field + +Unique + +For arrays & slices, unique will ensure that there are no duplicates. +For maps, unique will ensure that there are no duplicate values. + + Usage: unique + +Alpha Only + +This validates that a string value contains ASCII alpha characters only + + Usage: alpha + +Alphanumeric + +This validates that a string value contains ASCII alphanumeric characters only + + Usage: alphanum + +Alpha Unicode + +This validates that a string value contains unicode alpha characters only + + Usage: alphaunicode + +Alphanumeric Unicode + +This validates that a string value contains unicode alphanumeric characters only + + Usage: alphanumunicode + +Numeric + +This validates that a string value contains a basic numeric value. +basic excludes exponents etc... +for integers or float it returns true. + + Usage: numeric + +Hexadecimal String + +This validates that a string value contains a valid hexadecimal. + + Usage: hexadecimal + +Hexcolor String + +This validates that a string value contains a valid hex color including +hashtag (#) + + Usage: hexcolor + +RGB String + +This validates that a string value contains a valid rgb color + + Usage: rgb + +RGBA String + +This validates that a string value contains a valid rgba color + + Usage: rgba + +HSL String + +This validates that a string value contains a valid hsl color + + Usage: hsl + +HSLA String + +This validates that a string value contains a valid hsla color + + Usage: hsla + +E-mail String + +This validates that a string value contains a valid email +This may not conform to all possibilities of any rfc standard, but neither +does any email provider accept all possibilities. + + Usage: email + +File path + +This validates that a string value contains a valid file path and that +the file exists on the machine. +This is done using os.Stat, which is a platform independent function. 
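+
+For example, a single value can be checked directly with Var (the path below
+is only a sample and must exist on the machine for validation to pass):
+
+    validate := validator.New()
+    err := validate.Var("/etc/hosts", "file") // err is nil when the file exists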
+ + Usage: file + +URL String + +This validates that a string value contains a valid url +This will accept any url the golang request uri accepts but must contain +a schema for example http:// or rtmp:// + + Usage: url + +URI String + +This validates that a string value contains a valid uri +This will accept any uri the golang request uri accepts + + Usage: uri + +Urn RFC 2141 String + +This validataes that a string value contains a valid URN +according to the RFC 2141 spec. + + Usage: urn_rfc2141 + +Base64 String + +This validates that a string value contains a valid base64 value. +Although an empty string is valid base64 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. + + Usage: base64 + +Base64URL String + +This validates that a string value contains a valid base64 URL safe value +according the the RFC4648 spec. +Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64url + +Bitcoin Address + +This validates that a string value contains a valid bitcoin address. +The format of the string is checked to ensure it matches one of the three formats +P2PKH, P2SH and performs checksum validation. + + Usage: btc_addr + +Bitcoin Bech32 Address (segwit) + +This validates that a string value contains a valid bitcoin Bech32 address as defined +by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) +Special thanks to Pieter Wuille for providng reference implementations. + + Usage: btc_addr_bech32 + +Ethereum Address + +This validates that a string value contains a valid ethereum address. +The format of the string is checked to ensure it matches the standard Ethereum address format +Full validation is blocked by https://github.com/golang/crypto/pull/28 + + Usage: eth_addr + +Contains + +This validates that a string value contains the substring value. + + Usage: contains=@ + +Contains Any + +This validates that a string value contains any Unicode code points +in the substring value. + + Usage: containsany=!@#? + +Contains Rune + +This validates that a string value contains the supplied rune value. + + Usage: containsrune=@ + +Excludes + +This validates that a string value does not contain the substring value. + + Usage: excludes=@ + +Excludes All + +This validates that a string value does not contain any Unicode code +points in the substring value. + + Usage: excludesall=!@#? + +Excludes Rune + +This validates that a string value does not contain the supplied rune value. + + Usage: excludesrune=@ + +Starts With + +This validates that a string value starts with the supplied string value + + Usage: startswith=hello + +Ends With + +This validates that a string value ends with the supplied string value + + Usage: endswith=goodbye + +International Standard Book Number + +This validates that a string value contains a valid isbn10 or isbn13 value. + + Usage: isbn + +International Standard Book Number 10 + +This validates that a string value contains a valid isbn10 value. + + Usage: isbn10 + +International Standard Book Number 13 + +This validates that a string value contains a valid isbn13 value. + + Usage: isbn13 + +Universally Unique Identifier UUID + +This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead. 
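+
+For example (the sample values are illustrative):
+
+    validate.Var("a987fbc9-4bed-3078-cf07-9141ba07c9f3", "uuid") // passes
+    validate.Var("A987FBC9-4BED-3078-CF07-9141BA07C9F3", "uuid") // fails; use uuid_rfc4122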
+ + Usage: uuid + +Universally Unique Identifier UUID v3 + +This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead. + + Usage: uuid3 + +Universally Unique Identifier UUID v4 + +This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead. + + Usage: uuid4 + +Universally Unique Identifier UUID v5 + +This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead. + + Usage: uuid5 + +ASCII + +This validates that a string value contains only ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: ascii + +Printable ASCII + +This validates that a string value contains only printable ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: printascii + +Multi-Byte Characters + +This validates that a string value contains one or more multibyte characters. +NOTE: if the string is blank, this validates as true. + + Usage: multibyte + +Data URL + +This validates that a string value contains a valid DataURI. +NOTE: this will also validate that the data portion is valid base64 + + Usage: datauri + +Latitude + +This validates that a string value contains a valid latitude. + + Usage: latitude + +Longitude + +This validates that a string value contains a valid longitude. + + Usage: longitude + +Social Security Number SSN + +This validates that a string value contains a valid U.S. Social Security Number. + + Usage: ssn + +Internet Protocol Address IP + +This validates that a string value contains a valid IP Address. + + Usage: ip + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid v4 IP Address. + + Usage: ipv4 + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid v6 IP Address. + + Usage: ipv6 + +Classless Inter-Domain Routing CIDR + +This validates that a string value contains a valid CIDR Address. + + Usage: cidr + +Classless Inter-Domain Routing CIDRv4 + +This validates that a string value contains a valid v4 CIDR Address. + + Usage: cidrv4 + +Classless Inter-Domain Routing CIDRv6 + +This validates that a string value contains a valid v6 CIDR Address. + + Usage: cidrv6 + +Transmission Control Protocol Address TCP + +This validates that a string value contains a valid resolvable TCP Address. + + Usage: tcp_addr + +Transmission Control Protocol Address TCPv4 + +This validates that a string value contains a valid resolvable v4 TCP Address. + + Usage: tcp4_addr + +Transmission Control Protocol Address TCPv6 + +This validates that a string value contains a valid resolvable v6 TCP Address. + + Usage: tcp6_addr + +User Datagram Protocol Address UDP + +This validates that a string value contains a valid resolvable UDP Address. + + Usage: udp_addr + +User Datagram Protocol Address UDPv4 + +This validates that a string value contains a valid resolvable v4 UDP Address. + + Usage: udp4_addr + +User Datagram Protocol Address UDPv6 + +This validates that a string value contains a valid resolvable v6 UDP Address. + + Usage: udp6_addr + +Internet Protocol Address IP + +This validates that a string value contains a valid resolvable IP Address. + + Usage: ip_addr + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid resolvable v4 IP Address. 
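+
+For example (illustrative only), ipv4 checks the format alone, while ip4_addr
+additionally requires the address to be resolvable:
+
+    validate.Var("192.168.0.1", "ipv4")     // format check only
+    validate.Var("192.168.0.1", "ip4_addr") // must also resolve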
+ + Usage: ip4_addr + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid resolvable v6 IP Address. + + Usage: ip6_addr + +Unix domain socket end point Address + +This validates that a string value contains a valid Unix Address. + + Usage: unix_addr + +Media Access Control Address MAC + +This validates that a string value contains a valid MAC Address. + + Usage: mac + +Note: See Go's ParseMAC for accepted formats and types: + + http://golang.org/src/net/mac.go?s=866:918#L29 + +Hostname RFC 952 + +This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952 + + Usage: hostname + +Hostname RFC 1123 + +This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123 + + Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias. + +Full Qualified Domain Name (FQDN) + +This validates that a string value contains a valid FQDN. + + Usage: fqdn + +HTML Tags + +This validates that a string value appears to be an HTML element tag +including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element + + Usage: html + +HTML Encoded + +This validates that a string value is a proper character reference in decimal +or hexadecimal format + + Usage: html_encoded + +URL Encoded + +This validates that a string value is percent-encoded (URL encoded) according +to https://tools.ietf.org/html/rfc3986#section-2.1 + + Usage: url_encoded + +Directory + +This validates that a string value contains a valid directory and that +it exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: dir + +Alias Validators and Tags + +NOTE: When returning an error, the tag returned in "FieldError" will be +the alias tag unless the dive tag is part of the alias. Everything after the +dive tag is not reported as the alias tag. Also, the "ActualTag" in the before +case will be the actual tag within the alias that failed. + +Here is a list of the current built in alias tags: + + "iscolor" + alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) + +Validator notes: + + regex + a regex validator won't be added because commas and = signs can be part + of a regex which conflict with the validation definitions. Although + workarounds can be made, they take away from using pure regex's. + Furthermore it's quick and dirty but the regex's become harder to + maintain and are not reusable, so it's as much a programming philosophy + as anything. + + In place of this new validator functions should be created; a regex can + be used within the validator function and even be precompiled for better + efficiency within regexes.go. + + And the best reason, you can submit a pull request and we can keep on + adding to the validation library of this package! + +Panics + +This package panics when bad input is provided, this is by design, bad code like +that should not make it to production. + + type Test struct { + TestField string `validate:"nonexistantfunction=1"` + } + + t := &Test{ + TestField: "Test" + } + + validate.Struct(t) // this will panic + +Non standard validators + +A collection of validation rules that are frequently needed but are more +complex than the ones found in the baked in validators. +A non standard validator must be registered manually using any tag you like. +See below examples of registration and use. 
+ + type Test struct { + TestField string `validate:"yourtag"` + } + + t := &Test{ + TestField: "Test" + } + + validate := validator.New() + validate.RegisterValidation("yourtag", validations.ValidatorName) + + NotBlank + This validates that the value is not blank or with length zero. + For strings ensures they do not contain only spaces. For channels, maps, slices and arrays + ensures they don't have zero length. For others, a non empty value is required. + + Usage: notblank +*/ +package validator diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/errors.go b/backend/vendor/gopkg.in/go-playground/validator.v9/errors.go new file mode 100644 index 00000000..46c24c9b --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/errors.go @@ -0,0 +1,272 @@ +package validator + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + ut "github.com/go-playground/universal-translator" +) + +const ( + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" +) + +// ValidationErrorsTranslations is the translation return type +type ValidationErrorsTranslations map[string]string + +// InvalidValidationError describes an invalid argument passed to +// `Struct`, `StructExcept`, StructPartial` or `Field` +type InvalidValidationError struct { + Type reflect.Type +} + +// Error returns InvalidValidationError message +func (e *InvalidValidationError) Error() string { + + if e.Type == nil { + return "validator: (nil)" + } + + return "validator: (nil " + e.Type.String() + ")" +} + +// ValidationErrors is an array of FieldError's +// for use in custom error messages post validation. +type ValidationErrors []FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. +// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors array +func (ve ValidationErrors) Error() string { + + buff := bytes.NewBufferString("") + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + + fe = ve[i].(*fieldError) + buff.WriteString(fe.Error()) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// Translate translates all of the ValidationErrors +func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations { + + trans := make(ValidationErrorsTranslations) + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + fe = ve[i].(*fieldError) + + // // in case an Anonymous struct was used, ensure that the key + // // would be 'Username' instead of ".Username" + // if len(fe.ns) > 0 && fe.ns[:1] == "." { + // trans[fe.ns[1:]] = fe.Translate(ut) + // continue + // } + + trans[fe.ns] = fe.Translate(ut) + } + + return trans +} + +// FieldError contains all functions to get error details +type FieldError interface { + + // returns the validation tag that failed. if the + // validation was an alias, this will return the + // alias name and not the underlying tag that failed. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "iscolor" + Tag() string + + // returns the validation tag that failed, even if an + // alias the actual tag within the alias will be returned. + // If an 'or' validation fails the entire or will be returned. + // + // eg. 
alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "hexcolor|rgb|rgba|hsl|hsla" + ActualTag() string + + // returns the namespace for the field error, with the tag + // name taking precedence over the fields actual name. + // + // eg. JSON name "User.fname" + // + // See StructNamespace() for a version that returns actual names. + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract it's name + Namespace() string + + // returns the namespace for the field error, with the fields + // actual name. + // + // eq. "User.FirstName" see Namespace for comparison + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract it's name + StructNamespace() string + + // returns the fields name with the tag name taking precedence over the + // fields actual name. + // + // eq. JSON name "fname" + // see StructField for comparison + Field() string + + // returns the fields actual name from the struct, when able to determine. + // + // eq. "FirstName" + // see Field for comparison + StructField() string + + // returns the actual fields value in case needed for creating the error + // message + Value() interface{} + + // returns the param value, in string form for comparison; this will also + // help with generating an error message + Param() string + + // Kind returns the Field's reflect Kind + // + // eg. time.Time's kind is a struct + Kind() reflect.Kind + + // Type returns the Field's reflect Type + // + // // eg. time.Time's type is time.Time + Type() reflect.Type + + // returns the FieldError's translated error + // from the provided 'ut.Translator' and registered 'TranslationFunc' + // + // NOTE: if no registered translator can be found it returns the same as + // calling fe.Error() + Translate(ut ut.Translator) string +} + +// compile time interface checks +var _ FieldError = new(fieldError) +var _ error = new(fieldError) + +// fieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +// it complies with the FieldError interface +type fieldError struct { + v *Validate + tag string + actualTag string + ns string + structNs string + fieldLen uint8 + structfieldLen uint8 + value interface{} + param string + kind reflect.Kind + typ reflect.Type +} + +// Tag returns the validation tag that failed. +func (fe *fieldError) Tag() string { + return fe.tag +} + +// ActualTag returns the validation tag that failed, even if an +// alias the actual tag within the alias will be returned. +func (fe *fieldError) ActualTag() string { + return fe.actualTag +} + +// Namespace returns the namespace for the field error, with the tag +// name taking precedence over the fields actual name. +func (fe *fieldError) Namespace() string { + return fe.ns +} + +// StructNamespace returns the namespace for the field error, with the fields +// actual name. +func (fe *fieldError) StructNamespace() string { + return fe.structNs +} + +// Field returns the fields name with the tag name taking precedence over the +// fields actual name. +func (fe *fieldError) Field() string { + + return fe.ns[len(fe.ns)-int(fe.fieldLen):] + // // return fe.field + // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):] + + // log.Println("FLD:", fld) + + // if len(fld) > 0 && fld[:1] == "." 
{ + // return fld[1:] + // } + + // return fld +} + +// returns the fields actual name from the struct, when able to determine. +func (fe *fieldError) StructField() string { + // return fe.structField + return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):] +} + +// Value returns the actual fields value in case needed for creating the error +// message +func (fe *fieldError) Value() interface{} { + return fe.value +} + +// Param returns the param value, in string form for comparison; this will +// also help with generating an error message +func (fe *fieldError) Param() string { + return fe.param +} + +// Kind returns the Field's reflect Kind +func (fe *fieldError) Kind() reflect.Kind { + return fe.kind +} + +// Type returns the Field's reflect Type +func (fe *fieldError) Type() reflect.Type { + return fe.typ +} + +// Error returns the fieldError's error message +func (fe *fieldError) Error() string { + return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag) +} + +// Translate returns the FieldError's translated error +// from the provided 'ut.Translator' and registered 'TranslationFunc' +// +// NOTE: is not registered translation can be found it returns the same +// as calling fe.Error() +func (fe *fieldError) Translate(ut ut.Translator) string { + + m, ok := fe.v.transTagFunc[ut] + if !ok { + return fe.Error() + } + + fn, ok := m[fe.tag] + if !ok { + return fe.Error() + } + + return fn(ut, fe) +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/field_level.go b/backend/vendor/gopkg.in/go-playground/validator.v9/field_level.go new file mode 100644 index 00000000..cbfbc158 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/field_level.go @@ -0,0 +1,69 @@ +package validator + +import "reflect" + +// FieldLevel contains all the information and helper functions +// to validate a field +type FieldLevel interface { + + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any or + // the comparison value if called 'VarWithValue' + Parent() reflect.Value + + // returns current field for validation + Field() reflect.Value + + // returns the field's name with the tag + // name taking precedence over the fields actual name. + FieldName() string + + // returns the struct field's name + StructFieldName() string + + // returns param for validation against current field + Param() string + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and it's kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + GetStructFieldOK() (reflect.Value, reflect.Kind, bool) +} + +var _ FieldLevel = new(validate) + +// Field returns current field for validation +func (v *validate) Field() reflect.Value { + return v.flField +} + +// FieldName returns the field's name with the tag +// name takeing precedence over the fields actual name. 
+func (v *validate) FieldName() string { + return v.cf.altName +} + +// StructFieldName returns the struct field's name +func (v *validate) StructFieldName() string { + return v.cf.name +} + +// Param returns param for validation against current field +func (v *validate) Param() string { + return v.ct.param +} + +// GetStructFieldOK returns Param returns param for validation against current field +func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) { + return v.getStructFieldOKInternal(v.slflParent, v.ct.param) +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/logo.png b/backend/vendor/gopkg.in/go-playground/validator.v9/logo.png new file mode 100644 index 00000000..355000f5 Binary files /dev/null and b/backend/vendor/gopkg.in/go-playground/validator.v9/logo.png differ diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/regexes.go b/backend/vendor/gopkg.in/go-playground/validator.v9/regexes.go new file mode 100644 index 00000000..0253d709 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/regexes.go @@ -0,0 +1,95 @@ +package validator + +import "regexp" + +const ( + alphaRegexString = "^[a-zA-Z]+$" + alphaNumericRegexString = "^[a-zA-Z0-9]+$" + alphaUnicodeRegexString = "^[\\p{L}]+$" + alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$" + numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" + numberRegexString = "^[0-9]+$" + hexadecimalRegexString = "^[0-9a-fA-F]+$" + hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" + rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" + hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" + iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" + iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" + uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + aSCIIRegexString = "^[\x00-\x7F]*$" + printableASCIIRegexString = "^[\x20-\x7E]*$" + multibyteRegexString = "[^\x00-\x7F]" + dataURIRegexString = "^data:.+\\/(.+);base64$" + latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$` + hostnameRegexStringRFC952 = `^[a-zA-Z][a-zA-Z0-9\-\.]+[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 + hostnameRegexStringRFC1123 = `^[a-zA-Z0-9][a-zA-Z0-9\-\.]+[a-zA-Z0-9]$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 + btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address + btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + ethAddressRegexString = `^0x[0-9a-fA-F]{40}$` + ethAddressUpperRegexString = 
`^0x[0-9A-F]{40}$` + ethAddressLowerRegexString = `^0x[0-9a-f]{40}$` + uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})` + hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?` + hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` +) + +var ( + alphaRegex = regexp.MustCompile(alphaRegexString) + alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) + alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) + numericRegex = regexp.MustCompile(numericRegexString) + numberRegex = regexp.MustCompile(numberRegexString) + hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) + hexcolorRegex = regexp.MustCompile(hexcolorRegexString) + rgbRegex = regexp.MustCompile(rgbRegexString) + rgbaRegex = regexp.MustCompile(rgbaRegexString) + hslRegex = regexp.MustCompile(hslRegexString) + hslaRegex = regexp.MustCompile(hslaRegexString) + emailRegex = regexp.MustCompile(emailRegexString) + base64Regex = regexp.MustCompile(base64RegexString) + base64URLRegex = regexp.MustCompile(base64URLRegexString) + iSBN10Regex = regexp.MustCompile(iSBN10RegexString) + iSBN13Regex = regexp.MustCompile(iSBN13RegexString) + uUID3Regex = regexp.MustCompile(uUID3RegexString) + uUID4Regex = regexp.MustCompile(uUID4RegexString) + uUID5Regex = regexp.MustCompile(uUID5RegexString) + uUIDRegex = regexp.MustCompile(uUIDRegexString) + uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) + aSCIIRegex = regexp.MustCompile(aSCIIRegexString) + printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) + multibyteRegex = regexp.MustCompile(multibyteRegexString) + dataURIRegex = regexp.MustCompile(dataURIRegexString) + latitudeRegex = regexp.MustCompile(latitudeRegexString) + longitudeRegex = regexp.MustCompile(longitudeRegexString) + sSNRegex = regexp.MustCompile(sSNRegexString) + hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) + btcAddressRegex = regexp.MustCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = regexp.MustCompile(ethAddressRegexString) + ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString) + ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString) + uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) + hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) + hTMLRegex = regexp.MustCompile(hTMLRegexString) +) diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/struct_level.go b/backend/vendor/gopkg.in/go-playground/validator.v9/struct_level.go new file mode 100644 index 00000000..57691ee3 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/struct_level.go @@ -0,0 +1,175 @@ +package validator + +import ( + "context" + "reflect" +) + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(sl StructLevel) + +// StructLevelFuncCtx accepts all values needed for struct level validation +// but also allows passing of contextual validation information via context.Context. 
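+//
+// A registration sketch (the User type and the rule below are purely
+// illustrative):
+//
+//  validate.RegisterStructValidation(func(sl StructLevel) {
+//      u := sl.Current().Interface().(User)
+//      if len(u.FirstName) == 0 && len(u.LastName) == 0 {
+//          sl.ReportError(u.FirstName, "FirstName", "FirstName", "fnameorlname", "")
+//      }
+//  }, User{})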
+type StructLevelFuncCtx func(ctx context.Context, sl StructLevel) + +// wrapStructLevelFunc wraps normal StructLevelFunc makes it compatible with StructLevelFuncCtx +func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx { + return func(ctx context.Context, sl StructLevel) { + fn(sl) + } +} + +// StructLevel contains all the information and helper functions +// to validate a struct +type StructLevel interface { + + // returns the main validation object, in case one wants to call validations internally. + // this is so you don't have to use anonymous functions to get access to the validate + // instance. + Validator() *Validate + + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any + Parent() reflect.Value + + // returns the current struct. + Current() reflect.Value + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and its kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // reports an error just by passing the field and tag information + // + // NOTES: + // + // fieldName and altName get appended to the existing namespace that + // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending + // on the nesting + // + // tag can be an existing validation tag or just something you make up + // and process on the flip side it's up to you. + ReportError(field interface{}, fieldName, structFieldName string, tag, param string) + + // reports an error just by passing ValidationErrors + // + // NOTES: + // + // relativeNamespace and relativeActualNamespace get appended to the + // existing namespace that validator is on. + // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending + // on the nesting. most of the time they will be blank, unless you validate + // at a level lower the the current field depth + ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) +} + +var _ StructLevel = new(validate) + +// Top returns the top level struct +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Top() reflect.Value { + return v.top +} + +// Parent returns the current structs parent +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Parent() reflect.Value { + return v.slflParent +} + +// Current returns the current struct. +func (v *validate) Current() reflect.Value { + return v.slCurrent +} + +// Validator returns the main validation object, in case one want to call validations internally. +func (v *validate) Validator() *Validate { + return v.v +} + +// ExtractType gets the actual underlying type of field value. 
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) { + return v.extractTypeInternal(field, false) +} + +// ReportError reports an error just by passing the field and tag information +func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) { + + fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false) + + if len(structFieldName) == 0 { + structFieldName = fieldName + } + + v.str1 = string(append(v.ns, fieldName...)) + + if v.v.hasTagNameFunc || fieldName != structFieldName { + v.str2 = string(append(v.actualNs, structFieldName...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + param: param, + kind: kind, + }, + ) + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + value: fv.Interface(), + param: param, + kind: kind, + typ: fv.Type(), + }, + ) +} + +// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation. +// +// NOTE: this function prepends the current namespace to the relative ones. +func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) { + + var err *fieldError + + for i := 0; i < len(errs); i++ { + + err = errs[i].(*fieldError) + err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...)) + err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...)) + + v.errs = append(v.errs, err) + } +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/translations.go b/backend/vendor/gopkg.in/go-playground/validator.v9/translations.go new file mode 100644 index 00000000..4d9d75c1 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/translations.go @@ -0,0 +1,11 @@ +package validator + +import ut "github.com/go-playground/universal-translator" + +// TranslationFunc is the function type used to register or override +// custom translations +type TranslationFunc func(ut ut.Translator, fe FieldError) string + +// RegisterTranslationsFunc allows for registering of translations +// for a 'ut.Translator' for use within the 'TranslationFunc' +type RegisterTranslationsFunc func(ut ut.Translator) error diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/util.go b/backend/vendor/gopkg.in/go-playground/validator.v9/util.go new file mode 100644 index 00000000..16a5517c --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/util.go @@ -0,0 +1,257 @@ +package validator + +import ( + "reflect" + "strconv" + "strings" +) + +// extractTypeInternal gets the actual underlying type of field value. +// It will dive into pointers, customTypes and return you the +// underlying value and it's kind. 
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { + +BEGIN: + switch current.Kind() { + case reflect.Ptr: + + nullable = true + + if current.IsNil() { + return current, reflect.Ptr, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Interface: + + nullable = true + + if current.IsNil() { + return current, reflect.Interface, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Invalid: + return current, reflect.Invalid, nullable + + default: + + if v.v.hasCustomFuncs { + + if fn, ok := v.v.customFuncs[current.Type()]; ok { + current = reflect.ValueOf(fn(current)) + goto BEGIN + } + } + + return current, current.Kind(), nullable + } +} + +// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and +// returns the field, field kind and whether is was successful in retrieving the field at all. +// +// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field +// could not be retrieved because it didn't exist. +func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, found bool) { + +BEGIN: + current, kind, _ = v.ExtractType(val) + + if kind == reflect.Invalid { + return + } + + if namespace == "" { + found = true + return + } + + switch kind { + + case reflect.Ptr, reflect.Interface: + return + + case reflect.Struct: + + typ := current.Type() + fld := namespace + var ns string + + if typ != timeType { + + idx := strings.Index(namespace, namespaceSeparator) + + if idx != -1 { + fld = namespace[:idx] + ns = namespace[idx+1:] + } else { + ns = "" + } + + bracketIdx := strings.Index(fld, leftBracket) + if bracketIdx != -1 { + fld = fld[:bracketIdx] + + ns = namespace[bracketIdx:] + } + + val = current.FieldByName(fld) + namespace = ns + goto BEGIN + } + + case reflect.Array, reflect.Slice: + idx := strings.Index(namespace, leftBracket) + idx2 := strings.Index(namespace, rightBracket) + + arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) + + if arrIdx >= current.Len() { + return current, kind, false + } + + startIdx := idx2 + 1 + + if startIdx < len(namespace) { + if namespace[startIdx:startIdx+1] == namespaceSeparator { + startIdx++ + } + } + + val = current.Index(arrIdx) + namespace = namespace[startIdx:] + goto BEGIN + + case reflect.Map: + idx := strings.Index(namespace, leftBracket) + 1 + idx2 := strings.Index(namespace, rightBracket) + + endIdx := idx2 + + if endIdx+1 < len(namespace) { + if namespace[endIdx+1:endIdx+2] == namespaceSeparator { + endIdx++ + } + } + + key := namespace[idx:idx2] + + switch current.Type().Key().Kind() { + case reflect.Int: + i, _ := strconv.Atoi(key) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Int8: + i, _ := strconv.ParseInt(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(int8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int16: + i, _ := strconv.ParseInt(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(int16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int32: + i, _ := strconv.ParseInt(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(int32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int64: + i, _ := strconv.ParseInt(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Uint: + i, _ := strconv.ParseUint(key, 10, 0) + val = 
current.MapIndex(reflect.ValueOf(uint(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint8: + i, _ := strconv.ParseUint(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(uint8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint16: + i, _ := strconv.ParseUint(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(uint16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint32: + i, _ := strconv.ParseUint(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(uint32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint64: + i, _ := strconv.ParseUint(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Float32: + f, _ := strconv.ParseFloat(key, 32) + val = current.MapIndex(reflect.ValueOf(float32(f))) + namespace = namespace[endIdx+1:] + + case reflect.Float64: + f, _ := strconv.ParseFloat(key, 64) + val = current.MapIndex(reflect.ValueOf(f)) + namespace = namespace[endIdx+1:] + + case reflect.Bool: + b, _ := strconv.ParseBool(key) + val = current.MapIndex(reflect.ValueOf(b)) + namespace = namespace[endIdx+1:] + + // reflect.Type = string + default: + val = current.MapIndex(reflect.ValueOf(key)) + namespace = namespace[endIdx+1:] + } + + goto BEGIN + } + + // if got here there was more namespace, cannot go any deeper + panic("Invalid field namespace") +} + +// asInt returns the parameter as a int64 +// or panics if it can't convert +func asInt(param string) int64 { + + i, err := strconv.ParseInt(param, 0, 64) + panicIf(err) + + return i +} + +// asUint returns the parameter as a uint64 +// or panics if it can't convert +func asUint(param string) uint64 { + + i, err := strconv.ParseUint(param, 0, 64) + panicIf(err) + + return i +} + +// asFloat returns the parameter as a float64 +// or panics if it can't convert +func asFloat(param string) float64 { + + i, err := strconv.ParseFloat(param, 64) + panicIf(err) + + return i +} + +func panicIf(err error) { + if err != nil { + panic(err.Error()) + } +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/validator.go b/backend/vendor/gopkg.in/go-playground/validator.v9/validator.go new file mode 100644 index 00000000..67473f1e --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/validator.go @@ -0,0 +1,475 @@ +package validator + +import ( + "context" + "fmt" + "reflect" + "strconv" +) + +// per validate contruct +type validate struct { + v *Validate + top reflect.Value + ns []byte + actualNs []byte + errs ValidationErrors + includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise + ffn FilterFunc + slflParent reflect.Value // StructLevel & FieldLevel + slCurrent reflect.Value // StructLevel & FieldLevel + flField reflect.Value // StructLevel & FieldLevel + cf *cField // StructLevel & FieldLevel + ct *cTag // StructLevel & FieldLevel + misc []byte // misc reusable + str1 string // misc reusable + str2 string // misc reusable + fldIsPointer bool // StructLevel & FieldLevel + isPartial bool + hasExcludes bool +} + +// parent and current will be the same the first run of validateStruct +func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) { + + cs, ok := v.v.structCache.Get(typ) + if !ok { + cs = v.v.extractStructCache(current, typ.Name()) + } + + if len(ns) == 0 && len(cs.name) != 0 { + + ns = append(ns, cs.name...) 
+ ns = append(ns, '.') + + structNs = append(structNs, cs.name...) + structNs = append(structNs, '.') + } + + // ct is nil on top level struct, and structs as fields that have no tag info + // so if nil or if not nil and the structonly tag isn't present + if ct == nil || ct.typeof != typeStructOnly { + + var f *cField + + for i := 0; i < len(cs.fields); i++ { + + f = cs.fields[i] + + if v.isPartial { + + if v.ffn != nil { + // used with StructFiltered + if v.ffn(append(structNs, f.name...)) { + continue + } + + } else { + // used with StructPartial & StructExcept + _, ok = v.includeExclude[string(append(structNs, f.name...))] + + if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) { + continue + } + } + } + + v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. + // first iteration will have no info about nostructlevel tag, and is checked prior to + // calling the next iteration of validateStruct called from traverseField. + if cs.fn != nil { + + v.slflParent = parent + v.slCurrent = current + v.ns = ns + v.actualNs = structNs + + cs.fn(ctx, v) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) { + + var typ reflect.Type + var kind reflect.Kind + + current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault { + return + } + + if ct.hasTag { + + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + param: ct.param, + kind: kind, + }, + ) + + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: current.Type(), + }, + ) + + return + } + + case reflect.Struct: + + typ = current.Type() + + if typ != timeType { + + if ct != nil { + + if ct.typeof == typeStructOnly { + goto CONTINUE + } else if ct.typeof == typeIsDefault { + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + return + } + } + + ct = ct.next + } + + if ct != nil && ct.typeof == typeNoStructLevel { + return + } + + CONTINUE: + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do 
it that way, should call 'Struct', but no harm... + // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, current, current, typ, ns, structNs, ct) + return + } + } + + if !ct.hasTag { + return + } + + typ = current.Type() + +OUTER: + for { + if ct == nil { + return + } + + switch ct.typeof { + + case typeOmitEmpty: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !v.fldIsPointer && !hasValue(v) { + return + } + + ct = ct.next + continue + + case typeEndKeys: + return + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + var i64 int64 + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + + i64 = int64(i) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct) + } + + case reflect.Map: + + var pv string + reusableCF := &cField{} + + for _, key := range current.MapKeys() { + + pv = fmt.Sprintf("%v", key.Interface()) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + + if ct != nil && ct.typeof == typeKeys && ct.keys != nil { + v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys) + // can be nil when just keys being validated + if ct.next != nil { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next) + } + } else { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct) + } + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! can't dive on a non slice or map") + } + + return + + case typeOr: + + v.misc = v.misc[0:0] + + for { + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if ct.fn(ctx, v) { + + // drain rest of the 'or' values, then continue or leave + for { + + ct = ct.next + + if ct == nil { + return + } + + if ct.typeof != typeOr { + continue OUTER + } + } + } + + v.misc = append(v.misc, '|') + v.misc = append(v.misc, ct.tag...) + + if ct.hasParam { + v.misc = append(v.misc, '=') + v.misc = append(v.misc, ct.param...) 
+ } + + if ct.isBlockEnd || ct.next == nil { + // if we get here, no valid 'or' value and no more tags + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if ct.hasAlias { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.actualAliasTag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + } else { + + tVal := string(v.misc)[1:] + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tVal, + actualTag: tVal, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } + + return + } + + ct = ct.next + } + + default: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + return + } + ct = ct.next + } + } + +} diff --git a/backend/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go b/backend/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go new file mode 100644 index 00000000..fc9db5a3 --- /dev/null +++ b/backend/vendor/gopkg.in/go-playground/validator.v9/validator_instance.go @@ -0,0 +1,600 @@ +package validator + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + ut "github.com/go-playground/universal-translator" +) + +const ( + defaultTagName = "validate" + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitempty = "omitempty" + isdefault = "isdefault" + skipValidationTag = "-" + diveTag = "dive" + keysTag = "keys" + endKeysTag = "endkeys" + requiredTag = "required" + namespaceSeparator = "." + leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + timeType = reflect.TypeOf(time.Time{}) + defaultCField = &cField{namesEqual: true} +) + +// FilterFunc is the type used to filter fields using +// StructFiltered(...) function. 
+// returning true results in the field being filtered/skiped from +// validation +type FilterFunc func(ns []byte) bool + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// TagNameFunc allows for adding of a custom tag name parser +type TagNameFunc func(field reflect.StructField) string + +// Validate contains the validator settings and cache +type Validate struct { + tagName string + pool *sync.Pool + hasCustomFuncs bool + hasTagNameFunc bool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]FuncCtx + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + tagCache *tagCache + structCache *structCache +} + +// New returns a new instance of 'validate' with sane defaults. +func New() *Validate { + + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: defaultTagName, + aliases: make(map[string]string, len(bakedInAliases)), + validations: make(map[string]FuncCtx, len(bakedInValidators)), + tagCache: tc, + structCache: sc, + } + + // must copy alias validators for separate validations to be used in each validator instance + for k, val := range bakedInAliases { + v.RegisterAlias(k, val) + } + + // must copy validators for separate validations to be used in each instance + for k, val := range bakedInValidators { + + // no need to error check here, baked in will always be valid + _ = v.registerValidation(k, wrapFunc(val), true) + } + + v.pool = &sync.Pool{ + New: func() interface{} { + return &validate{ + v: v, + ns: make([]byte, 0, 64), + actualNs: make([]byte, 0, 64), + misc: make([]byte, 32), + } + }, + } + + return v +} + +// SetTagName allows for changing of the default tag name of 'validate' +func (v *Validate) SetTagName(name string) { + v.tagName = name +} + +// RegisterTagNameFunc registers a function to get alternate names for StructFields. +// +// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names: +// +// validate.RegisterTagNameFunc(func(fld reflect.StructField) string { +// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// if name == "-" { +// return "" +// } +// return name +// }) +func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) { + v.tagNameFunc = fn + v.hasTagNameFunc = true +} + +// RegisterValidation adds a validation with the given tag +// +// NOTES: +// - if the key already exists, the previous validation function will be replaced. +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(tag string, fn Func) error { + return v.RegisterValidationCtx(tag, wrapFunc(fn)) +} + +// RegisterValidationCtx does the same as RegisterValidation on accepts a FuncCtx validation +// allowing context.Context validation support. 
+func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx) error { + return v.registerValidation(tag, fn, false) +} + +func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool) error { + + if len(tag) == 0 { + return errors.New("Function Key cannot be empty") + } + + if fn == nil { + return errors.New("Function cannot be empty") + } + + _, ok := restrictedTags[tag] + + if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { + panic(fmt.Sprintf(restrictedTagErr, tag)) + } + + v.validations[tag] = fn + + return nil +} + +// RegisterAlias registers a mapping of a single validation tag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. +// +// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAlias(alias, tags string) { + + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliases[alias] = tags +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...) +} + +// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing +// of contextual validation information via context.Context. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) { + + if v.structLevelFuncs == nil { + v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx) + } + + for _, t := range types { + tv := reflect.ValueOf(t) + if tv.Kind() == reflect.Ptr { + t = reflect.Indirect(tv).Interface() + } + + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + + if v.customFuncs == nil { + v.customFuncs = make(map[reflect.Type]CustomTypeFunc) + } + + for _, t := range types { + v.customFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterTranslation registers translations against the provided tag. +func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) { + + if v.transTagFunc == nil { + v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc) + } + + if err = registerFn(trans); err != nil { + return + } + + m, ok := v.transTagFunc[trans] + if !ok { + m = make(map[string]TranslationFunc) + v.transTagFunc[trans] = m + } + + m[tag] = translationFn + + return +} + +// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) Struct(s interface{}) error { + return v.StructCtx(context.Background(), s) +} + +// StructCtx validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified +// and also allows passing of context.Context for contextual validation information. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { + + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = false + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructFiltered validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error { + return v.StructFilteredCtx(context.Background(), s, fn) +} + +// StructFilteredCtx validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified and also allows passing of contextual validation information via +// context.Context +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = fn + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructPartial(s interface{}, fields ...string) error { + return v.StructPartialCtx(context.Background(), s, fields...) +} + +// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = false + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, k := range fields { + + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + + vd.misc = append(vd.misc[0:0], name...) + vd.misc = append(vd.misc, '.') + + for _, s := range flds { + + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + vd.misc = append(vd.misc, s[:idx]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + vd.misc = append(vd.misc, s[idx:idx2]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + + vd.misc = append(vd.misc, s...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.misc = append(vd.misc, '.') + } + } + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExcept(s interface{}, fields ...string) error { + return v.StructExceptCtx(context.Background(), s, fields...) +} + +// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = true + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, key := range fields { + + vd.misc = vd.misc[0:0] + + if len(name) > 0 { + vd.misc = append(vd.misc, name...) + vd.misc = append(vd.misc, '.') + } + + vd.misc = append(vd.misc, key...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// Var validates a single variable using tag style validation. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Var(field interface{}, tag string) error { + return v.VarCtx(context.Background(), field, tag) +} + +// VarCtx validates a single variable using tag style validation and allows passing of contextual +// validation validation information via context.Context. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithValue validates a single variable, against another variable/field's value using tag style validation +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. 
time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error { + return v.VarWithValueCtx(context.Background(), field, other, tag) +} + +// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and +// allows passing of contextual validation validation information via context.Context. +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + ctag := v.fetchCacheTag(tag) + otherVal := reflect.ValueOf(other) + vd := v.pool.Get().(*validate) + vd.top = otherVal + vd.isPartial = false + vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} diff --git a/backend/vendor/modules.txt b/backend/vendor/modules.txt index 634c337b..99c90a5c 100644 --- a/backend/vendor/modules.txt +++ b/backend/vendor/modules.txt @@ -14,9 +14,9 @@ github.com/gin-gonic/gin/render # github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 github.com/globalsign/mgo github.com/globalsign/mgo/bson +github.com/globalsign/mgo/internal/json github.com/globalsign/mgo/internal/sasl github.com/globalsign/mgo/internal/scram -github.com/globalsign/mgo/internal/json # github.com/go-playground/locales v0.12.1 github.com/go-playground/locales github.com/go-playground/locales/currency @@ -25,19 +25,19 @@ github.com/go-playground/universal-translator # github.com/golang/protobuf v1.3.1 github.com/golang/protobuf/proto # github.com/gomodule/redigo v2.0.0+incompatible -github.com/gomodule/redigo/redis github.com/gomodule/redigo/internal +github.com/gomodule/redigo/redis # github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 github.com/gopherjs/gopherjs/js # github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl -github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/ast github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/token -github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/scanner github.com/hashicorp/hcl/hcl/strconv 
+github.com/hashicorp/hcl/hcl/token +github.com/hashicorp/hcl/json/parser github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/json-iterator/go v1.1.6 @@ -69,8 +69,8 @@ github.com/smartystreets/assertions/internal/go-render/render github.com/smartystreets/assertions/internal/oglematchers # github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 github.com/smartystreets/goconvey/convey -github.com/smartystreets/goconvey/convey/reporting github.com/smartystreets/goconvey/convey/gotest +github.com/smartystreets/goconvey/convey/reporting # github.com/spf13/afero v1.1.2 github.com/spf13/afero github.com/spf13/afero/mem diff --git a/frontend/package.json b/frontend/package.json index 5b835a60..724b5e36 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -18,11 +18,12 @@ "@fortawesome/free-solid-svg-icons": "^5.9.0", "@fortawesome/vue-fontawesome": "^0.1.6", "@tinymce/tinymce-vue": "^2.0.0", + "ansi-to-html": "^0.6.13", "axios": "0.18.0", "cross-env": "^5.2.0", "dayjs": "^1.8.6", "echarts": "^4.1.0", - "element-ui": "2.4.6", + "element-ui": "2.13.0", "font-awesome": "^4.7.0", "js-cookie": "2.2.0", "normalize.css": "7.0.0", diff --git a/frontend/src/components/Common/CrawlConfirmDialog.vue b/frontend/src/components/Common/CrawlConfirmDialog.vue index 2286beb2..f2ad70c2 100644 --- a/frontend/src/components/Common/CrawlConfirmDialog.vue +++ b/frontend/src/components/Common/CrawlConfirmDialog.vue @@ -2,13 +2,21 @@
{{$t('Are you sure to run this spider?')}}
- - - + + + + + + + + + + - - + + @@ -335,7 +860,7 @@ export default { .button-group-container { margin-top: 10px; - border-bottom: 1px dashed #dcdfe6; + /*border-bottom: 1px dashed #dcdfe6;*/ padding-bottom: 20px; } @@ -345,7 +870,7 @@ export default { .list-fields-container { margin-top: 20px; - border-bottom: 1px dashed #dcdfe6; + /*border-bottom: 1px dashed #dcdfe6;*/ padding-bottom: 20px; } @@ -369,4 +894,142 @@ export default { .el-table.table-header >>> .el-input .el-input__inner { border-radius: 0; } + + .selector-type-item { + margin: 0 5px; + cursor: pointer; + font-weight: bolder; + } + + .el-tag { + margin-right: 5px; + font-weight: bolder; + cursor: pointer; + } + + .el-tag.inactive { + opacity: 0.5; + } + + .stage-list { + width: 100%; + /*width: calc(80px + 320px);*/ + display: flex; + flex-wrap: wrap; + list-style: none; + margin: 0; + padding: 0; + } + + .stage-list .stage-item { + /*flex-basis: 320px;*/ + min-width: 120px; + display: flex; + align-items: center; + } + + .stage-list .stage-item label { + flex-basis: 90px; + margin-right: 10px; + justify-self: flex-end; + text-align: right; + } + + .stage-list .stage-item .el-input { + flex-basis: calc(100% - 90px); + height: 32px; + } + + .stage-list .stage-item .el-input .el-input__inner { + height: 32px; + inline-size: 32px; + } + + .stage-list .stage-item .action-item { + cursor: pointer; + width: 13px; + margin-right: 5px; + } + + .stage-list .stage-item .action-item:last-child { + margin-right: 10px; + } + + .stage-list .stage-item .text-wrapper { + display: flex; + align-items: center; + max-width: calc(100% - 90px - 10px); + } + + .stage-list .stage-item .text-wrapper .text { + text-overflow: ellipsis; + overflow: hidden; + } + + .stage-list .stage-item .text-wrapper .text:hover { + text-decoration: underline; + } + + .stage-list .stage-item .text-wrapper i { + margin-left: 5px; + } + + .stage-list .stage-item >>> .edit-text { + height: 32px; + line-height: 32px; + } + + .stage-list .stage-item >>> .edit-text .el-input__inner { + height: 32px; + line-height: 32px; + } + + .top-wrapper { + display: flex; + justify-content: space-between; + align-items: center; + } + + .top-wrapper .list { + list-style: none; + display: flex; + flex-wrap: wrap; + align-items: center; + padding: 0; + } + + .top-wrapper .list .item { + margin-bottom: 10px; + display: flex; + align-items: center; + } + + .top-wrapper .list .item label { + width: 100px; + text-align: right; + margin-right: 10px; + font-size: 12px; + } + + .top-wrapper .list .item label + * { + width: 240px; + } + + .invalid >>> .el-input__inner { + border: 1px solid red !important; + } + + #process-chart { + width: 100%; + height: 480px; + } + + .config-list >>> .file-content { + height: calc(100vh - 280px); + } + + .spiderfile-actions { + margin-bottom: 5px; + text-align: right; + } diff --git a/frontend/src/components/File/FileDetail.vue b/frontend/src/components/File/FileDetail.vue index d74f73b3..f5f8a4cc 100644 --- a/frontend/src/components/File/FileDetail.vue +++ b/frontend/src/components/File/FileDetail.vue @@ -18,6 +18,7 @@ import 'codemirror/mode/go/go.js' import 'codemirror/mode/shell/shell.js' import 'codemirror/mode/markdown/markdown.js' import 'codemirror/mode/php/php.js' +import 'codemirror/mode/yaml/yaml.js' export default { name: 'FileDetail', @@ -38,7 +39,7 @@ export default { }, options () { return { - mode: this.lanaguage, + mode: this.language, theme: 'darcula', styleActiveLine: true, lineNumbers: true, @@ -46,8 +47,9 @@ export default { matchBrackets: 
true } }, - lanaguage () { + language () { const fileName = this.$store.state.file.currentPath + if (!fileName) return '' if (fileName.match(/\.js$/)) { return 'text/javascript' } else if (fileName.match(/\.py$/)) { @@ -60,6 +62,8 @@ export default { return 'text/x-php' } else if (fileName.match(/\.md$/)) { return 'text/x-markdown' + } else if (fileName === 'Spiderfile') { + return 'text/x-yaml' } else { return 'text' } @@ -74,7 +78,7 @@ export default { diff --git a/frontend/src/components/TableView/SettingFieldsTableView.vue b/frontend/src/components/TableView/SettingFieldsTableView.vue new file mode 100644 index 00000000..7ce8a46c --- /dev/null +++ b/frontend/src/components/TableView/SettingFieldsTableView.vue @@ -0,0 +1,283 @@ + + + + + diff --git a/frontend/src/i18n/zh.js b/frontend/src/i18n/zh.js index c56959c9..65170117 100644 --- a/frontend/src/i18n/zh.js +++ b/frontend/src/i18n/zh.js @@ -125,6 +125,8 @@ export default { 'Customized Spider': '自定义爬虫', 'Configurable': '可配置', 'Customized': '自定义', + 'configurable': '可配置', + 'customized': '自定义', 'Text': '文本', 'Attribute': '属性', 'Field Name': '字段名称', @@ -148,6 +150,26 @@ export default { 'List Page Fields': '列表页字段', 'Detail Page Fields': '详情页字段', 'Detail Page URL': '详情页URL', + 'All': '全部', + 'Stages': '阶段', + 'Process': '流程', + 'Stage Process': '流程图', + 'Stage Name': '阶段名称', + 'Start Stage': '开始阶段', + 'Engine': '引擎', + 'Selector Type': '选择器类别', + 'Selector': '选择器', + 'Is Attribute': '是否为属性', + 'Next Stage': '下一阶段', + 'No Next Stage': '没有下一阶段', + 'Fields': '字段', + 'Stage': '阶段', + 'Is List': '是否为列表', + 'List': '列表', + 'Pagination': '分页', + 'Settings': '设置', + 'Display Name': '显示名称', + 'Template': '模版', // 爬虫列表 'Name': '名称', @@ -171,6 +193,9 @@ export default { 'Wait Duration (sec)': '等待时长(秒)', 'Runtime Duration (sec)': '运行时长(秒)', 'Total Duration (sec)': '总时长(秒)', + 'Run Type': '运行类型', + 'Random': '随机', + 'Selected Nodes': '指定节点', // 任务列表 'Node': '节点', diff --git a/frontend/src/store/modules/file.js b/frontend/src/store/modules/file.js index 5cc50acb..66b84651 100644 --- a/frontend/src/store/modules/file.js +++ b/frontend/src/store/modules/file.js @@ -42,12 +42,6 @@ const actions = { .then(response => { commit('SET_FILE_CONTENT', response.data.data) }) - }, - saveFileContent ({ state, rootState }, payload) { - const { path } = payload - const spiderId = rootState.spider.spiderForm._id - const content = state.fileContent - return request.post(`/spiders/${spiderId}/file`, { content, path }) } } diff --git a/frontend/src/store/modules/lang.js b/frontend/src/store/modules/lang.js index b1e57a04..dc6b8d18 100644 --- a/frontend/src/store/modules/lang.js +++ b/frontend/src/store/modules/lang.js @@ -1,5 +1,5 @@ const state = { - lang: window.localStorage.getItem('lang') || 'en' + lang: window.localStorage.getItem('lang') || 'zh' } const getters = { diff --git a/frontend/src/store/modules/spider.js b/frontend/src/store/modules/spider.js index 07a0bac3..f4d7b134 100644 --- a/frontend/src/store/modules/spider.js +++ b/frontend/src/store/modules/spider.js @@ -1,4 +1,6 @@ +import Vue from 'vue' import request from '../../api/request' +import axisModelCommonMixin from 'echarts/src/coord/axisModelCommonMixin' const state = { // list of spiders @@ -34,7 +36,10 @@ const state = { filterSite: '', // preview crawl data - previewCrawlData: [] + previewCrawlData: [], + + // template list + templateList: [] } const getters = {} @@ -72,6 +77,16 @@ const mutations = { }, SET_PREVIEW_CRAWL_DATA (state, value) { state.previewCrawlData = value + }, + 
SET_SPIDER_FORM_CONFIG_SETTINGS (state, payload) { + const settings = {} + payload.forEach(row => { + settings[row.name] = row.value + }) + Vue.set(state.spiderForm.config, 'settings', settings) + }, + SET_TEMPLATE_LIST (state, value) { + state.templateList = value } } @@ -103,10 +118,11 @@ const actions = { }) }, crawlSpider ({ state, dispatch }, payload) { - const { id, nodeId, param } = payload + const { spiderId, runType, nodeIds, param } = payload return request.put(`/tasks`, { - spider_id: id, - node_id: nodeId, + spider_id: spiderId, + run_type: runType, + node_ids: nodeIds, param: param }) }, @@ -148,6 +164,20 @@ const actions = { }, extractFields ({ state, commit }) { return request.post(`/spiders/${state.spiderForm._id}/extract_fields`) + }, + postConfigSpiderConfig ({ state }) { + return request.post(`/config_spiders/${state.spiderForm._id}/config`, state.spiderForm.config) + }, + saveConfigSpiderSpiderfile ({ state, rootState }) { + const content = rootState.file.fileContent + return request.post(`/config_spiders/${state.spiderForm._id}/spiderfile`, { content }) + }, + addConfigSpider ({ state }) { + return request.put(`/config_spiders`, state.spiderForm) + }, + async getTemplateList ({ state, commit }) { + const res = await request.get(`/config_spiders_templates`) + commit('SET_TEMPLATE_LIST', res.data.data) } } diff --git a/frontend/src/views/schedule/ScheduleList.vue b/frontend/src/views/schedule/ScheduleList.vue index b170c9ed..3a032b23 100644 --- a/frontend/src/views/schedule/ScheduleList.vue +++ b/frontend/src/views/schedule/ScheduleList.vue @@ -274,7 +274,7 @@ export default { // 爬虫列表 request.get('/spiders', {}) .then(response => { - this.spiderList = response.data.data.list + this.spiderList = response.data.data.list || [] }) } } diff --git a/frontend/src/views/spider/SpiderDetail.vue b/frontend/src/views/spider/SpiderDetail.vue index b42e750d..a743e47d 100644 --- a/frontend/src/views/spider/SpiderDetail.vue +++ b/frontend/src/views/spider/SpiderDetail.vue @@ -13,8 +13,8 @@ - - + + @@ -48,6 +48,13 @@ export default { FileList, SpiderOverview }, + watch: { + activeTabName () { + // 初始化文件 + this.$store.commit('file/SET_FILE_CONTENT', '') + this.$store.commit('file/SET_CURRENT_PATH', '') + } + }, data () { return { activeTabName: 'overview' @@ -77,6 +84,10 @@ export default { setTimeout(() => { this.$refs['spider-stats'].update() }, 0) + } else if (this.activeTabName === 'config') { + setTimeout(() => { + this.$refs['config'].update() + }, 0) } this.$st.sendEv('爬虫详情', '切换标签', tab.name) }, @@ -85,19 +96,26 @@ export default { this.$st.sendEv('爬虫详情', '切换爬虫') } }, - created () { + async created () { // get the list of the spiders // this.$store.dispatch('spider/getSpiderList') // get spider basic info - this.$store.dispatch('spider/getSpiderData', this.$route.params.id) - .then(() => { - // get spider file info - this.$store.dispatch('file/getFileList', this.spiderForm.src) - }) + await this.$store.dispatch('spider/getSpiderData', this.$route.params.id) + + // get spider file info + await this.$store.dispatch('file/getFileList', this.spiderForm.src) // get spider tasks - this.$store.dispatch('spider/getTaskList', this.$route.params.id) + await this.$store.dispatch('spider/getTaskList', this.$route.params.id) + + // get spider list + await this.$store.dispatch('spider/getSpiderList') + + // if spider is configurable spider, set to config tab by default + if (this.spiderForm.type === 'configurable') { + this.activeTabName = 'config' + } } } diff --git 
a/frontend/src/views/spider/SpiderList.vue b/frontend/src/views/spider/SpiderList.vue index eb1e548f..78c87a36 100644 --- a/frontend/src/views/spider/SpiderList.vue +++ b/frontend/src/views/spider/SpiderList.vue @@ -33,18 +33,50 @@ width="40%" :visible.sync="addDialogVisible" :before-close="onAddDialogClose"> -
-
- - {{$t('Configurable Spider')}} - -
-
- - {{$t('Customized Spider')}} - -
-
+ + + + + + + + + + + + + + + + + + +
+ {{$t('Add')}} +
+
+ + + + + {{$t('Upload')}} + + + + + +
@@ -81,19 +113,7 @@ width="40%" :visible.sync="addCustomizedDialogVisible" :before-close="onAddCustomizedDialogClose"> - - - - {{$t('Upload')}} - - - - + @@ -110,17 +130,24 @@
- - - - - + + + + + + + + + {{$t('Search')}} + +
@@ -133,16 +160,19 @@ @click="onAdd"> {{$t('Add Spider')}} - - {{$t('Refresh')}} - +
+ + + + + + + + @@ -248,7 +281,7 @@ import { import dayjs from 'dayjs' import CrawlConfirmDialog from '../../components/Common/CrawlConfirmDialog' import StatusTag from '../../components/Status/StatusTag' -import request from '../../api/request' + export default { name: 'SpiderList', components: { @@ -272,10 +305,9 @@ export default { activeSpiderId: undefined, filter: { keyword: '', - type: '' + type: 'all' }, types: [], - // tableData, columns: [ { name: 'display_name', label: 'Name', width: '160', align: 'left' }, { name: 'type', label: 'Spider Type', width: '120' }, @@ -287,7 +319,8 @@ export default { spiderFormRules: { name: [{ required: true, message: 'Required Field', trigger: 'change' }] }, - fileList: [] + fileList: [], + spiderType: 'configurable' } }, computed: { @@ -295,7 +328,8 @@ export default { 'importForm', 'spiderList', 'spiderForm', - 'spiderTotal' + 'spiderTotal', + 'templateList' ]), ...mapGetters('user', [ 'token' @@ -318,14 +352,26 @@ export default { this.getList() }, onAdd () { - // this.addDialogVisible = true - this.onAddCustomized() + this.$store.commit('spider/SET_SPIDER_FORM', { + template: this.templateList[0] + }) + this.addDialogVisible = true }, onAddConfigurable () { - this.$store.commit('spider/SET_SPIDER_FORM', {}) - this.addDialogVisible = false - this.addConfigurableDialogVisible = true - this.$st.sendEv('爬虫', '添加爬虫-可配置爬虫') + this.$refs['addConfigurableForm'].validate(async res => { + if (!res) return + + let res2 + try { + res2 = await this.$store.dispatch('spider/addConfigSpider') + } catch (e) { + this.$message.error(this.$t('Something wrong happened')) + return + } + await this.$store.dispatch('spider/getSpiderList') + this.$router.push(`/spiders/${res2.data.data._id}`) + this.$st.sendEv('爬虫', '添加爬虫-可配置爬虫') + }) }, onAddCustomized () { this.addDialogVisible = false @@ -374,7 +420,8 @@ export default { this.$store.commit('spider/SET_SPIDER_FORM', row) this.dialogVisible = true }, - onRemove (row) { + onRemove (row, ev) { + ev.stopPropagation() this.$confirm(this.$t('Are you sure to delete this spider?'), this.$t('Notification'), { confirmButtonText: this.$t('Confirm'), cancelButtonText: this.$t('Cancel'), @@ -390,12 +437,14 @@ export default { this.$st.sendEv('爬虫', '删除') }) }, - onCrawl (row) { + onCrawl (row, ev) { + ev.stopPropagation() this.crawlConfirmDialogVisible = true this.activeSpiderId = row._id this.$st.sendEv('爬虫', '点击运行') }, - onView (row) { + onView (row, ev) { + ev.stopPropagation() this.$router.push('/spiders/' + row._id) this.$st.sendEv('爬虫', '查看') }, @@ -483,10 +532,12 @@ export default { if (!str || str.match('^0001')) return 'NA' return dayjs(str).format('YYYY-MM-DD HH:mm:ss') }, - onRowClick (row, event, column) { - if (column.label !== this.$t('Action')) { - this.onView(row) - } + onRowClick (row, column, event) { + this.onView(row, event) + }, + onClickTab (tab) { + this.filter.type = tab.name + this.getList() }, getList () { let params = { @@ -496,19 +547,29 @@ export default { type: this.filter.type } this.$store.dispatch('spider/getSpiderList', params) - }, - getTypes () { - request.get(`/spider/types`).then(resp => { - this.types = resp.data.data - }) } + // getTypes () { + // request.get(`/spider/types`).then(resp => { + // this.types = resp.data.data + // }) + // } }, - created () { - this.getTypes() + async created () { + // fetch spider types + // await this.getTypes() + // fetch spider list - this.getList() + await this.getList() + + // fetch template list + await this.$store.dispatch('spider/getTemplateList') }, 
mounted () { + console.log(this.spiderForm) + const vm = this + this.$nextTick(() => { + vm.$store.commit('spider/SET_SPIDER_FORM', this.spiderForm) + }) } } @@ -594,4 +655,8 @@ export default { .el-table >>> tr { cursor: pointer; } + + .actions { + text-align: right; + } diff --git a/frontend/src/views/task/TaskList.vue b/frontend/src/views/task/TaskList.vue index 9db3623d..3ab19fee 100644 --- a/frontend/src/views/task/TaskList.vue +++ b/frontend/src/views/task/TaskList.vue @@ -125,7 +125,7 @@ - + @@ -250,7 +250,8 @@ export default { onSelectSpider () { this.$st.sendEv('任务', '选择爬虫') }, - onRemove (row) { + onRemove (row, ev) { + ev.stopPropagation() this.$confirm(this.$t('Are you sure to delete this task?'), this.$t('Notification'), { confirmButtonText: this.$t('Confirm'), cancelButtonText: this.$t('Cancel'), diff --git a/frontend/vue.config.js b/frontend/vue.config.js index c867140e..f5de4ac5 100644 --- a/frontend/vue.config.js +++ b/frontend/vue.config.js @@ -1,3 +1,4 @@ module.exports = { publicPath: process.env.BASE_URL || '/' + // TODO: need to configure output static files with hash } diff --git a/frontend/yarn.lock b/frontend/yarn.lock index 12175086..ef361b2b 100644 --- a/frontend/yarn.lock +++ b/frontend/yarn.lock @@ -1168,6 +1168,13 @@ ansi-styles@^3.2.0, ansi-styles@^3.2.1: dependencies: color-convert "^1.9.0" +ansi-to-html@^0.6.13: + version "0.6.13" + resolved "https://registry.yarnpkg.com/ansi-to-html/-/ansi-to-html-0.6.13.tgz#c72eae8b63e5ca0643aab11bfc6e6f2217425833" + integrity sha512-Ys2/umuaTlQvP9DLkaa7UzRKF2FLrfod/hNHXS9QhXCrw7seObG6ksOGmNz3UoK+adwM8L9vQfG7mvaxfJ3Jvw== + dependencies: + entities "^1.1.2" + anymatch@^2.0.0: version "2.0.0" resolved "http://registry.npm.taobao.org/anymatch/download/anymatch-2.0.0.tgz#bcb24b4f37934d9aa7ac17b4adaf89e7c76ef2eb" @@ -2947,9 +2954,10 @@ electron-to-chromium@^1.3.103: version "1.3.113" resolved "http://registry.npm.taobao.org/electron-to-chromium/download/electron-to-chromium-1.3.113.tgz#b1ccf619df7295aea17bc6951dc689632629e4a9" -element-ui@2.4.6: - version "2.4.6" - resolved "https://registry.yarnpkg.com/element-ui/-/element-ui-2.4.6.tgz#524d3d4cac0b68745dda87311ef0d8fe541b5fc4" +element-ui@2.13.0: + version "2.13.0" + resolved "https://registry.npm.taobao.org/element-ui/download/element-ui-2.13.0.tgz?cache=0&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Felement-ui%2Fdownload%2Felement-ui-2.13.0.tgz#f6bb04e5b0a76ea5f62466044b774407ba4ebd2d" + integrity sha1-9rsE5bCnbqX2JGYES3dEB7pOvS0= dependencies: async-validator "~1.8.1" babel-helper-vue-jsx-merge-props "^2.0.0" @@ -3008,7 +3016,7 @@ enhanced-resolve@^4.1.0: memory-fs "^0.4.0" tapable "^1.0.0" -entities@^1.1.1: +entities@^1.1.1, entities@^1.1.2: version "1.1.2" resolved "http://registry.npm.taobao.org/entities/download/entities-1.1.2.tgz#bdfa735299664dfafd34529ed4f8522a275fea56" diff --git a/jenkins/master/docker-compose.yaml b/jenkins/master/docker-compose.yaml index 7309829d..1b7a476b 100644 --- a/jenkins/master/docker-compose.yaml +++ b/jenkins/master/docker-compose.yaml @@ -3,7 +3,7 @@ services: master: image: "tikazyq/crawlab:master" environment: - CRAWLAB_API_ADDRESS: "crawlab.cn/api" + CRAWLAB_API_ADDRESS: "http://crawlab.cn/api" CRAWLAB_BASE_URL: "/demo" CRAWLAB_SERVER_MASTER: "Y" CRAWLAB_MONGO_HOST: "mongo" @@ -27,10 +27,14 @@ services: mongo: image: mongo:latest restart: always + volumes: + - "/opt/crawlab/mongo/data/db:/data/db" ports: - "27017:27017" redis: image: redis:latest restart: always + volumes: + - "/opt/crawlab/redis/data:/data" ports: - 
"6379:6379" \ No newline at end of file