Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,72 @@
# Bazel build rules for //app/admin/main/search/dao, generated/managed by gazelle
# (tags = ["automanaged"]): do not hand-edit srcs/deps lists.
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
package(default_visibility = ["//visibility:public"])
# All files in this package, for tooling; private to this package.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
# Recursive source aggregation target consumed by the repo-wide srcs tree.
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
# Unit/integration tests for the dao package; embedded into the library target.
go_test(
name = "go_default_test",
srcs = [
"archive_test.go",
"dao_test.go",
"es_test.go",
"log_test.go",
"mng_test.go",
"mng_v2_test.go",
"query_extra_test.go",
"query_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/admin/main/search/conf:go_default_library",
"//app/admin/main/search/model:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
"//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
],
)
# The dao library: ES/DB access layer for the search-admin service.
go_library(
name = "go_default_library",
srcs = [
"archive.go",
"dao.go",
"es.go",
"log.go",
"mng.go",
"mng_v2.go",
"query.go",
"query_extra.go",
],
importpath = "go-common/app/admin/main/search/dao",
tags = ["automanaged"],
deps = [
"//app/admin/main/search/conf:go_default_library",
"//app/admin/main/search/model:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/stat/prom:go_default_library",
"//library/sync/errgroup:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
],
)

View File

@@ -0,0 +1,401 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/admin/main/search/model"
"gopkg.in/olivere/elastic.v5"
)
// ArchiveCheck search archive check from ES.
// It builds a bool query from the keyword/filter/range parameters in p and
// delegates execution to searchResult on the "ssd_archive" cluster,
// "archivecheck" index.
func (d *Dao) ArchiveCheck(c context.Context, p *model.ArchiveCheckParams) (res *model.SearchResult, err error) {
	// int64Terms converts an int64 slice into the []interface{} variadic form
	// that elastic.NewTermsQuery expects.
	int64Terms := func(vals []int64) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	query := elastic.NewBoolQuery()
	switch {
	case len(p.Bsp.KWs) > 0:
		for _, kw := range p.Bsp.KWs {
			if p.Bsp.Pattern == "equal" {
				query = query.Must(elastic.NewMultiMatchQuery(kw, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3).MinimumShouldMatch("100%"))
			} else {
				query = query.Should(elastic.NewMultiMatchQuery(kw, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3).MinimumShouldMatch("80%")).MinimumNumberShouldMatch(1)
			}
		}
	case p.Bsp.KW != "":
		// advanced search: single keyword takes precedence over the filters below
		query = query.Must(elastic.NewMultiMatchQuery(p.Bsp.KW, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3).MinimumShouldMatch("100%"))
	}
	if p.FromIP != "" {
		query = query.Must(elastic.NewQueryStringQuery("*" + p.FromIP + "*").AllowLeadingWildcard(true).Field("from_ip"))
	}
	if len(p.Aids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("aid", int64Terms(p.Aids)...))
	}
	if len(p.TypeIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("typeid", int64Terms(p.TypeIds)...))
	}
	if len(p.Attrs) > 0 {
		query = query.Filter(elastic.NewTermsQuery("attribute", int64Terms(p.Attrs)...))
	}
	if len(p.States) > 0 {
		query = query.Filter(elastic.NewTermsQuery("state", int64Terms(p.States)...))
	}
	if len(p.Mids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("mid", int64Terms(p.Mids)...))
	}
	if p.MidFrom > 0 {
		query = query.Filter(elastic.NewRangeQuery("mid").Gte(p.MidFrom))
	}
	if p.MidTo > 0 {
		query = query.Filter(elastic.NewRangeQuery("mid").Lte(p.MidTo))
	}
	if p.DurationFrom > 0 {
		query = query.Filter(elastic.NewRangeQuery("duration").Gte(p.DurationFrom))
	}
	if p.DurationTo > 0 {
		query = query.Filter(elastic.NewRangeQuery("duration").Lte(p.DurationTo))
	}
	// time window applies only to the whitelisted timestamp fields
	if p.TimeFrom != "" && (p.Time == "ctime" || p.Time == "mtime" || p.Time == "pubtime") {
		query = query.Filter(elastic.NewRangeQuery(p.Time).Gte(p.TimeFrom))
	}
	if p.TimeTo != "" && (p.Time == "ctime" || p.Time == "mtime" || p.Time == "pubtime") {
		query = query.Filter(elastic.NewRangeQuery(p.Time).Lte(p.TimeTo))
	}
	if res, err = d.searchResult(c, "ssd_archive", "archivecheck", query, p.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", p.Bsp.AppID), "%v", err)
	}
	return
}
// Video search video from ES (deprecated).
// It builds a bool query from the ID/state/duration filters in p, optionally
// adds a recency-decay function score (OrderType == 1), and executes it on the
// "ssd_archive" cluster, "archive_video" index.
//
// Fix: the previous version built and appended the identical arc_mid terms
// filter twice from the same p.ArcMids slice; the redundant duplicate clause
// has been removed (filters do not score, so the query semantics are unchanged).
func (d *Dao) Video(c context.Context, p *model.VideoParams) (res *model.SearchResult, err error) {
	// helpers converting typed slices to the []interface{} form TermsQuery expects
	int64Terms := func(vals []int64) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	strTerms := func(vals []string) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	query := elastic.NewBoolQuery()
	if p.Bsp.KW != "" {
		query = query.Must(elastic.NewMultiMatchQuery(p.Bsp.KW, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3))
	}
	if len(p.VIDs) > 0 {
		query = query.Filter(elastic.NewTermsQuery("vid", int64Terms(p.VIDs)...))
	}
	if len(p.AIDs) > 0 {
		query = query.Filter(elastic.NewTermsQuery("aid", int64Terms(p.AIDs)...))
	}
	if len(p.CIDs) > 0 {
		query = query.Filter(elastic.NewTermsQuery("cid", int64Terms(p.CIDs)...))
	}
	if len(p.TIDs) > 0 {
		query = query.Filter(elastic.NewTermsQuery("arc_typeid", int64Terms(p.TIDs)...))
	}
	if len(p.FileNames) > 0 {
		query = query.Filter(elastic.NewTermsQuery("filename", strTerms(p.FileNames)...))
	}
	if len(p.RelationStates) > 0 {
		query = query.Filter(elastic.NewTermsQuery("relation_state", int64Terms(p.RelationStates)...))
	}
	if len(p.ArcMids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("arc_mid", int64Terms(p.ArcMids)...))
	}
	if p.TagID > 0 {
		query = query.Filter(elastic.NewTermQuery("tag_id", p.TagID))
	}
	if len(p.Status) > 0 {
		query = query.Filter(elastic.NewTermsQuery("status", int64Terms(p.Status)...))
	}
	if len(p.XCodeState) > 0 {
		query = query.Filter(elastic.NewTermsQuery("xcode_state", int64Terms(p.XCodeState)...))
	}
	// 不再查库过滤arc_mid
	if p.UserType > 0 {
		query = query.Filter(elastic.NewTermQuery("user_type", p.UserType))
	}
	if p.DurationFrom > 0 {
		query = query.Filter(elastic.NewRangeQuery("duration").Gte(p.DurationFrom))
	}
	if p.DurationTo > 0 {
		query = query.Filter(elastic.NewRangeQuery("duration").Lte(p.DurationTo))
	}
	if p.OrderType == 1 {
		// recency-weighted ordering: exponential decay on arc_senddate anchored
		// at 2015-01-01, scaled to the elapsed days since that epoch; user_type==1
		// docs get a 10000x weight so they dominate the ranking.
		diffs := time.Now().Unix() - 1420041600
		days := fmt.Sprintf("%dd", diffs/(3600*24))
		score := elastic.NewFunctionScoreQuery().Add(elastic.NewTermQuery("user_type", 1), elastic.NewExponentialDecayFunction().FieldName("arc_senddate").Origin("2015-01-01 00:00:00").Scale(days).Offset("1d").Decay(0.8).Weight(float64(10000))).Add(nil, elastic.NewExponentialDecayFunction().FieldName("arc_senddate").Origin("2015-01-01 00:00:00").Scale(days).Offset("1d").Decay(0.8).Weight(float64(1)))
		query = query.Must(score)
		// function-score ordering replaces any explicit field ordering
		p.Bsp.Order = []string{}
	}
	if res, err = d.searchResult(c, "ssd_archive", "archive_video", query, p.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", p.Bsp.AppID), "%v", err)
	}
	return
}
// TaskQa searches QA-task records from ES on the "ssd_archive" cluster,
// using the index named by p.Bsp.AppID.
func (d *Dao) TaskQa(c context.Context, p *model.TaskQa) (res *model.SearchResult, err error) {
	// helpers converting typed slices to the []interface{} form TermsQuery expects
	int64Terms := func(vals []int64) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	strTerms := func(vals []string) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	query := elastic.NewBoolQuery()
	if p.Bsp.KW != "" {
		query = query.Must(elastic.NewMultiMatchQuery(p.Bsp.KW, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3))
	}
	if len(p.Ids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("id", int64Terms(p.Ids)...))
	}
	if len(p.TaskIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("task_id", strTerms(p.TaskIds)...))
	}
	if len(p.Uids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("uid", strTerms(p.Uids)...))
	}
	if len(p.ArcTagIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("arc_tagid", strTerms(p.ArcTagIds)...))
	}
	if len(p.AuditTagIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("audit_tagid", int64Terms(p.AuditTagIds)...))
	}
	if len(p.UpGroups) > 0 {
		query = query.Filter(elastic.NewTermsQuery("up_groups", strTerms(p.UpGroups)...))
	}
	if len(p.ArcTitles) > 0 {
		query = query.Filter(elastic.NewTermsQuery("arc_title", strTerms(p.ArcTitles)...))
	}
	if len(p.ArcTypeIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("arc_typeid", strTerms(p.ArcTypeIds)...))
	}
	if len(p.States) > 0 {
		query = query.Filter(elastic.NewTermsQuery("state", strTerms(p.States)...))
	}
	if len(p.AuditStatuses) > 0 {
		query = query.Filter(elastic.NewTermsQuery("audit_status", strTerms(p.AuditStatuses)...))
	}
	// range filters: fan count, creation time and finish time windows
	if p.FansFrom != "" {
		query = query.Filter(elastic.NewRangeQuery("fans").Gte(p.FansFrom))
	}
	if p.FansTo != "" {
		query = query.Filter(elastic.NewRangeQuery("fans").Lte(p.FansTo))
	}
	if p.CtimeFrom != "" {
		query = query.Filter(elastic.NewRangeQuery("ctime").Gte(p.CtimeFrom))
	}
	if p.CtimeTo != "" {
		query = query.Filter(elastic.NewRangeQuery("ctime").Lte(p.CtimeTo))
	}
	if p.FtimeFrom != "" {
		query = query.Filter(elastic.NewRangeQuery("ftime").Gte(p.FtimeFrom))
	}
	if p.FtimeTo != "" {
		query = query.Filter(elastic.NewRangeQuery("ftime").Lte(p.FtimeTo))
	}
	if res, err = d.searchResult(c, "ssd_archive", p.Bsp.AppID, query, p.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", p.Bsp.AppID), "%v", err)
	}
	return
}
// ArchiveCommerce searches commerce archives from ES ("ssd_archive" cluster,
// "archive_commerce_v" index). When p.Action == "get_ptypeids" it instead
// returns the distinct ptypeid aggregation for the same filter set.
func (d *Dao) ArchiveCommerce(c context.Context, p *model.ArchiveCommerce) (res *model.SearchResult, err error) {
	// strTerms converts a string slice into the []interface{} variadic form
	// that elastic.NewTermsQuery expects.
	strTerms := func(vals []string) []interface{} {
		out := make([]interface{}, len(vals))
		for i, v := range vals {
			out[i] = v
		}
		return out
	}
	query := elastic.NewBoolQuery()
	if p.Bsp.KW != "" {
		query = query.Must(elastic.NewMultiMatchQuery(p.Bsp.KW, p.Bsp.KwFields...).Type("best_fields").TieBreaker(0.3))
	}
	if len(p.Ids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("id", strTerms(p.Ids)...))
	}
	if len(p.Mids) > 0 {
		query = query.Filter(elastic.NewTermsQuery("mid", strTerms(p.Mids)...))
	}
	if len(p.PTypeIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("ptypeid", strTerms(p.PTypeIds)...))
	}
	if len(p.TypeIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("typeid", strTerms(p.TypeIds)...))
	}
	if len(p.States) > 0 {
		query = query.Filter(elastic.NewTermsQuery("state", strTerms(p.States)...))
	}
	if len(p.Copyrights) > 0 {
		query = query.Filter(elastic.NewTermsQuery("copyright", strTerms(p.Copyrights)...))
	}
	if len(p.OrderIds) > 0 {
		query = query.Filter(elastic.NewTermsQuery("order_id", strTerms(p.OrderIds)...))
	}
	// order_id > 0 distinguishes commissioned (ordered) archives
	switch p.IsOrder {
	case 1:
		query = query.Filter(elastic.NewRangeQuery("order_id").Gt(0))
	case 0:
		query = query.MustNot(elastic.NewRangeQuery("order_id").Gt(0))
	}
	// copyright == 1 marks original content
	switch p.IsOriginal {
	case 1:
		query = query.Filter(elastic.NewTermsQuery("copyright", 1))
	case 0:
		query = query.MustNot(elastic.NewTermsQuery("copyright", 1))
	}
	if p.Action == "get_ptypeids" {
		if res, err = d.ArchiveCommercePTypeIds(c, query); err != nil {
			PromError(fmt.Sprintf("es:%s ", p.Bsp.AppID), "%v", err)
		}
		return
	}
	if res, err = d.searchResult(c, "ssd_archive", "archive_commerce_v", query, p.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", p.Bsp.AppID), "%v", err)
	}
	return
}
// ArchiveCommercePTypeIds returns the distinct ptypeid values (up to 1000)
// matching query, as a terms aggregation on the "ssd_archive" cluster,
// "archive_commerce_v" index. Each bucket key is appended to res.Result as a
// raw JSON fragment; the page is fixed at pn=1, ps=1000.
//
// Fixes:
//   - the search now runs with the caller's context c (was context.Background()),
//     so cancellation/deadlines propagate;
//   - the bucket key type assertion is guarded — a non-string key no longer
//     panics, it is skipped.
func (d *Dao) ArchiveCommercePTypeIds(c context.Context, query *elastic.BoolQuery) (res *model.SearchResult, err error) {
	res = &model.SearchResult{
		Result: []json.RawMessage{},
		Page:   &model.Page{},
	}
	aggs := elastic.NewTermsAggregation()
	aggs = aggs.Field("ptypeid").Size(1000)
	if _, ok := d.esPool["ssd_archive"]; !ok {
		PromError(fmt.Sprintf("es:集群不存在%s", "ssd_archive"), "s.dao.searchResult indexName:%s", "ssd_archive")
		res = &model.SearchResult{Debug: fmt.Sprintf("es:集群不存在%s, %s", "ssd_archive", res.Debug)}
		return
	}
	searchResult, err := d.esPool["ssd_archive"].Search().Index("archive_commerce_v").Query(query).Aggregation("group_by_ptypeid", aggs).Size(0).Do(c)
	if err != nil {
		PromError(fmt.Sprintf("es:执行查询失败%s ", "ArchiveCommercePTypeIds"), "dao.log.ArchiveCommercePTypeIds(%v)", err)
		return
	}
	result, ok := searchResult.Aggregations.Terms("group_by_ptypeid")
	if !ok {
		PromError(fmt.Sprintf("es:Unmarshal%s ", "log"), "es:Unmarshal%v", err)
		return
	}
	for _, v := range result.Buckets {
		// keyword-field bucket keys decode as string; skip anything else
		// instead of panicking on the type assertion.
		key, isStr := v.Key.(string)
		if !isStr {
			continue
		}
		res.Result = append(res.Result, []byte(key))
	}
	res.Page.Pn = 1
	res.Page.Ps = 1000
	res.Page.Total = int64(len(res.Result))
	return
}

View File

@@ -0,0 +1,151 @@
package dao
import (
"context"
"go-common/app/admin/main/search/model"
"testing"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/olivere/elastic.v5"
)
// TestDaoArchiveCheck is an integration test: it drives Dao.ArchiveCheck with
// every filter populated against the real ES cluster wired up in TestMain.
// Only the error is asserted; the result assertion is disabled because an
// empty index legitimately yields no hits.
func TestDaoArchiveCheck(t *testing.T) {
convey.Convey("ArchiveCheck", t, func(ctx convey.C) {
var (
c = context.Background()
p = &model.ArchiveCheckParams{
Bsp: &model.BasicSearchParams{},
Aids: []int64{0},
TypeIds: []int64{0},
Attrs: []int64{0},
States: []int64{0},
Mids: []int64{0},
MidFrom: 1,
MidTo: 1,
DurationFrom: 1,
DurationTo: 1,
TimeFrom: "0001-01-01 00:00:00",
TimeTo: "0001-01-01 00:00:00",
Time: "ctime",
}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
_, err := d.ArchiveCheck(c, p)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
//ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
// TestDaoVideo is an integration test: it drives the deprecated Dao.Video
// query (including the OrderType==1 decay-score branch) against the real ES
// cluster wired up in TestMain. Only the error is asserted.
func TestDaoVideo(t *testing.T) {
convey.Convey("Video", t, func(ctx convey.C) {
var (
c = context.Background()
p = &model.VideoParams{
Bsp: &model.BasicSearchParams{},
VIDs: []int64{0},
AIDs: []int64{0},
CIDs: []int64{0},
TIDs: []int64{0},
FileNames: []string{""},
RelationStates: []int64{0},
ArcMids: []int64{0},
TagID: 1,
Status: []int64{0},
XCodeState: []int64{0},
UserType: 0,
DurationFrom: 1,
DurationTo: 1,
OrderType: 1,
}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
_, err := d.Video(c, p)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
//ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
// TestDaoTaskQa is an integration test: it drives Dao.TaskQa with every filter
// populated (AppID "task_qa" selects the index) against the real ES cluster
// wired up in TestMain, asserting a nil error and a non-nil result.
func TestDaoTaskQa(t *testing.T) {
convey.Convey("TaskQa", t, func(ctx convey.C) {
var (
c = context.Background()
p = &model.TaskQa{
Bsp: &model.BasicSearchParams{
AppID: "task_qa",
},
Ids: []int64{0},
TaskIds: []string{""},
Uids: []string{""},
ArcTagIds: []string{""},
AuditTagIds: []int64{0},
UpGroups: []string{""},
ArcTitles: []string{""},
ArcTypeIds: []string{""},
States: []string{""},
AuditStatuses: []string{""},
FansFrom: "0",
FansTo: "0",
CtimeFrom: "0001-01-01 00:00:00",
CtimeTo: "0001-01-01 00:00:00",
FtimeFrom: "0001-01-01 00:00:00",
}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.TaskQa(c, p)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
// TestDaoArchiveCommerce is an integration test: Action "get_ptypeids"
// exercises the aggregation path (ArchiveCommercePTypeIds) rather than the
// plain search path. Runs against the ES cluster wired up in TestMain.
func TestDaoArchiveCommerce(t *testing.T) {
convey.Convey("ArchiveCommerce", t, func(ctx convey.C) {
var (
c = context.Background()
p = &model.ArchiveCommerce{
Bsp: &model.BasicSearchParams{},
Ids: []string{"0"},
Mids: []string{"0"},
PTypeIds: []string{"0"},
TypeIds: []string{"0"},
States: []string{"0"},
Copyrights: []string{"0"},
OrderIds: []string{"0"},
IsOrder: 1,
IsOriginal: 1,
Action: "get_ptypeids",
}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ArchiveCommerce(c, p)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
// TestDaoArchiveCommercePTypeIds is an integration test: it runs the ptypeid
// terms aggregation with an empty bool query (match-all) against the real ES
// cluster wired up in TestMain.
func TestDaoArchiveCommercePTypeIds(t *testing.T) {
convey.Convey("ArchiveCommercePTypeIds", t, func(ctx convey.C) {
var (
c = context.Background()
query = &elastic.BoolQuery{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
res, err := d.ArchiveCommercePTypeIds(c, query)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}

View File

@@ -0,0 +1,111 @@
package dao
import (
"context"
"go-common/app/admin/main/search/conf"
"go-common/library/database/sql"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/stat/prom"
"go-common/library/sync/errgroup"
"gopkg.in/olivere/elastic.v5"
)
// Manager-platform / location-service endpoint paths; joined with the hosts
// from config (c.Prop.Manager / c.Prop.API) in New.
const (
_managerDep = "/x/admin/manager/users/udepts" // user -> department lookup
_managerUnames = "/x/admin/manager/users/unames" // uid -> username lookup
_managerIP = "/x/location/infos" // IP -> location lookup
)
// Dao bundles every backend handle the search-admin service uses: the ES
// client pool (keyed by cluster name from config), the MySQL connection, a
// blademaster HTTP client plus the resolved manager/location endpoint URLs,
// and the prepared query-config statement.
type Dao struct {
c *conf.Config // service configuration
esPool map[string]*elastic.Client // ES clients keyed by cluster name (built by newEsPool)
db *sql.DB // search MySQL database
client *bm.Client // HTTP client for manager/location calls
managerDep string // full URL: manager host + _managerDep
managerUnames string // full URL: manager host + _managerUnames
managerIP string // full URL: API host + _managerIP
queryConfStmt *sql.Stmt // prepared statement for _queryConfSQL
}
// New init dao.
// It wires up the DB, HTTP client and endpoint URLs from config, builds the
// ES client pool, initializes logging (NewLog) and starts the background log
// processing goroutine (NewLogProcess), then prepares the query-config
// statement. NOTE(review): the NewLogProcess goroutine has no visible stop
// mechanism here — presumably it lives for the process lifetime.
func New(c *conf.Config) (d *Dao) {
d = &Dao{
c: c,
db: sql.NewMySQL(c.DB.Search),
client: bm.NewClient(c.HTTPClient),
managerDep: c.Prop.Manager + _managerDep,
managerUnames: c.Prop.Manager + _managerUnames,
managerIP: c.Prop.API + _managerIP,
}
d.esPool = newEsPool(c, d)
d.NewLog()
go d.NewLogProcess()
d.queryConfStmt = d.db.Prepared(_queryConfSQL)
return
}
// BulkItem is a document that can be bulk-upserted into ES; the item itself
// is marshaled as the document body (see UpdateBulk).
type BulkItem interface {
IndexName() string // target index
IndexType() string // target mapping type
IndexID() string // document id
}
// BulkMapItem is like BulkItem but supplies an explicit partial-field map as
// the document body instead of the item itself (see UpdateMapBulk).
type BulkMapItem interface {
IndexName() string // target index
IndexType() string // target mapping type
IndexID() string // document id
PField() map[string]interface{} // partial document fields to upsert
}
// newEsPool builds one elastic client per cluster configured in c.Es.
// Clusters that fail to connect are reported via PromError and skipped, so
// the returned map may be missing entries; callers must check membership.
func newEsPool(c *conf.Config, d *Dao) (esCluster map[string]*elastic.Client) {
	esCluster = make(map[string]*elastic.Client)
	for name, e := range c.Es {
		opts := []elastic.ClientOptionFunc{elastic.SetURL(e.Addr...)}
		// the ops_log cluster does not support node sniffing
		if name == "ops_log" {
			opts = append(opts, elastic.SetSniff(false))
		}
		cli, err := elastic.NewClient(opts...)
		if err != nil {
			PromError("es:集群连接失败", "cluster: %s, %v", name, err)
			continue
		}
		esCluster[name] = cli
	}
	return
}
// PromError prometheus error count.
func PromError(name, format string, args ...interface{}) {
prom.BusinessErrCount.Incr(name)
log.Error(format, args...)
}
// Ping health-checks the MySQL database and every ES cluster concurrently and
// returns the first error encountered (via errgroup.Wait). Failures are also
// counted/logged through PromError.
//
// Fix: the loop variables name/client are now shadowed before being captured
// by the goroutine closure. Previously all goroutines shared the single loop
// variable pair (pre-Go1.22 capture semantics), so with more than one ES
// cluster every goroutine could end up pinging only the last cluster iterated.
func (d *Dao) Ping(c context.Context) (err error) {
	group := errgroup.Group{}
	group.Go(func() (err error) {
		err = d.db.Ping(context.Background())
		if err != nil {
			PromError("DB:Ping", "DB:Ping error(%v)", err)
		}
		return
	})
	for name, client := range d.esPool {
		name, client := name, client // shadow: each goroutine must see its own pair
		group.Go(func() (err error) {
			_, _, err = client.Ping(d.c.Es[name].Addr[0]).Do(context.Background())
			if err != nil {
				PromError("Es:Ping", "%s:Ping error(%v)", name, err)
			}
			return
		})
	}
	return group.Wait()
}

View File

@@ -0,0 +1,34 @@
package dao
import (
"flag"
"go-common/app/admin/main/search/conf"
"os"
"testing"
)
var (
d *Dao
)
// TestMain bootstraps the test Dao: in a deployed environment (DEPLOY_ENV set)
// it points the config SDK at the remote config service via flags; otherwise
// it loads a local TOML file. All tests in this package are integration tests
// against the resulting live backends.
// NOTE(review): conf_token below is a credential hardcoded in source — it
// should be injected via the environment/CI secret store instead.
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.search.search-admin")
flag.Set("conf_token", "TH0EaGzhzup2cfu0S7yE7qUhZg1aYxlR")
flag.Set("tree_id", "7627")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/search-admin-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}

View File

@@ -0,0 +1,505 @@
package dao
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"strings"
"go-common/app/admin/main/search/model"
"go-common/library/log"
"gopkg.in/olivere/elastic.v5"
)
// UpdateMapBulk (Deprecated): bulk-upserts partial documents into the esName
// cluster. Each item's PField() map is used as the document body with
// doc_as_upsert, so missing documents are created. Errors are logged and
// returned. NOTE(review): a missing esName panics on the nil client — callers
// are presumably trusted to pass a configured cluster; an empty bulkData
// produces an empty bulk request, which the client rejects with an error.
func (d *Dao) UpdateMapBulk(c context.Context, esName string, bulkData []BulkMapItem) (err error) {
bulkRequest := d.esPool[esName].Bulk()
for _, b := range bulkData {
request := elastic.NewBulkUpdateRequest().Index(b.IndexName()).Type(b.IndexType()).Id(b.IndexID()).Doc(b.PField()).DocAsUpsert(true)
bulkRequest.Add(request)
}
if _, err = bulkRequest.Do(c); err != nil {
log.Error("esName(%s) bulk error(%v)", esName, err)
}
return
}
// UpdateBulk (Deprecated): bulk-upserts each item into the esName cluster,
// marshaling the item itself as the document body with doc_as_upsert.
// Errors are logged and returned to the caller.
func (d *Dao) UpdateBulk(c context.Context, esName string, bulkData []BulkItem) (err error) {
	bulk := d.esPool[esName].Bulk()
	for _, item := range bulkData {
		bulk.Add(elastic.NewBulkUpdateRequest().
			Index(item.IndexName()).
			Type(item.IndexType()).
			Id(item.IndexID()).
			Doc(item).
			DocAsUpsert(true))
	}
	if _, err = bulk.Do(c); err != nil {
		log.Error("esName(%s) bulk error(%v)", esName, err)
	}
	return
}
// UpsertBulk replaces UpdateMapBulk and UpdateBulk: it bulk-writes each entry
// of up.UpsertBody into the named cluster. When up.Insert is true missing
// documents are created (doc_as_upsert), otherwise only existing documents
// are updated.
// NOTE(review): an unknown esCluster is logged but returns a nil error, so
// callers cannot distinguish "written" from "cluster missing" — intentional?
func (d *Dao) UpsertBulk(c context.Context, esCluster string, up *model.UpsertParams) (err error) {
es, ok := d.esPool[esCluster]
if !ok {
log.Error("esCluster(%s) not exists", esCluster)
return
}
bulkRequest := es.Bulk()
for _, b := range up.UpsertBody {
request := elastic.NewBulkUpdateRequest().Index(b.IndexName).Type(b.IndexType).Id(b.IndexID).Doc(b.Doc)
if up.Insert {
// create-if-missing semantics
request.DocAsUpsert(true)
}
bulkRequest.Add(request)
}
if _, err = bulkRequest.Do(c); err != nil {
log.Error("esCluster(%s) bulk error(%v)", esCluster, err)
}
return
}
// searchResult get result from ES. (Deprecated) v3迁移完要删掉.
//
// It executes query on the esClusterName cluster / indexName index with the
// paging, sorting, source-filtering and highlight options from bsp, and wraps
// the hits into a model.SearchResult. In debug mode the rendered query DSL is
// echoed back in res.Debug. When highlighting is enabled, each hit is followed
// in Result by its highlight fragment (or a "{}" placeholder) so results come
// in pairs.
//
// Fixes vs. the previous version:
//   - sorter construction no longer reads bsp.Sort[0] unconditionally when
//     bsp.Sort is shorter than bsp.Order — that was an index-out-of-range
//     panic whenever Order was non-empty and Sort was empty; an empty Sort
//     now defaults to ascending, and a short Sort still falls back to Sort[0];
//   - the search runs with the caller's context c instead of
//     context.Background(), so cancellation and deadlines propagate.
func (d *Dao) searchResult(c context.Context, esClusterName, indexName string, query elastic.Query, bsp *model.BasicSearchParams) (res *model.SearchResult, err error) {
	res = &model.SearchResult{Debug: ""}
	// debug mode: render the query DSL and return it to the caller
	if bsp.Debug {
		src, e := query.Source()
		if e != nil {
			err = e
			log.Error("searchResult query.Source error(%v)", err)
			return
		}
		data, er := json.Marshal(src)
		if er != nil {
			err = er
			log.Error("searchResult query.Source.json.Marshal error(%v)", err)
			return
		}
		res = &model.SearchResult{Debug: string(data)}
	}
	if _, ok := d.esPool[esClusterName]; !ok {
		PromError(fmt.Sprintf("es:集群不存在%s", esClusterName), "s.dao.searchResult indexName:%s", indexName)
		res = &model.SearchResult{Debug: fmt.Sprintf("es:集群不存在%s, %s", esClusterName, res.Debug)}
		return
	}
	// multi sort: optional score-first, then the caller-supplied field orders
	sorterSlice := []elastic.Sorter{}
	if bsp.KW != "" && bsp.ScoreFirst {
		sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
	}
	for i, field := range bsp.Order {
		// direction paired with this order field; when Sort is shorter than
		// Order fall back to Sort[0]; default to asc when Sort is empty
		dir := ""
		if i < len(bsp.Sort) {
			dir = bsp.Sort[i]
		} else if len(bsp.Sort) > 0 {
			dir = bsp.Sort[0]
		}
		if dir == "desc" {
			sorterSlice = append(sorterSlice, elastic.NewFieldSort(field).Desc())
		} else {
			sorterSlice = append(sorterSlice, elastic.NewFieldSort(field).Asc())
		}
	}
	if bsp.KW != "" && !bsp.ScoreFirst {
		sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
	}
	// source filtering
	fsc := elastic.NewFetchSourceContext(true).Include(bsp.Source...)
	// highlight
	hl := elastic.NewHighlight()
	if bsp.Highlight && len(bsp.KwFields) > 0 {
		for _, v := range bsp.KwFields {
			hl = hl.Fields(elastic.NewHighlighterField(v))
		}
		hl = hl.PreTags("<em class=\"keyword\">").PostTags("</em>")
	}
	// ES rejects windows where from + size > 10,000; clamp the offset
	from := (bsp.Pn - 1) * bsp.Ps
	size := bsp.Ps
	if (from + size) > 10000 {
		from = 10000 - size
	}
	// do
	searchResult, err := d.esPool[esClusterName].
		Search().Index(indexName).
		Highlight(hl).
		Query(query).
		SortBy(sorterSlice...).
		From(from).
		Size(size).
		Pretty(true).
		FetchSourceContext(fsc).
		Do(c)
	if err != nil {
		PromError(fmt.Sprintf("es:执行查询失败%s ", esClusterName), "%v", err)
		res = &model.SearchResult{Debug: res.Debug + "es:执行查询失败"}
		return
	}
	var data []json.RawMessage
	// "{}" placeholder keeps hit/highlight pairs aligned when highlighting is on
	placeholder := bytes.Buffer{}
	placeholder.WriteString("{")
	placeholder.WriteString("}")
	for _, hit := range searchResult.Hits.Hits {
		var t json.RawMessage
		e := json.Unmarshal(*hit.Source, &t)
		if e != nil {
			PromError(fmt.Sprintf("es:%s 索引有脏数据", esClusterName), "s.dao.SearchArchiveCheck(%d,%d) error(%v) ", bsp.Pn*bsp.Ps, bsp.Ps, e)
			continue
		}
		data = append(data, t)
		// highlight fragment for this hit
		if len(hit.Highlight) > 0 {
			hb, _ := json.Marshal(hit.Highlight)
			data = append(data, hb)
		} else if bsp.Highlight {
			data = append(data, placeholder.Bytes()) //保证在高亮情况下,肯定有一对数据
		}
	}
	if len(data) == 0 {
		data = []json.RawMessage{}
	}
	res = &model.SearchResult{
		Order:  strings.Join(bsp.Order, ","),
		Sort:   strings.Join(bsp.Sort, ","),
		Result: data,
		Debug:  res.Debug,
		Page: &model.Page{
			Pn:    bsp.Pn,
			Ps:    bsp.Ps,
			Total: searchResult.Hits.TotalHits,
		},
	}
	return
}
// QueryResult query result from ES.
func (d *Dao) QueryResult(c context.Context, query elastic.Query, sp *model.QueryParams, qbDebug *model.QueryDebugResult) (res *model.QueryResult, qrDebug *model.QueryDebugResult, err error) {
qrDebug = &model.QueryDebugResult{}
if qbDebug != nil {
qrDebug = qbDebug
}
esCluster := sp.AppIDConf.ESCluster
if _, ok := d.esPool[esCluster]; !ok {
qrDebug.AddErrMsg("es:集群不存在" + esCluster)
return
}
if sp.DebugLevel != 0 {
qrDebug.Mapping, err = d.esPool[esCluster].GetMapping().Index(sp.QueryBody.From).Do(context.Background())
}
// 低级别debug在dsl执行前退出
if sp.DebugLevel == 1 {
return
}
// multi sort
sorterSlice := []elastic.Sorter{}
if len(sp.QueryBody.Where.Like) > 0 && sp.QueryBody.OrderScoreFirst { // like 长度 > 0但里面是空的也是个问题
sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
}
for _, i := range sp.QueryBody.Order {
for k, v := range i {
if v == "asc" {
sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Asc())
} else {
sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Desc())
}
}
}
if len(sp.QueryBody.Where.Like) > 0 && sp.QueryBody.OrderScoreFirst {
sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
}
// source
fsc := elastic.NewFetchSourceContext(true).Include(sp.QueryBody.Fields...)
// highlight
hl := elastic.NewHighlight()
if sp.QueryBody.Highlight && len(sp.QueryBody.Where.Like) > 0 {
for _, v := range sp.QueryBody.Where.Like {
for _, field := range v.KWFields {
hl = hl.Fields(elastic.NewHighlighterField(field))
}
}
hl = hl.PreTags("<em class=\"keyword\">").PostTags("</em>")
}
// from + size = 10,000
maxRows := 10000
if b, ok := model.PermConf["oht"][sp.Business]; ok && b == "true" {
maxRows = 100000
}
from := (sp.QueryBody.Pn - 1) * sp.QueryBody.Ps
size := sp.QueryBody.Ps
if (from + size) > maxRows {
from = maxRows - size
}
// Scroll
if sp.QueryBody.Scroll == true {
var (
tList []json.RawMessage
tLen int
ScrollID = ""
)
res = &model.QueryResult{}
esCluster := sp.AppIDConf.ESCluster
eSearch, ok := d.esPool[esCluster]
if !ok {
PromError(fmt.Sprintf("es:集群不存在%s", esCluster), "s.dao.searchResult indexName:%s", esCluster)
return
}
fsc := elastic.NewFetchSourceContext(true).Include(sp.QueryBody.Fields...)
// multi sort
sorterSlice := []elastic.Sorter{}
if len(sp.QueryBody.Where.Like) > 0 && sp.QueryBody.OrderScoreFirst { // like 长度 > 0但里面是空的也是个问题
sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
}
for _, i := range sp.QueryBody.Order {
for k, v := range i {
if v == "asc" {
sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Asc())
} else {
sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Desc())
}
}
}
if len(sp.QueryBody.Where.Like) > 0 && !sp.QueryBody.OrderScoreFirst {
sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
}
for {
searchResult, err := eSearch.Scroll().Index(sp.QueryBody.From).
Query(query).FetchSourceContext(fsc).Size(sp.QueryBody.Ps).Scroll("1m").ScrollId(ScrollID).SortBy(sorterSlice...).Do(c)
if err == io.EOF {
break
} else if err != nil {
PromError(fmt.Sprintf("es:执行查询失败%s ", "Scroll"), "es:执行查询失败%v", err)
break
}
ScrollID = searchResult.ScrollId
for _, hit := range searchResult.Hits.Hits {
var t json.RawMessage
if err = json.Unmarshal(*hit.Source, &t); err != nil {
PromError(fmt.Sprintf("es:Unmarshal%s ", "Scroll"), "es:Unmarshal%v", err)
break
}
tList = append(tList, t)
tLen++
if tLen >= sp.QueryBody.Pn*sp.QueryBody.Ps {
goto ClearScroll
}
}
}
ClearScroll:
go eSearch.ClearScroll().ScrollId(ScrollID).Do(context.Background())
if res.Result, err = json.Marshal(tList); err != nil {
PromError(fmt.Sprintf("es:Unmarshal%s ", "Scroll"), "es:Unmarshal%v", err)
return
}
return
}
// do
searchPrepare := d.esPool[esCluster].
Search().Index(sp.QueryBody.From).
Highlight(hl).
Query(query).
SortBy(sorterSlice...).
From(from).
Size(size).
FetchSourceContext(fsc).IgnoreUnavailable(true).AllowNoIndices(true)
if ec, ok := model.PermConf["es_cache"][sp.Business]; ok && ec == "true" {
searchPrepare.RequestCache(true)
}
if rt, ok := model.PermConf["routing"][sp.Business]; ok {
routing := make([]string, 0, 1)
if sp.QueryBody.Where.EQ != nil {
if eq, ok := sp.QueryBody.Where.EQ[rt]; ok {
routing = append(routing, fmt.Sprintf("%v", eq))
}
}
if sp.QueryBody.Where.In != nil {
if in, ok := sp.QueryBody.Where.In[rt]; ok {
for _, v := range in {
routing = append(routing, fmt.Sprintf("%v", v))
}
}
}
if len(routing) == 0 {
qrDebug.AddErrMsg("es:路由不存在" + rt)
return
}
searchPrepare.Routing(routing...)
}
if sp.DebugLevel == 2 {
searchPrepare.Profile(true)
}
// Enhanced
for _, v := range sp.QueryBody.Where.Enhanced {
aggKey := v.Mode + "_" + v.Field
switch v.Mode {
case model.EnhancedModeGroupBy:
aggs := elastic.NewTermsAggregation()
aggs = aggs.Field(v.Field).Size(1000) //要和业务方确定具体值
searchPrepare.Aggregation(aggKey, aggs)
case model.EnhancedModeCollapse, model.EnhancedModeDistinct:
collapse := elastic.NewCollapseBuilder(v.Field).MaxConcurrentGroupRequests(1)
innerHit := elastic.NewInnerHit().Name("last_one").Size(1)
for _, v := range v.Order {
for field, sort := range v {
if sort == "desc" {
innerHit.Sort(field, false)
} else {
innerHit.Sort(field, true)
}
}
}
if len(v.Order) > 0 {
collapse.InnerHit(innerHit)
}
searchPrepare.Collapse(collapse)
case model.EnhancedModeSum:
aggs := elastic.NewSumAggregation()
aggs = aggs.Field(v.Field)
searchPrepare.Aggregation(aggKey, aggs)
case model.EnhancedModeDistinctCount:
aggs := elastic.NewCardinalityAggregation()
aggs = aggs.Field(v.Field)
searchPrepare.Aggregation(aggKey, aggs)
}
}
searchResult, err := searchPrepare.Do(context.Background())
if err != nil {
qrDebug.AddErrMsg(fmt.Sprintf("es:执行查询失败%s. %v", esCluster, err))
PromError(fmt.Sprintf("es:执行查询失败%s ", esCluster), "%v", err)
return
}
// data
data := json.RawMessage{}
docHits := []json.RawMessage{}
docBuckets := map[string][]map[string]*json.RawMessage{}
b := bytes.Buffer{}
b.WriteString("{")
b.WriteString("}")
for _, hit := range searchResult.Hits.Hits {
var t json.RawMessage
e := json.Unmarshal(*hit.Source, &t)
if e != nil {
PromError(fmt.Sprintf("es:%s 索引有脏数据", esCluster), "s.dao.SearchArchiveCheck(%d,%d) error(%v) ", sp.QueryBody.Pn*sp.QueryBody.Ps, sp.QueryBody.Ps, e)
continue
}
docHits = append(docHits, t)
// highlight
if len(hit.Highlight) > 0 {
b, _ := json.Marshal(hit.Highlight)
docHits = append(docHits, b)
} else if sp.QueryBody.Highlight {
docHits = append(docHits, b.Bytes()) //保证在高亮情况下,肯定有一对数据
}
}
if len(docHits) > 0 {
if doc, er := json.Marshal(docHits); er != nil {
qrDebug.AddErrMsg(fmt.Sprintf("es:Unmarshal docHits es:Unmarshal%v ", er))
PromError(fmt.Sprintf("es:Unmarshal%s ", "docHits"), "es:Unmarshal%v", er)
} else {
data = doc
}
} else {
h := bytes.Buffer{}
h.WriteString("[")
h.WriteString("]")
data = h.Bytes()
}
// data overwrite
for _, v := range sp.QueryBody.Where.Enhanced {
key := v.Mode + "_" + v.Field
switch v.Mode {
case model.EnhancedModeGroupBy:
result, ok := searchResult.Aggregations.Terms(key)
if !ok {
PromError(fmt.Sprintf("es:Unmarshal%s ", key), "es:Unmarshal%v", err)
continue
}
for _, b := range result.Buckets {
docBuckets[key] = append(docBuckets[key], b.Aggregations)
}
data = b.Bytes() //保证无数据情况下,有正常返回
case model.EnhancedModeSum:
result, ok := searchResult.Aggregations.Sum(key)
if !ok {
PromError(fmt.Sprintf("es:Unmarshal%s ", key), "es:Unmarshal%v", err)
continue
}
docBuckets[key] = append(docBuckets[key], result.Aggregations)
data = b.Bytes() //保证无数据情况下,有正常返回
case model.EnhancedModeDistinctCount:
result, ok := searchResult.Aggregations.Cardinality(key)
if !ok {
PromError(fmt.Sprintf("es:Unmarshal%s ", key), "es:Unmarshal%v", err)
continue
}
docBuckets[key] = append(docBuckets[key], result.Aggregations)
data = b.Bytes() //保证无数据情况下,有正常返回
default:
// other modes...
}
}
if len(docBuckets) > 0 {
if doc, er := json.Marshal(docBuckets); er != nil {
qrDebug.AddErrMsg(fmt.Sprintf("es:Unmarshal docBuckets es:Unmarshal%v", er))
PromError(fmt.Sprintf("es:Unmarshal%s ", "docBuckets"), "es:Unmarshal%v", er)
} else {
data = doc
}
}
order := []string{}
sort := []string{}
for _, i := range sp.QueryBody.Order {
for k, v := range i {
order = append(order, k)
sort = append(sort, v)
}
}
res = &model.QueryResult{
Order: strings.Join(order, ","),
Sort: strings.Join(sort, ","),
Result: data,
Page: &model.Page{
Pn: sp.QueryBody.Pn,
Ps: sp.QueryBody.Ps,
Total: searchResult.Hits.TotalHits,
},
}
//默认的debug高级别debug在dsl执行后退出
if sp.DebugLevel == 2 {
qrDebug.Profile = searchResult.Profile
return
}
return
}
// BulkIndex submits every item in bulkData to the named ES cluster as one
// bulk index request. A failed bulk call is logged and its error returned.
func (d *Dao) BulkIndex(c context.Context, esName string, bulkData []BulkItem) (err error) {
	bulk := d.esPool[esName].Bulk()
	for _, item := range bulkData {
		bulk.Add(elastic.NewBulkIndexRequest().
			Index(item.IndexName()).
			Type(item.IndexType()).
			Id(item.IndexID()).
			Doc(item))
	}
	if _, err = bulk.Do(c); err != nil {
		log.Error("esName(%s) bulk error(%v)", esName, err)
	}
	return
}
// ExistIndex reports whether indexName exists on the given ES cluster.
// An unknown cluster name is reported via prometheus and returned as an error.
func (d *Dao) ExistIndex(c context.Context, esClusterName, indexName string) (exist bool, err error) {
	pool, ok := d.esPool[esClusterName]
	if !ok {
		PromError(fmt.Sprintf("es:集群不存在%s", esClusterName), "s.dao.searchResult indexName:%s", indexName)
		err = fmt.Errorf("集群不存在")
		return
	}
	exist, err = pool.IndexExists(indexName).Do(c)
	return
}

View File

@@ -0,0 +1,136 @@
package dao
import (
"context"
"go-common/app/admin/main/search/model"
"testing"
"github.com/smartystreets/goconvey/convey"
"gopkg.in/olivere/elastic.v5"
)
// TestDaoUpdateMapBulk smoke-tests UpdateMapBulk with an empty cluster name
// and no items; the error assertion stays commented out because the call
// needs a live ES cluster.
func TestDaoUpdateMapBulk(t *testing.T) {
	convey.Convey("UpdateMapBulk", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			esName   = ""
			bulkData = []BulkMapItem{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//err :=
			d.UpdateMapBulk(c, esName, bulkData)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}
// TestDaoUpdateBulk smoke-tests UpdateBulk with empty input; the error
// assertion is disabled because it requires a live ES cluster.
func TestDaoUpdateBulk(t *testing.T) {
	convey.Convey("UpdateBulk", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			esName   = ""
			bulkData = []BulkItem{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//err :=
			d.UpdateBulk(c, esName, bulkData)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}
// TestDaoUpsertBulk exercises UpsertBulk with zero-value params and expects
// no error from the happy path.
func TestDaoUpsertBulk(t *testing.T) {
	convey.Convey("UpsertBulk", t, func(ctx convey.C) {
		var (
			c         = context.Background()
			esCluster = ""
			up        = &model.UpsertParams{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.UpsertBulk(c, esCluster, up)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}
// TestDaosearchResult exercises the unexported searchResult helper with a nil
// query and zero-value search params.
func TestDaosearchResult(t *testing.T) {
	convey.Convey("searchResult", t, func(ctx convey.C) {
		var (
			c             = context.Background()
			esClusterName = ""
			indexName     = ""
			query         elastic.Query
			bsp           = &model.BasicSearchParams{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.searchResult(c, esClusterName, indexName, query, bsp)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoQueryResult exercises QueryResult with an empty ES cluster config;
// only the debug struct is asserted because res needs live ES data.
func TestDaoQueryResult(t *testing.T) {
	convey.Convey("QueryResult", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			query elastic.Query
			sp    = &model.QueryParams{
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "",
				},
			}
			qbDebug = &model.QueryDebugResult{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, qrDebug, err := d.QueryResult(c, query, sp, qbDebug)
			ctx.Convey("Then err should be nil.res,qrDebug should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(qrDebug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoBulkIndex smoke-tests BulkIndex with empty input; the error
// assertion is disabled because it requires a live ES cluster.
func TestDaoBulkIndex(t *testing.T) {
	convey.Convey("BulkIndex", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			esName   = ""
			bulkData = []BulkItem{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//err :=
			d.BulkIndex(c, esName, bulkData)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}
// TestDaoExistIndex exercises ExistIndex with an empty cluster/index name;
// the error is ignored because the empty cluster name is expected to fail.
func TestDaoExistIndex(t *testing.T) {
	convey.Convey("ExistIndex", t, func(ctx convey.C) {
		var (
			c             = context.Background()
			esClusterName = ""
			indexName     = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			exist, _ := d.ExistIndex(c, esClusterName, indexName)
			ctx.Convey("Then err should be nil.exist should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				ctx.So(exist, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,474 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"go-common/app/admin/main/search/model"
"go-common/library/log"
"github.com/pkg/errors"
"gopkg.in/olivere/elastic.v5"
)
const (
	// _sql is the prefix of the business-config query; callers append the
	// appID ("log_audit" / "log_user_action") to pick the matching table.
	_sql = "SELECT id, name, index_format, index_cluster, additional_mapping, permission_point FROM digger_"
	// _count and _percent upsert daily usage counters into digger_count,
	// bumping the counter when the row for that day already exists.
	_count   = "INSERT INTO digger_count (`business`,`type`,`time`,`count`) values (?, 'inc', ?, 1) ON DUPLICATE KEY UPDATE count=count+1"
	_percent = "INSERT INTO digger_count (`business`,`type`,`time`,`name`,`count`) values (?, 'inc', ?, ?, 1) ON DUPLICATE KEY UPDATE count=count+1"
)
var (
	// Per-appID business config keyed by business id, (re)loaded by NewLog.
	// NOTE(review): these maps are replaced by the NewLogProcess goroutine
	// while GetLogInfo reads them without synchronization — confirm whether
	// concurrent access is possible and guard with a mutex/atomic if so.
	logAuditBusiness      map[int]*model.Business
	logUserActionBusiness map[int]*model.Business
)
// NewLogProcess keeps the in-memory log business mappings fresh: it reloads
// them via NewLog once a minute, retrying every second after a failure.
// It never returns and is meant to run in its own goroutine.
func (d *Dao) NewLogProcess() {
	for {
		delay := time.Minute
		if err := d.NewLog(); err != nil {
			// Reload failed; retry soon instead of waiting a full minute.
			delay = time.Second
		}
		time.Sleep(delay)
	}
}
// NewLog reloads the business configuration for both log appIDs from the DB
// and drops entries whose ES cluster is absent from the dao's pool, so later
// queries fail fast. It overwrites the package-level maps read by GetLogInfo.
func (d *Dao) NewLog() (err error) {
	if logAuditBusiness, err = d.initMapping("log_audit"); err != nil {
		return
	}
	// Prune audit businesses whose target cluster is not configured.
	for k, v := range logAuditBusiness {
		if _, ok := d.esPool[v.IndexCluster]; !ok {
			log.Error("logAudit esPool no exist(%v)", k)
			delete(logAuditBusiness, k)
		}
	}
	if logUserActionBusiness, err = d.initMapping("log_user_action"); err != nil {
		return
	}
	// Same pruning for user-action businesses.
	for k, v := range logUserActionBusiness {
		if _, ok := d.esPool[v.IndexCluster]; !ok {
			log.Error("logUserAction esPool no exist(%v)", k)
			delete(logUserActionBusiness, k)
		}
	}
	return
}
// GetLogInfo looks up the business configuration for the given log appID
// ("log_audit" or "log_user_action") and business id. An unknown appID yields
// an empty Business and ok == false; a known appID with an unknown id yields
// a nil Business and ok == false.
func (d *Dao) GetLogInfo(appID string, id int) (business *model.Business, ok bool) {
	var table map[int]*model.Business
	switch appID {
	case "log_audit":
		table = logAuditBusiness
	case "log_user_action":
		table = logUserActionBusiness
	default:
		return &model.Business{}, false
	}
	business, ok = table[id]
	return
}
// initMapping loads the business rows for one log appID and builds each row's
// field->type mapping by overlaying its additional_mapping JSON on the
// appID's default mapping. Rows that fail to scan or whose JSON is invalid
// are logged and skipped (best-effort: err is reset by rows.Err() at the end,
// so a partially-loaded map can be returned with a nil error).
func (d *Dao) initMapping(appID string) (business map[int]*model.Business, err error) {
	defaultMapping := map[string]string{}
	switch appID {
	case "log_audit":
		defaultMapping = model.LogAuditDefaultMapping
	case "log_user_action":
		defaultMapping = model.LogUserActionDefaultMapping
	}
	business = map[int]*model.Business{}
	rows, err := d.db.Query(context.Background(), _sql+appID)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		var value = &model.Business{
			AppID:   appID,
			Mapping: map[string]string{},
		}
		if err = rows.Scan(&value.ID, &value.Name, &value.IndexFormat, &value.IndexCluster, &value.AdditionalMapping, &value.PermissionPoint); err != nil {
			log.Error("Log New DB (%v)(%v)", appID, err)
			continue
		}
		// Audit logs are pinned to the "log" cluster regardless of the DB value.
		if appID == "log_audit" {
			value.IndexCluster = "log"
		}
		// Start from the appID's default mapping, then overlay per-business fields.
		for k, v := range defaultMapping {
			value.Mapping[k] = v
		}
		if value.AdditionalMapping != "" {
			var additionalMappingDict map[string]string
			if err = json.Unmarshal([]byte(value.AdditionalMapping), &additionalMappingDict); err != nil {
				log.Error("Log New Json (%v)(%v)", value.ID, err)
				continue
			}
			for k, v := range additionalMappingDict {
				value.Mapping[k] = v
			}
		}
		business[value.ID] = value
	}
	err = rows.Err()
	return
}
// logIndexName builds the comma-separated list of existing ES index names
// covering the query's ctime window [p.CTimeFrom, p.CTimeTo] according to the
// business's IndexFormat. Original doc (translated): multiple names are
// comma-joined; split by day -> at most 7 days, by week -> at most 2 months,
// by month -> at most 6 months, by year -> at most 3 years.
// Only indexes that actually exist on the cluster are returned; an empty
// string means no index covers the range.
func (d *Dao) logIndexName(c context.Context, p *model.LogParams, business *model.Business) (res string, err error) {
	var (
		sTime  = time.Now()
		eTime  = time.Now()
		resArr []string
	)
	if p.CTimeFrom != "" {
		sTime, err = time.Parse("2006-01-02 15:04:05", p.CTimeFrom)
		if err != nil {
			log.Error("d.LogAuditIndexName(%v)", p.CTimeFrom)
			return
		}
	}
	if p.CTimeTo != "" {
		eTime, err = time.Parse("2006-01-02 15:04:05", p.CTimeTo)
		if err != nil {
			log.Error("d.LogAuditIndexName(p.CTimeTo)(%v)", p.CTimeTo)
			return
		}
	}
	// resDict de-duplicates generated index names.
	resDict := map[string]bool{}
	if strings.Contains(business.IndexFormat, "02") {
		// Daily indexes: walk back one day at a time from the end of the
		// window. With no CTimeFrom only two days are considered.
		for a := 0; a <= 60; a++ {
			resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, eTime)] = true
			eTime = eTime.AddDate(0, 0, -1)
			if (p.CTimeFrom == "" && a >= 1) || (p.CTimeFrom != "" && sTime.After(eTime)) {
				break
			}
		}
	} else if strings.Contains(business.IndexFormat, "week") {
		// Week-bucketed indexes: still walk day by day so every bucket in the
		// window is hit; the start day's bucket is added explicitly on exit.
		for a := 0; a <= 366; a++ {
			resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, eTime)] = true
			eTime = eTime.AddDate(0, 0, -1)
			if (p.CTimeFrom == "" && a >= 1) || (p.CTimeFrom != "" && sTime.After(eTime)) {
				resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, sTime)] = true
				break
			}
		}
	} else if strings.Contains(business.IndexFormat, "01") {
		// Monthly indexes. Pin the day to the 1st first: on Jan 31,
		// AddDate(0, -1, 0) would normalize past the intended month.
		year, month, _ := eTime.Date()
		hour, min, sec := eTime.Clock()
		eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
		for a := 0; a <= 360; a++ {
			resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, eTime)] = true
			eTime = eTime.AddDate(0, -1, 0)
			if (p.CTimeFrom == "" && a >= 1) || p.CTimeFrom != "" && sTime.After(eTime) {
				break
			}
		}
	} else if strings.Contains(business.IndexFormat, "2006") {
		// Yearly indexes. Pin to Jan 1 first: on Feb 29, AddDate(-1, 0, 0)
		// would normalize into the wrong month of the previous year.
		year, _, _ := eTime.Date()
		hour, min, sec := eTime.Clock()
		eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
		for a := 0; a <= 100; a++ {
			resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, eTime)] = true
			eTime = eTime.AddDate(-1, 0, 0)
			if (p.CTimeFrom == "" && a >= 1) || (p.CTimeFrom != "" && sTime.After(eTime)) {
				break
			}
		}
	} else if business.IndexFormat == "all" {
		// Unpartitioned business: a single index.
		resDict[getLogAuditIndexName(p.Business, business.AppID, business.IndexFormat, eTime)] = true
	}
	// Keep only the indexes that actually exist on the cluster.
	for k := range resDict {
		if exist, e := d.ExistIndex(c, business.IndexCluster, k); exist && e == nil {
			resArr = append(resArr, k)
		}
	}
	res = strings.Join(resArr, ",")
	return
}
func getLogAuditIndexName(business int, indexName string, format string, time time.Time) (index string) {
var (
week = map[int]string{
0: "0107",
1: "0815",
2: "1623",
3: "2431",
}
)
format = strings.Replace(time.Format(format), "week", week[time.Day()/8], -1)
index = indexName + "_" + strconv.Itoa(business) + "_" + format
return
}
// getQuery translates the parsed request parameters into a bool filter query
// driven by the business's field->type mapping. Per-type conventions:
//   int/int64:  exact "k" terms, plus range "k_from" / "k_to"
//   string:     exact "k" terms, plus fuzzy "k_like"
//   time:       range "k_from" / "k_to"
//   int_to_bin: values are 1-based bit positions, matched as 1<<(v-1)
//   array:      "k_and" requires every value, "k_or" matches any value
func (d *Dao) getQuery(pr map[string][]interface{}, indexMapping map[string]string) (query *elastic.BoolQuery) {
	query = elastic.NewBoolQuery()
	for k, t := range indexMapping {
		switch t {
		case "int", "int64":
			if v, ok := pr[k]; ok {
				query = query.Filter(elastic.NewTermsQuery(k, v...))
			}
			if v, ok := pr[k+"_from"]; ok {
				query = query.Filter(elastic.NewRangeQuery(k).Gte(v[0]))
			}
			if v, ok := pr[k+"_to"]; ok {
				query = query.Filter(elastic.NewRangeQuery(k).Lte(v[0]))
			}
		case "string":
			if v, ok := pr[k]; ok {
				query = query.Filter(elastic.NewTermsQuery(k, v...))
			}
			if v, ok := pr[k+"_like"]; ok {
				// NOTE(review): v is a []interface{}, so %v renders it like
				// "[a b]" — confirm whether v[0] was intended as the single
				// like keyword here.
				likeMap := []model.QueryBodyWhereLike{
					{
						KWFields: []string{k},
						KW:       []string{fmt.Sprintf("%v", v)},
						Level:    model.LikeLevelHigh,
					},
				}
				// A failed like-clause build silently drops the condition.
				if o, e := d.queryBasicLike(likeMap, ""); e == nil {
					query = query.Must(o...)
				}
			}
		case "time":
			if v, ok := pr[k+"_from"]; ok {
				query = query.Filter(elastic.NewRangeQuery(k).Gte(v[0]))
			}
			if v, ok := pr[k+"_to"]; ok {
				query = query.Filter(elastic.NewRangeQuery(k).Lte(v[0]))
			}
		case "int_to_bin":
			if v, ok := pr[k]; ok {
				var arr []elastic.Query
				for _, i := range v {
					// Non-numeric input aborts the whole bin filter.
					item, err := strconv.ParseUint(i.(string), 10, 64)
					if err != nil {
						break
					}
					arr = append(arr, elastic.NewTermsQuery(k, 1<<(item-1)))
				}
				query = query.Filter(arr...)
			}
		case "array":
			if v, ok := pr[k+"_and"]; ok {
				// AND semantics: one filter clause per required value.
				for _, n := range v {
					query = query.Filter(elastic.NewTermsQuery(k, n))
				}
			}
			if v, ok := pr[k+"_or"]; ok {
				// OR semantics: a single terms query over all values.
				query = query.Filter(elastic.NewTermsQuery(k, v...))
			}
		}
	}
	return query
}
// LogAudit runs an audit-log search: it builds the filter query from pr,
// resolves the ctime-bounded index list, and executes the search. When no
// index covers the requested window an empty result set is returned.
func (d *Dao) LogAudit(c context.Context, pr map[string][]interface{}, sp *model.LogParams, business *model.Business) (res *model.SearchResult, err error) {
	res = &model.SearchResult{Result: []json.RawMessage{}, Page: &model.Page{}}
	query := d.getQuery(pr, business.Mapping)
	var indexName string
	if indexName, err = d.logIndexName(c, sp, business); err != nil {
		log.Error("d.LogAudit.logIndexName(%v)(%v)", err, indexName)
		return
	}
	if indexName == "" {
		// Nothing to search; hand back the empty result.
		return
	}
	if res, err = d.searchResult(c, business.IndexCluster, indexName, query, sp.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Bsp.AppID), "%v", err)
	}
	return
}
// LogAuditGroupBy runs an audit-log search collapsed on the field named by
// pr["group"][0], newest (by ctime) first, returning at most 1000 groups.
// NOTE(review): an unknown cluster or empty index list returns the empty
// result with a nil error — confirm callers treat that as "no data" rather
// than a failure.
func (d *Dao) LogAuditGroupBy(c context.Context, pr map[string][]interface{}, sp *model.LogParams, business *model.Business) (res *model.SearchResult, err error) {
	res = &model.SearchResult{
		Result: []json.RawMessage{},
		Page:   &model.Page{},
	}
	var (
		indexName    = ""
		searchResult *elastic.SearchResult
	)
	// Panics if pr has no "group" entry; callers must validate first.
	group := pr["group"][0].(string)
	if _, ok := d.esPool[business.IndexCluster]; !ok {
		PromError(fmt.Sprintf("es:集群不存在%s", "LogAuditGroupBy"), "s.dao.LogAuditGroupBy indexName:%s", "LogAuditGroupBy")
		return
	}
	query := d.getQuery(pr, business.Mapping)
	indexName, err = d.logIndexName(c, sp, business)
	if err != nil {
		log.Error("d.LogAuditGroupBy.logIndexName(%v)(%v)", err, indexName)
		return
	}
	if indexName == "" {
		return
	}
	// Collapse on the group field so only the latest doc per group is returned.
	collapse := elastic.NewCollapseBuilder(group).MaxConcurrentGroupRequests(1)
	searchResult, err = d.esPool[business.IndexCluster].Search().Index(indexName).Type("base").Query(query).
		Sort("ctime", false).Collapse(collapse).Size(1000).Do(c)
	if err != nil {
		log.Error("d.LogAuditGroupBy(%v)", err)
		return
	}
	for _, hit := range searchResult.Hits.Hits {
		var t json.RawMessage
		err = json.Unmarshal(*hit.Source, &t)
		if err != nil {
			log.Error("es:%s 返回不是json!!!", business.IndexCluster)
			return
		}
		res.Result = append(res.Result, t)
	}
	// Pagination echoes the request; Total is the number of collapsed groups.
	res.Page.Ps = sp.Bsp.Ps
	res.Page.Pn = sp.Bsp.Pn
	res.Page.Total = int64(len(res.Result))
	return
}
// LogAuditDelete removes audit-log documents matching the built filter from
// every index covering the ctime window, via ES delete-by-query.
// res.Page.Total reports how many documents were deleted; an empty index
// list is a no-op with a nil error.
func (d *Dao) LogAuditDelete(c context.Context, pr map[string][]interface{}, sp *model.LogParams, business *model.Business) (res *model.SearchResult, err error) {
	var (
		indexName    string
		searchResult *elastic.BulkIndexByScrollResponse
	)
	res = &model.SearchResult{
		Result: []json.RawMessage{},
		Page:   &model.Page{},
	}
	query := d.getQuery(pr, business.Mapping)
	indexName, err = d.logIndexName(c, sp, business)
	if err != nil {
		log.Error("d.LogAuditDelete.logIndexName(%v)(%v)", err, indexName)
		return
	}
	if indexName == "" {
		return
	}
	// Batch size 10000 per delete-by-query scroll.
	searchResult, err = d.esPool[business.IndexCluster].DeleteByQuery().Index(indexName).Type("base").Query(query).Size(10000).Do(c)
	if err != nil {
		log.Error("d.LogAuditDelete.DeleteByQuery(%v)(%v)", err, indexName)
		return
	}
	res.Page.Total = searchResult.Total
	return
}
// LogUserAction runs a user-action log search: it builds the filter query
// from pr, resolves the ctime-bounded index list, and executes the search.
// When no index covers the requested window an empty result set is returned.
func (d *Dao) LogUserAction(c context.Context, pr map[string][]interface{}, sp *model.LogParams, business *model.Business) (res *model.SearchResult, err error) {
	res = &model.SearchResult{Result: []json.RawMessage{}, Page: &model.Page{}}
	query := d.getQuery(pr, business.Mapping)
	var indexName string
	if indexName, err = d.logIndexName(c, sp, business); err != nil {
		log.Error("d.LogUserAction.logIndexName(%v)(%v)", err, indexName)
		return
	}
	if indexName == "" {
		// Nothing to search; hand back the empty result.
		return
	}
	if res, err = d.searchResult(c, business.IndexCluster, indexName, query, sp.Bsp); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Bsp.AppID), "%v", err)
	}
	return
}
// LogUserActionDelete removes user-action log documents matching the built
// filter from every index covering the ctime window, via ES delete-by-query.
// res.Page.Total reports how many documents were deleted; an empty index
// list is a no-op with a nil error.
func (d *Dao) LogUserActionDelete(c context.Context, pr map[string][]interface{}, sp *model.LogParams, business *model.Business) (res *model.SearchResult, err error) {
	var (
		indexName    string
		searchResult *elastic.BulkIndexByScrollResponse
	)
	res = &model.SearchResult{
		Result: []json.RawMessage{},
		Page:   &model.Page{},
	}
	query := d.getQuery(pr, business.Mapping)
	indexName, err = d.logIndexName(c, sp, business)
	if err != nil {
		log.Error("d.LogUserActionDelete.logIndexName(%v)(%v)", err, indexName)
		return
	}
	if indexName == "" {
		return
	}
	// Batch size 10000 per delete-by-query scroll.
	searchResult, err = d.esPool[business.IndexCluster].DeleteByQuery().Index(indexName).Type("base").Query(query).Size(10000).Do(c)
	if err != nil {
		log.Error("d.LogUserActionDelete.DeleteByQuery(%v)(%v)", err, indexName)
		return
	}
	res.Page.Total = searchResult.Total
	return
}
// UDepTs fetches department info for the given uids from the manager service.
// Params:
//   uids - user ids, comma-joined into the request's "uids" query parameter.
// A transport failure or a non-zero business code yields a non-nil err.
func (d *Dao) UDepTs(c context.Context, uids []string) (res *model.UDepTsData, err error) {
	params := url.Values{}
	params.Set("uids", strings.Join(uids, ","))
	if err = d.client.Get(c, d.managerDep, "", params, &res); err != nil {
		err = errors.Wrapf(err, "d.httpSearch url(%s)", d.managerDep+"?"+params.Encode())
		log.Error("d.httpSearch url(%s)", d.managerDep+"?"+params.Encode())
		return
	}
	if res.Code != 0 {
		// Fix: err is nil here, and errors.Wrapf(nil, ...) returns nil, so a
		// bad business code was previously swallowed and the caller saw
		// success. Build a fresh error instead.
		err = errors.Errorf("response url(%s) code(%d)", d.managerDep+"?"+params.Encode(), res.Code)
		log.Error("response url(%s) code(%d)", d.managerDep+"?"+params.Encode(), res.Code)
		return
	}
	return
}
// IP resolves location info for the given IPs from the manager service.
// Params:
//   ip - IP addresses, comma-joined into the request's "ips" query parameter.
// A transport failure or a non-zero business code yields a non-nil err.
func (d *Dao) IP(c context.Context, ip []string) (res *model.IPData, err error) {
	params := url.Values{}
	params.Set("ips", strings.Join(ip, ","))
	if err = d.client.Get(c, d.managerIP, "", params, &res); err != nil {
		err = errors.Wrapf(err, "d.httpSearch url(%s)", d.managerIP+"?"+params.Encode())
		log.Error("d.httpSearch url(%s)", d.managerIP+"?"+params.Encode())
		return
	}
	if res.Code != 0 {
		// Fix two defects: errors.Wrapf(nil, ...) returns nil, so the bad
		// code was silently dropped; and the message pointed at d.managerDep
		// instead of d.managerIP.
		err = errors.Errorf("response url(%s) code(%d)", d.managerIP+"?"+params.Encode(), res.Code)
		log.Error("response url(%s) code(%d)", d.managerIP+"?"+params.Encode(), res.Code)
		return
	}
	return
}
// LogCount bumps today's usage counters for a log endpoint: one overall
// access counter plus per-uid and per-business breakdowns. The first failed
// statement is logged and aborts the remaining increments.
func (d *Dao) LogCount(c context.Context, name string, business int, uid interface{}) {
	date := time.Now().Format("2006-01-02")
	counters := []struct {
		query string
		args  []interface{}
	}{
		{_count, []interface{}{name + "_access", date}},
		{_percent, []interface{}{name + "_uid", date, uid}},
		{_percent, []interface{}{name + "_business", date, business}},
	}
	for _, s := range counters {
		if _, err := d.db.Exec(c, s.query, s.args...); err != nil {
			log.Error("d.db.Exec err(%v)", err)
			return
		}
	}
}

View File

@@ -0,0 +1,282 @@
package dao
import (
"context"
"testing"
"time"
"github.com/smartystreets/goconvey/convey"
"go-common/app/admin/main/search/model"
)
// TestDaoNewLog smoke-tests the business-config reload; it has no assertions.
func TestDaoNewLog(t *testing.T) {
	convey.Convey("NewLog", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			d.NewLog()
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}
// TestDaoGetLogInfo exercises the unknown-appID path of GetLogInfo.
func TestDaoGetLogInfo(t *testing.T) {
	convey.Convey("GetLogInfo", t, func(ctx convey.C) {
		var (
			appID = ""
			id    = int(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			business, ok := d.GetLogInfo(appID, id)
			ctx.Convey("Then business,ok should not be nil.", func(ctx convey.C) {
				ctx.So(ok, convey.ShouldNotBeNil)
				ctx.So(business, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoinitMapping exercises initMapping with an empty appID; the query
// against the non-existent "digger_" table is expected to error.
func TestDaoinitMapping(t *testing.T) {
	convey.Convey("initMapping", t, func(ctx convey.C) {
		var (
			appID = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			business, err := d.initMapping(appID)
			ctx.Convey("Then business should not be nil.", func(ctx convey.C) {
				ctx.So(business, convey.ShouldNotBeNil)
				ctx.So(err, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaologIndexName drives logIndexName across every supported IndexFormat
// partitioning scheme (yearly, monthly, weekly, daily, unpartitioned) with a
// wide ctime window.
func TestDaologIndexName(t *testing.T) {
	convey.Convey("logIndexName", t, func(ctx convey.C) {
		var (
			c = context.Background()
			p = &model.LogParams{
				CTimeFrom: "2010-01-01 00:00:00",
				CTimeTo:   "2020-01-01 00:00:00",
			}
			business = &model.Business{
				ID:    0,
				AppID: "log_audit",
			}
		)
		ctx.Convey("2006", func(ctx convey.C) {
			business.IndexFormat = "2006"
			res, err := d.logIndexName(c, p, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
		ctx.Convey("2006-01", func(ctx convey.C) {
			business.IndexFormat = "2006-01"
			res, err := d.logIndexName(c, p, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
		ctx.Convey("2006-01-week", func(ctx convey.C) {
			business.IndexFormat = "2006-01-week"
			res, err := d.logIndexName(c, p, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
		ctx.Convey("2006-01-02", func(ctx convey.C) {
			business.IndexFormat = "2006-01-02"
			res, err := d.logIndexName(c, p, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
		ctx.Convey("all", func(ctx convey.C) {
			business.IndexFormat = "all"
			res, err := d.logIndexName(c, p, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaogetLogAuditIndexName smoke-tests index-name rendering with empty
// name/format inputs.
func TestDaogetLogAuditIndexName(t *testing.T) {
	convey.Convey("getLogAuditIndexName", t, func(ctx convey.C) {
		var (
			business  = int(0)
			indexName = ""
			format    = ""
			time      = time.Now()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			index := getLogAuditIndexName(business, indexName, format, time)
			ctx.Convey("Then index should not be nil.", func(ctx convey.C) {
				ctx.So(index, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaogetQuery verifies getQuery returns a non-nil bool query even for
// nil parameter/mapping inputs.
func TestDaogetQuery(t *testing.T) {
	convey.Convey("getQuery", t, func(ctx convey.C) {
		var (
			pr           map[string][]interface{}
			indexMapping map[string]string
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			query := d.getQuery(pr, indexMapping)
			ctx.Convey("Then query should not be nil.", func(ctx convey.C) {
				ctx.So(query, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogAudit exercises LogAudit with zero-value params; the empty index
// list makes it return the empty result without touching ES.
func TestDaoLogAudit(t *testing.T) {
	convey.Convey("LogAudit", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			pr       map[string][]interface{}
			sp       = &model.LogParams{}
			business = &model.Business{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.LogAudit(c, pr, sp, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogAuditGroupBy exercises LogAuditGroupBy with a dummy group field;
// the empty cluster name takes the early-return path.
func TestDaoLogAuditGroupBy(t *testing.T) {
	convey.Convey("LogAuditGroupBy", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			pr = map[string][]interface{}{
				"group": {"group"},
			}
			sp       = &model.LogParams{}
			business = &model.Business{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.LogAuditGroupBy(c, pr, sp, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogAuditDelete exercises LogAuditDelete with zero-value params; the
// empty index list makes it a no-op.
func TestDaoLogAuditDelete(t *testing.T) {
	convey.Convey("LogAuditDelete", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			pr       map[string][]interface{}
			sp       = &model.LogParams{}
			business = &model.Business{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.LogAuditDelete(c, pr, sp, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogUserAction exercises LogUserAction with zero-value params; the
// empty index list makes it return the empty result without touching ES.
func TestDaoLogUserAction(t *testing.T) {
	convey.Convey("LogUserAction", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			pr       map[string][]interface{}
			sp       = &model.LogParams{}
			business = &model.Business{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.LogUserAction(c, pr, sp, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogUserActionDelete exercises LogUserActionDelete with zero-value
// params; the empty index list makes it a no-op.
func TestDaoLogUserActionDelete(t *testing.T) {
	convey.Convey("LogUserActionDelete", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			pr       map[string][]interface{}
			sp       = &model.LogParams{}
			business = &model.Business{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.LogUserActionDelete(c, pr, sp, business)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoUDepTs exercises UDepTs against the manager department endpoint
// with no uids.
func TestDaoUDepTs(t *testing.T) {
	convey.Convey("UDepTs", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			uids = []string{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.UDepTs(c, uids)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoIP smoke-tests the IP lookup; assertions stay commented out because
// the call depends on a live manager service.
func TestDaoIP(t *testing.T) {
	convey.Convey("IP", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ip := []string{
			"127.0.0.1",
		}
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, err :=
			d.IP(c, ip)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoLogCount smoke-tests the counter upserts; it has no assertions.
func TestDaoLogCount(t *testing.T) {
	convey.Convey("LogCount", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			name     = ""
			business = int(0)
			uid      = interface{}(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			d.LogCount(c, name, business, uid)
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}

View File

@@ -0,0 +1,394 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"net/url"
"strings"
"time"
"go-common/app/admin/main/search/model"
sqlx "go-common/library/database/sql"
)
// SQL statements for the management (digger_*) tables.
// NOTE(review): the list/total/asset statements splice a caller-supplied
// string into the LIKE pattern via fmt.Sprintf ('%%%s%%' / %s) instead of a
// bind parameter — potential SQL injection; confirm inputs are sanitized
// before the Sprintf.
const (
	_mngBusinessListSQL       = `select id,business,description,app_ids from digger_business where business like '%%%s%%' limit ?,?`
	_mngBusinessListTotalSQL  = `select count(*) from digger_business where business like '%%%s%%'`
	_mngBusinessAllSQL        = `select id,business,description,app_ids from digger_business`
	_mngAddBusinessSQL        = `insert into digger_business (business,description,app_ids) values (?,?,?)`
	_mngUpdateBusinessSQL     = `update digger_business set business=?,description=?,app_ids=? where id=?`
	_mngBusinessInfoSQL       = `select id,business,description,app_ids from digger_business where id=?`
	_mngBusinessInfoByNameSQL = `select id,business,description,app_ids from digger_business where business=?`
	// Asset statements; the list/total variants take a pre-built WHERE clause.
	_mngAssetListSQL      = `select id,name,type,src,description from digger_asset %s limit ?,?`
	_mngAssetTotalSQL     = `select count(*) from digger_asset %s`
	_mngAssetAllSQL       = `select id,name,type,src,description from digger_asset`
	_mngAssetInfoSQL      = `select id,name,type,src,description from digger_asset where id=?`
	_mngAssetInfoByNameSQL = `select id,name,type,src,description from digger_asset where name=?`
	_mngAddAssetSQL       = `insert into digger_asset (name,type,src,description) values (?,?,?,?)`
	_mngUpdateAssetSQL    = `update digger_asset set name=?,type=?,src=?,description=? where id=?`
	// App statements: the wide column list mirrors the digger_app schema.
	_mngApplistSQL = `select id,business,appid,description,db_name,es_name,table_name,databus_name,table_prefix,table_format,index_prefix,
	index_version,index_format,index_type,index_id,data_index_suffix,index_mapping,data_fields,data_extra,review_num,review_time,
	sleep,size,sql_by_id,sql_by_mtime,sql_by_idmtime,databus_info,databus_index_id,query_max_indexes from digger_app where business=?`
	_mngAppInfoSQL = `select id,business,appid,description,db_name,es_name,table_name,databus_name,table_prefix,table_format,index_prefix,
	index_version,index_format,index_type,index_id,data_index_suffix,index_mapping,data_fields,data_extra,review_num,review_time,
	sleep,size,sql_by_id,sql_by_mtime,sql_by_idmtime,databus_info,databus_index_id,query_max_indexes from digger_app where id=?`
	_mngAppInfoByAppidSQL = `select id,business,appid,description,db_name,es_name,table_name,databus_name,table_prefix,table_format,index_prefix,
	index_version,index_format,index_type,index_id,data_index_suffix,index_mapping,data_fields,data_extra,review_num,review_time,
	sleep,size,sql_by_id,sql_by_mtime,sql_by_idmtime,databus_info,databus_index_id,query_max_indexes from digger_app where appid=?`
	_mngAddAppSQL    = `insert into digger_app (business,appid,description) values (?,?,?)`
	_mngUpdateAppSQL = `update digger_app set business=?,appid=?,description=?,db_name=?,es_name=?,table_name=?,databus_name=?,table_prefix=?,table_format=?,index_prefix=?,
	index_version=?,index_format=?,index_type=?,index_id=?,data_index_suffix=?,index_mapping=?,data_fields=?,data_extra=?,review_num=?,review_time=?,
	sleep=?,size=?,sql_by_id=?,sql_by_mtime=?,sql_by_idmtime=?,databus_info=?,databus_index_id=?,query_max_indexes=? where id=?`
	_mngUpdateAppAssetTableSQL   = `update digger_app set table_prefix=?,table_format=? where table_name=?`
	_mngUpdateAppAssetDatabusSQL = `update digger_app set databus_info=?,databus_index_id=? where databus_name=?`
	// Counter statements backing the usage dashboards.
	_mngCountSQL   = `select time,count from digger_count where business=? and type=? and time >= ?`
	_mngPercentSQL = `select name,count from digger_count where business=? and type=? and time = ?`
)
// BusinessList returns one page of digger_business rows whose name matches
// '%name%', decoding each row's JSON app list into Apps.
// Params:
//   name          - substring filter on the business name (may be empty).
//   offset, limit - SQL LIMIT window.
// The name is spliced into the LIKE pattern with Sprintf, so quote and
// backslash characters are escaped first; this keeps the statement well
// formed and closes the SQL-injection hole the raw interpolation left open.
func (d *Dao) BusinessList(ctx context.Context, name string, offset, limit int) (list []*model.MngBusiness, err error) {
	safe := strings.Replace(strings.Replace(name, `\`, `\\`, -1), `'`, `''`, -1)
	sqlStr := fmt.Sprintf(_mngBusinessListSQL, safe)
	rows, err := d.db.Query(ctx, sqlStr, offset, limit)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		b := &model.MngBusiness{}
		if err = rows.Scan(&b.ID, &b.Name, &b.Desc, &b.AppsJSON); err != nil {
			return
		}
		b.Apps = make([]*model.MngBusinessApp, 0)
		if b.AppsJSON != "" {
			if err = json.Unmarshal([]byte(b.AppsJSON), &b.Apps); err != nil {
				return
			}
		}
		list = append(list, b)
	}
	err = rows.Err()
	return
}
// BusinessTotal counts the digger_business rows whose name matches '%name%'.
// Quote and backslash characters in name are escaped before Sprintf
// interpolation to keep the LIKE pattern well formed and injection-safe.
func (d *Dao) BusinessTotal(ctx context.Context, name string) (total int64, err error) {
	safe := strings.Replace(strings.Replace(name, `\`, `\\`, -1), `'`, `''`, -1)
	sqlStr := fmt.Sprintf(_mngBusinessListTotalSQL, safe)
	err = d.db.QueryRow(ctx, sqlStr).Scan(&total)
	return
}
// BusinessAll loads every digger_business row, decoding each row's JSON app
// list into Apps.
func (d *Dao) BusinessAll(ctx context.Context) (list []*model.MngBusiness, err error) {
	rows, err := d.db.Query(ctx, _mngBusinessAllSQL)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		item := &model.MngBusiness{}
		if err = rows.Scan(&item.ID, &item.Name, &item.Desc, &item.AppsJSON); err != nil {
			return
		}
		// Always hand back a non-nil (possibly empty) app slice.
		item.Apps = make([]*model.MngBusinessApp, 0)
		if item.AppsJSON != "" {
			if err = json.Unmarshal([]byte(item.AppsJSON), &item.Apps); err != nil {
				return
			}
		}
		list = append(list, item)
	}
	err = rows.Err()
	return
}
// AddBusiness inserts a business row and returns the auto-generated id.
func (d *Dao) AddBusiness(ctx context.Context, b *model.MngBusiness) (id int64, err error) {
	result, err := d.db.Exec(ctx, _mngAddBusinessSQL, b.Name, b.Desc, b.AppsJSON)
	if err != nil {
		return
	}
	return result.LastInsertId()
}
// UpdateBusiness persists name, description and apps JSON for the business id.
func (d *Dao) UpdateBusiness(ctx context.Context, b *model.MngBusiness) (err error) {
	if _, err = d.db.Exec(ctx, _mngUpdateBusinessSQL, b.Name, b.Desc, b.AppsJSON, b.ID); err != nil {
		return
	}
	return
}
// BusinessInfo fetches one business by id, decoding its apps JSON.
// It returns (nil, nil) when no row exists.
func (d *Dao) BusinessInfo(ctx context.Context, id int64) (info *model.MngBusiness, err error) {
	info = new(model.MngBusiness)
	err = d.db.QueryRow(ctx, _mngBusinessInfoSQL, id).Scan(&info.ID, &info.Name, &info.Desc, &info.AppsJSON)
	if err != nil {
		if err == sqlx.ErrNoRows {
			info, err = nil, nil
		}
		return
	}
	info.Apps = make([]*model.MngBusinessApp, 0)
	if info.AppsJSON != "" {
		err = json.Unmarshal([]byte(info.AppsJSON), &info.Apps)
	}
	return
}
// BusinessInfoByName fetches one business by its unique name, decoding its
// apps JSON. It returns (nil, nil) when no row exists.
func (d *Dao) BusinessInfoByName(ctx context.Context, name string) (info *model.MngBusiness, err error) {
	info = new(model.MngBusiness)
	err = d.db.QueryRow(ctx, _mngBusinessInfoByNameSQL, name).Scan(&info.ID, &info.Name, &info.Desc, &info.AppsJSON)
	if err != nil {
		if err == sqlx.ErrNoRows {
			info, err = nil, nil
		}
		return
	}
	info.Apps = make([]*model.MngBusinessApp, 0)
	if info.AppsJSON != "" {
		err = json.Unmarshal([]byte(info.AppsJSON), &info.Apps)
	}
	return
}
// AssetList returns one page (offset/limit) of assets filtered by type and/or
// a name substring.
//
// Fix: the previous version interpolated name directly into the LIKE clause
// via fmt.Sprintf, which was SQL-injectable; both filters are now bound as
// query parameters.
func (d *Dao) AssetList(ctx context.Context, typ int, name string, offset, limit int) (list []*model.MngAsset, err error) {
	where := " where 1 "
	args := make([]interface{}, 0, 4)
	if typ > 0 {
		where += " and type=? "
		args = append(args, typ)
	}
	if name != "" {
		where += " and name like ?"
		args = append(args, "%"+name+"%")
	}
	args = append(args, offset, limit)
	sqlStr := fmt.Sprintf(_mngAssetListSQL, where)
	rows, err := d.db.Query(ctx, sqlStr, args...)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		a := &model.MngAsset{}
		if err = rows.Scan(&a.ID, &a.Name, &a.Type, &a.Config, &a.Desc); err != nil {
			return
		}
		list = append(list, a)
	}
	err = rows.Err()
	return
}
// AssetTotal returns the number of assets matching the type/name filters.
//
// Fix: name was previously interpolated into the LIKE clause via fmt.Sprintf
// (SQL injection); both filters are now bound as query parameters.
func (d *Dao) AssetTotal(ctx context.Context, typ int, name string) (total int64, err error) {
	where := " where 1 "
	args := make([]interface{}, 0, 2)
	if typ > 0 {
		where += " and type=? "
		args = append(args, typ)
	}
	if name != "" {
		where += " and name like ?"
		args = append(args, "%"+name+"%")
	}
	sqlStr := fmt.Sprintf(_mngAssetTotalSQL, where)
	err = d.db.QueryRow(ctx, sqlStr, args...).Scan(&total)
	return
}
// AssetAll loads every asset row.
func (d *Dao) AssetAll(ctx context.Context) (list []*model.MngAsset, err error) {
	rows, err := d.db.Query(ctx, _mngAssetAllSQL)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		asset := new(model.MngAsset)
		if err = rows.Scan(&asset.ID, &asset.Name, &asset.Type, &asset.Config, &asset.Desc); err != nil {
			return
		}
		list = append(list, asset)
	}
	err = rows.Err()
	return
}
// AssetInfo fetches one asset by id; returns (nil, nil) when no row exists.
func (d *Dao) AssetInfo(ctx context.Context, id int64) (info *model.MngAsset, err error) {
	info = new(model.MngAsset)
	err = d.db.QueryRow(ctx, _mngAssetInfoSQL, id).Scan(&info.ID, &info.Name, &info.Type, &info.Config, &info.Desc)
	if err == sqlx.ErrNoRows {
		info, err = nil, nil
	}
	return
}
// AssetInfoByName fetches one asset by its unique name; returns (nil, nil)
// when no row exists.
func (d *Dao) AssetInfoByName(ctx context.Context, name string) (info *model.MngAsset, err error) {
	info = new(model.MngAsset)
	err = d.db.QueryRow(ctx, _mngAssetInfoByNameSQL, name).Scan(&info.ID, &info.Name, &info.Type, &info.Config, &info.Desc)
	if err == sqlx.ErrNoRows {
		info, err = nil, nil
	}
	return
}
// AddAsset inserts an asset row and returns the auto-generated id.
func (d *Dao) AddAsset(ctx context.Context, b *model.MngAsset) (id int64, err error) {
	result, err := d.db.Exec(ctx, _mngAddAssetSQL, b.Name, b.Type, b.Config, b.Desc)
	if err != nil {
		return
	}
	return result.LastInsertId()
}
// UpdateAsset persists name, type, config and description for the asset id.
func (d *Dao) UpdateAsset(ctx context.Context, b *model.MngAsset) (err error) {
	if _, err = d.db.Exec(ctx, _mngUpdateAssetSQL, b.Name, b.Type, b.Config, b.Desc, b.ID); err != nil {
		return
	}
	return
}
// AppList returns every app configuration row under the given business.
func (d *Dao) AppList(ctx context.Context, business string) (list []*model.MngApp, err error) {
	rows, err := d.db.Query(ctx, _mngApplistSQL, business)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		app := &model.MngApp{}
		err = rows.Scan(&app.ID, &app.Business, &app.AppID, &app.Desc, &app.DBName, &app.ESName, &app.TableName, &app.DatabusName, &app.TablePrefix, &app.TableFormat,
			&app.IndexPrefix, &app.IndexVersion, &app.IndexFormat, &app.IndexType, &app.IndexID, &app.DataIndexSuffix, &app.IndexMapping,
			&app.DataFields, &app.DataExtra, &app.ReviewNum, &app.ReviewTime, &app.Sleep, &app.Size, &app.SQLByID, &app.SQLByMtime,
			&app.SQLByIDMtime, &app.DatabusInfo, &app.DatabusIndexID, &app.QueryMaxIndexes)
		if err != nil {
			return
		}
		list = append(list, app)
	}
	err = rows.Err()
	return
}
// AppInfo fetches one app configuration by primary key; returns (nil, nil)
// when no row exists.
func (d *Dao) AppInfo(ctx context.Context, id int64) (a *model.MngApp, err error) {
	a = new(model.MngApp)
	err = d.db.QueryRow(ctx, _mngAppInfoSQL, id).Scan(&a.ID, &a.Business, &a.AppID, &a.Desc, &a.DBName, &a.ESName, &a.TableName, &a.DatabusName,
		&a.TablePrefix, &a.TableFormat, &a.IndexPrefix, &a.IndexVersion, &a.IndexFormat, &a.IndexType, &a.IndexID, &a.DataIndexSuffix, &a.IndexMapping,
		&a.DataFields, &a.DataExtra, &a.ReviewNum, &a.ReviewTime, &a.Sleep, &a.Size, &a.SQLByID, &a.SQLByMtime,
		&a.SQLByIDMtime, &a.DatabusInfo, &a.DatabusIndexID, &a.QueryMaxIndexes)
	if err == sqlx.ErrNoRows {
		a, err = nil, nil
	}
	return
}
// AppInfoByAppid fetches one app configuration by its appid; returns
// (nil, nil) when no row exists.
func (d *Dao) AppInfoByAppid(ctx context.Context, appid string) (a *model.MngApp, err error) {
	a = new(model.MngApp)
	err = d.db.QueryRow(ctx, _mngAppInfoByAppidSQL, appid).Scan(&a.ID, &a.Business, &a.AppID, &a.Desc, &a.DBName, &a.ESName, &a.TableName, &a.DatabusName,
		&a.TablePrefix, &a.TableFormat, &a.IndexPrefix, &a.IndexVersion, &a.IndexFormat, &a.IndexType, &a.IndexID, &a.DataIndexSuffix, &a.IndexMapping,
		&a.DataFields, &a.DataExtra, &a.ReviewNum, &a.ReviewTime, &a.Sleep, &a.Size, &a.SQLByID, &a.SQLByMtime,
		&a.SQLByIDMtime, &a.DatabusInfo, &a.DatabusIndexID, &a.QueryMaxIndexes)
	if err == sqlx.ErrNoRows {
		a, err = nil, nil
	}
	return
}
// AddApp inserts a minimal app row (business/appid/desc) and returns its id.
func (d *Dao) AddApp(ctx context.Context, a *model.MngApp) (id int64, err error) {
	result, err := d.db.Exec(ctx, _mngAddAppSQL, a.Business, a.AppID, a.Desc)
	if err != nil {
		return
	}
	return result.LastInsertId()
}
// UpdateApp persists every editable column of an app configuration row.
func (d *Dao) UpdateApp(ctx context.Context, a *model.MngApp) (err error) {
	_, err = d.db.Exec(ctx, _mngUpdateAppSQL,
		a.Business, a.AppID, a.Desc, a.DBName, a.ESName, a.TableName, a.DatabusName, a.TablePrefix, a.TableFormat,
		a.IndexPrefix, a.IndexVersion, a.IndexFormat, a.IndexType, a.IndexID, a.DataIndexSuffix, a.IndexMapping,
		a.DataFields, a.DataExtra, a.ReviewNum, a.ReviewTime, a.Sleep, a.Size, a.SQLByID, a.SQLByMtime,
		a.SQLByIDMtime, a.DatabusInfo, a.DatabusIndexID, a.QueryMaxIndexes, a.ID)
	return
}
// UpdateAppAssetTable propagates a table asset's prefix/format to every app
// that references the table name.
func (d *Dao) UpdateAppAssetTable(ctx context.Context, name string, t *model.MngAssetTable) (err error) {
	if _, err = d.db.Exec(ctx, _mngUpdateAppAssetTableSQL, t.TablePrefix, t.TableFormat, name); err != nil {
		return
	}
	return
}
// UpdateAppAssetDatabus propagates a databus asset's info/index-id to every
// app that references the databus name.
func (d *Dao) UpdateAppAssetDatabus(ctx context.Context, name string, v *model.MngAssetDatabus) (err error) {
	if _, err = d.db.Exec(ctx, _mngUpdateAppAssetDatabusSQL, v.DatabusInfo, v.DatabusIndexID, name); err != nil {
		return
	}
	return
}
// MngCount returns per-day counts from digger_count for the given
// business/type, covering the last 365 days.
func (d *Dao) MngCount(ctx context.Context, c *model.MngCount) (list []*model.MngCountRes, err error) {
	list = []*model.MngCountRes{}
	// window starts 365 days ago (date part only)
	since := time.Now().AddDate(0, 0, -365).Format("2006-01-02")
	rows, err := d.db.Query(ctx, _mngCountSQL, c.Business, c.Type, since)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		item := &model.MngCountRes{}
		if err = rows.Scan(&item.Time, &item.Count); err != nil {
			return
		}
		// keep only the date part of the scanned datetime string
		item.Time = item.Time[:10]
		list = append(list, item)
	}
	err = rows.Err()
	return
}
// MngPercent returns yesterday's per-name counts from digger_count for the
// given business/type.
func (d *Dao) MngPercent(ctx context.Context, c *model.MngCount) (list []*model.MngPercentRes, err error) {
	list = []*model.MngPercentRes{}
	yesterday := time.Now().AddDate(0, 0, -1).Format("2006-01-02")
	rows, err := d.db.Query(ctx, _mngPercentSQL, c.Business, c.Type, yesterday)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		item := &model.MngPercentRes{}
		if err = rows.Scan(&item.Name, &item.Count); err != nil {
			return
		}
		list = append(list, item)
	}
	err = rows.Err()
	return
}
// Unames fetches uid -> uname mappings from the manager service.
//
// Fix: a non-zero business code from the remote service was previously
// swallowed (nil error returned alongside a failed response); it is now
// surfaced as an error so callers can distinguish success from failure.
func (d *Dao) Unames(c context.Context, uids []string) (res *model.UnamesData, err error) {
	params := url.Values{}
	params.Set("uids", strings.Join(uids, ","))
	if err = d.client.Get(c, d.managerUnames, "", params, &res); err != nil {
		return
	}
	if res.Code != 0 {
		err = fmt.Errorf("unames url(%s) code(%d)", d.managerUnames, res.Code)
	}
	return
}

View File

@@ -0,0 +1,396 @@
package dao
import (
"context"
"go-common/app/admin/main/search/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoBusinessList exercises the paged business listing against the test DB.
func TestDaoBusinessList(t *testing.T) {
	convey.Convey("BusinessList", t, func(ctx convey.C) {
		var (
			c      = context.Background()
			name   = ""
			offset = int(0)
			limit  = int(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.BusinessList(c, name, offset, limit)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				//ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoBusinessTotal checks the business count query runs without error.
func TestDaoBusinessTotal(t *testing.T) {
	convey.Convey("BusinessTotal", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			name = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			total, err := d.BusinessTotal(c, name)
			ctx.Convey("Then err should be nil.total should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(total, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoBusinessAll expects the test DB to contain at least one business row.
func TestDaoBusinessAll(t *testing.T) {
	convey.Convey("BusinessAll", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			list, err := d.BusinessAll(c)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAddBusiness is intentionally disabled: it would write to the shared DB.
func TestDaoAddBusiness(t *testing.T) {
	convey.Convey("AddBusiness", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	b = &model.MngBusiness{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	id, _ := d.AddBusiness(c, b)
		//	ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
		//		//ctx.So(err, convey.ShouldBeNil)
		//		ctx.So(id, convey.ShouldNotBeNil)
		//	})
		//})
	})
}

// TestDaoUpdateBusiness is intentionally disabled: it would write to the shared DB.
func TestDaoUpdateBusiness(t *testing.T) {
	convey.Convey("UpdateBusiness", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	b = &model.MngBusiness{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	err := d.UpdateBusiness(c, b)
		//	ctx.Convey("Then err should be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//	})
		//})
	})
}

// TestDaoBusinessInfo uses id 0, which may legitimately return (nil, nil).
func TestDaoBusinessInfo(t *testing.T) {
	convey.Convey("BusinessInfo", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			id = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.BusinessInfo(c, id)
			ctx.Convey("Then err should be nil.info should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				//ctx.So(info, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoBusinessInfoByName expects a seeded business named "log" in the test DB.
func TestDaoBusinessInfoByName(t *testing.T) {
	convey.Convey("BusinessInfoByName", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			name = "log"
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			info, err := d.BusinessInfoByName(c, name)
			ctx.Convey("Then err should be nil.info should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(info, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoAssetList exercises the filtered asset listing with empty filters.
func TestDaoAssetList(t *testing.T) {
	convey.Convey("AssetList", t, func(ctx convey.C) {
		var (
			c      = context.Background()
			typ    = int(0)
			name   = ""
			offset = int(0)
			limit  = int(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.AssetList(c, typ, name, offset, limit)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				//ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAssetTotal checks the asset count query runs without error.
func TestDaoAssetTotal(t *testing.T) {
	convey.Convey("AssetTotal", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			typ  = int(0)
			name = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			total, err := d.AssetTotal(c, typ, name)
			ctx.Convey("Then err should be nil.total should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(total, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAssetAll expects the test DB to contain at least one asset row.
func TestDaoAssetAll(t *testing.T) {
	convey.Convey("AssetAll", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			list, err := d.AssetAll(c)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAssetInfo uses id 0, which may legitimately return (nil, nil).
func TestDaoAssetInfo(t *testing.T) {
	convey.Convey("AssetInfo", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			id = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.AssetInfo(c, id)
			ctx.Convey("Then err should be nil.info should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				//ctx.So(info, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAssetInfoByName looks up an asset by (empty) name.
func TestDaoAssetInfoByName(t *testing.T) {
	convey.Convey("AssetInfoByName", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			name = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			info, err := d.AssetInfoByName(c, name)
			ctx.Convey("Then err should be nil.info should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(info, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAddAsset is intentionally disabled: it would write to the shared DB.
func TestDaoAddAsset(t *testing.T) {
	convey.Convey("AddAsset", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	b = &model.MngAsset{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	//id, err :=
		//	d.AddAsset(c, b)
		//	ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
		//		//ctx.So(err, convey.ShouldBeNil)
		//		//ctx.So(id, convey.ShouldNotBeNil)
		//	})
		//})
	})
}

// TestDaoUpdateAsset is intentionally disabled: it would write to the shared DB.
func TestDaoUpdateAsset(t *testing.T) {
	convey.Convey("UpdateAsset", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	b = &model.MngAsset{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	err := d.UpdateAsset(c, b)
		//	ctx.Convey("Then err should be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//	})
		//})
	})
}
// TestDaoAppList expects at least one app row under the (empty) business filter.
func TestDaoAppList(t *testing.T) {
	convey.Convey("AppList", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			business = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			list, err := d.AppList(c, business)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAppInfo only smoke-tests the call; assertions are disabled.
func TestDaoAppInfo(t *testing.T) {
	convey.Convey("AppInfo", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			id = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//a, err :=
			d.AppInfo(c, id)
			ctx.Convey("Then err should be nil.a should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(a, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoAppInfoByAppid looks up an app by (empty) appid; nil result is allowed.
func TestDaoAppInfoByAppid(t *testing.T) {
	convey.Convey("AppInfoByAppid", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			appid = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.AppInfoByAppid(c, appid)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoAddApp is intentionally disabled: it would write to the shared DB.
func TestDaoAddApp(t *testing.T) {
	convey.Convey("AddApp", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	a = &model.MngApp{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	id, err :=
		//	d.AddApp(c, a)
		//	ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//		ctx.So(id, convey.ShouldNotBeNil)
		//	})
		//})
	})
}

// TestDaoUpdateApp is intentionally disabled: it would write to the shared DB.
func TestDaoUpdateApp(t *testing.T) {
	convey.Convey("UpdateApp", t, func(ctx convey.C) {
		//var (
		//	c = context.Background()
		//	a = &model.MngApp{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	err := d.UpdateApp(c, a)
		//	ctx.Convey("Then err should be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//	})
		//})
	})
}

// TestDaoUpdateAppAssetTable is intentionally disabled: it would write to the shared DB.
func TestDaoUpdateAppAssetTable(t *testing.T) {
	convey.Convey("UpdateAppAssetTable", t, func(ctx convey.C) {
		//var (
		//	c    = context.Background()
		//	name = ""
		//	no   = &model.MngAssetTable{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	err := d.UpdateAppAssetTable(c, name, no)
		//	ctx.Convey("Then err should be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//	})
		//})
	})
}

// TestDaoUpdateAppAssetDatabus is intentionally disabled: it would write to the shared DB.
func TestDaoUpdateAppAssetDatabus(t *testing.T) {
	convey.Convey("UpdateAppAssetDatabus", t, func(ctx convey.C) {
		//var (
		//	c    = context.Background()
		//	name = ""
		//	v    = &model.MngAssetDatabus{}
		//)
		//ctx.Convey("When everything goes positive", func(ctx convey.C) {
		//	err := d.UpdateAppAssetDatabus(c, name, v)
		//	ctx.Convey("Then err should be nil.", func(ctx convey.C) {
		//		ctx.So(err, convey.ShouldBeNil)
		//	})
		//})
	})
}
// TestDaoMngCount checks the daily-count query with a zero-valued filter.
func TestDaoMngCount(t *testing.T) {
	convey.Convey("MngCount", t, func(ctx convey.C) {
		var (
			c = context.Background()
			v = &model.MngCount{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			list, err := d.MngCount(c, v)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoMngPercent checks yesterday's per-name counts with a zero-valued filter.
func TestDaoMngPercent(t *testing.T) {
	convey.Convey("MngPercent", t, func(ctx convey.C) {
		var (
			c = context.Background()
			v = &model.MngCount{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			list, err := d.MngPercent(c, v)
			ctx.Convey("Then err should be nil.list should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoUnames hits the remote manager service; requires test network access.
func TestDaoUnames(t *testing.T) {
	convey.Convey("Unames", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			uids = []string{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.Unames(c, uids)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,147 @@
package dao
import (
"context"
"database/sql"
"fmt"
"time"
"go-common/app/admin/main/search/model"
"go-common/library/log"
)
// Asset types and SQL statements for the v2 (gf_*) management tables.
//
// NOTE(review): "_bussiness…" is a typo for "_business…"; the names are kept
// because other functions in this file reference them — rename in a dedicated
// change if desired.
const (
	_typeDatabus         = `databus`
	_typeDB              = `db`
	_typeTable           = `table`
	_businessAllV2SQL    = `SELECT id,pid,name,description,state FROM gf_business`
	_businessInfoV2SQL   = `SELECT id,pid,name,description,data_conf,index_conf,business_conf,state,mtime FROM gf_business WHERE name=?`
	_bussinessInsSQL     = `INSERT INTO gf_business (pid,name,description) VALUES(?,?,?)`
	_bussinessUpdateSQL  = `UPDATE gf_business SET %s=? WHERE name=?`
	_assetDBTablesV2SQL  = `SELECT id,type,db,name,regex,fields,description,state FROM gf_asset WHERE type=? OR type=?`
	_assetDBInsSQL       = `INSERT INTO gf_asset (type,name,description,dsn) VALUES(?,?,?,?)`
	_assetTableInsSQL    = `INSERT INTO gf_asset (type,name,db,regex,fields,description) VALUES(?,?,?,?,?,?)`
	_assetTableUpdateSQL = `UPDATE gf_asset set fields=? WHERE name=?`
	_assetSQL            = `SELECT id,type,name,dsn,db,regex,fields,description,state FROM gf_asset WHERE name=?`
)
// BusinessAllV2 loads every row from gf_business.
func (d *Dao) BusinessAllV2(c context.Context) (list []*model.GFBusiness, err error) {
	rows, err := d.db.Query(c, _businessAllV2SQL)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		item := new(model.GFBusiness)
		if err = rows.Scan(&item.ID, &item.PID, &item.Name, &item.Description, &item.State); err != nil {
			return
		}
		list = append(list, item)
	}
	err = rows.Err()
	return
}
// BusinessInfoV2 fetches one gf_business row by name; it returns (nil, nil)
// when the row does not exist.
//
// Fixes: the dead `if err != nil` check after QueryRow is removed (err was
// always nil at that point), and a scan failure now returns immediately with
// a nil result instead of falling through to format a garbage Mtime and
// returning a partially populated struct alongside the error.
func (d *Dao) BusinessInfoV2(c context.Context, name string) (b *model.GFBusiness, err error) {
	b = new(model.GFBusiness)
	if err = d.db.QueryRow(c, _businessInfoV2SQL, name).Scan(&b.ID, &b.PID, &b.Name, &b.Description, &b.DataConf, &b.IndexConf, &b.BusinessConf, &b.State, &b.Mtime); err != nil {
		b = nil
		if err == sql.ErrNoRows {
			err = nil
		}
		return
	}
	// normalize the RFC3339 mtime to "2006-01-02 15:04:05"; a parse error is
	// deliberately ignored (the zero time formats as 0001-01-01 00:00:00).
	tm, _ := time.Parse(time.RFC3339, b.Mtime)
	b.Mtime = tm.Format("2006-01-02 15:04:05")
	return
}
// BusinessIns inserts a gf_business row and returns its auto-increment id.
func (d *Dao) BusinessIns(c context.Context, pid int64, name, description string) (rows int64, err error) {
	result, err := d.db.Exec(c, _bussinessInsSQL, pid, name, description)
	if err != nil {
		log.Error("d.db.Exec error(%v)", err)
		return
	}
	return result.LastInsertId()
}
// BusinessUpdate sets a single column of the gf_business row identified by
// name, returning the number of affected rows.
//
// NOTE(review): field is spliced into the statement via fmt.Sprintf (column
// names cannot be bound as placeholders); callers must ensure field comes
// from a fixed whitelist of column names, otherwise this is SQL-injectable.
func (d *Dao) BusinessUpdate(c context.Context, name, field, value string) (rows int64, err error) {
	res, err := d.db.Exec(c, fmt.Sprintf(_bussinessUpdateSQL, field), value, name)
	if err != nil {
		log.Error("d.db.Exec error(%v)", err)
		return
	}
	return res.RowsAffected()
}
// AssetDBTables loads every gf_asset row whose type is "db" or "table".
func (d *Dao) AssetDBTables(c context.Context) (list []*model.GFAsset, err error) {
	rows, err := d.db.Query(c, _assetDBTablesV2SQL, _typeDB, _typeTable)
	if err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		asset := new(model.GFAsset)
		if err = rows.Scan(&asset.ID, &asset.Type, &asset.DB, &asset.Name, &asset.Regex, &asset.Fields, &asset.Description, &asset.State); err != nil {
			return
		}
		list = append(list, asset)
	}
	err = rows.Err()
	return
}
// AssetDBIns inserts a db-type asset and returns its auto-increment id.
func (d *Dao) AssetDBIns(c context.Context, name, description, dsn string) (rows int64, err error) {
	result, err := d.db.Exec(c, _assetDBInsSQL, _typeDB, name, description, dsn)
	if err != nil {
		log.Error("d.db.Exec error(%v)", err)
		return
	}
	return result.LastInsertId()
}
// AssetTableIns inserts a table-type asset and returns its auto-increment id.
func (d *Dao) AssetTableIns(c context.Context, name, db, regex, fields, description string) (rows int64, err error) {
	result, err := d.db.Exec(c, _assetTableInsSQL, _typeTable, name, db, regex, fields, description)
	if err != nil {
		log.Error("d.db.Exec error(%v)", err)
		return
	}
	return result.LastInsertId()
}
// UpdateAssetTable replaces the fields column of the named table asset,
// returning the number of affected rows.
func (d *Dao) UpdateAssetTable(c context.Context, name, fields string) (rows int64, err error) {
	result, err := d.db.Exec(c, _assetTableUpdateSQL, fields, name)
	if err != nil {
		log.Error("d.db.Exec error(%v)", err)
		return
	}
	return result.RowsAffected()
}
// Asset fetches one gf_asset row by name; it returns (nil, nil) when the row
// does not exist.
//
// Fix: on scan errors other than sql.ErrNoRows the previous version returned
// a partially populated struct alongside the error; it now returns nil so the
// result is never usable when err is non-nil.
func (d *Dao) Asset(c context.Context, name string) (r *model.GFAsset, err error) {
	r = new(model.GFAsset)
	if err = d.db.QueryRow(c, _assetSQL, name).Scan(&r.ID, &r.Type, &r.Name, &r.DSN, &r.DB, &r.Regex, &r.Fields, &r.Description, &r.State); err != nil {
		r = nil
		if err == sql.ErrNoRows {
			err = nil
		}
	}
	return
}

View File

@@ -0,0 +1,145 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoBusinessAllV2 expects at least one gf_business row in the test DB.
func TestDaoBusinessAllV2(t *testing.T) {
	convey.Convey("BusinessAllV2", t, func(convCtx convey.C) {
		var (
			c = context.Background()
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			list, err := d.BusinessAllV2(c)
			convCtx.Convey("Then err should be nil.list should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
				convCtx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoBusinessInfoV2 expects a seeded gf_business row named "dm".
func TestDaoBusinessInfoV2(t *testing.T) {
	convey.Convey("BusinessInfoV2", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			name = "dm"
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			b, err := d.BusinessInfoV2(c, name)
			convCtx.Convey("Then err should be nil.b should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
				convCtx.So(b, convey.ShouldNotBeNil)
			})
		})
	})
}
//func TestDaoBusinessIns(t *testing.T) {
// convey.Convey("BusinessIns", t, func(convCtx convey.C) {
// var (
// c = context.Background()
// pid = int64(0)
// name = ""
// description = ""
// )
// convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
// rows, err := d.BusinessIns(c, pid, name, description)
// convCtx.Convey("Then err should be nil.rows should not be nil.", func(convCtx convey.C) {
// convCtx.So(err, convey.ShouldBeNil)
// convCtx.So(rows, convey.ShouldNotBeNil)
// })
// })
// })
//}
//func TestDaoBusinessUpdate(t *testing.T) {
// convey.Convey("BusinessUpdate", t, func(convCtx convey.C) {
// var (
// c = context.Background()
// name = ""
// field = ""
// value = ""
// )
// convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
// rows, err := d.BusinessUpdate(c, name, field, value)
// convCtx.Convey("Then err should be nil.rows should not be nil.", func(convCtx convey.C) {
// convCtx.So(err, convey.ShouldBeNil)
// convCtx.So(rows, convey.ShouldNotBeNil)
// })
// })
// })
//}
// TestDaoAssetDBTables expects at least one db/table asset row in the test DB.
func TestDaoAssetDBTables(t *testing.T) {
	convey.Convey("AssetDBTables", t, func(convCtx convey.C) {
		var (
			c = context.Background()
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			list, err := d.AssetDBTables(c)
			convCtx.Convey("Then err should be nil.list should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
				convCtx.So(list, convey.ShouldNotBeNil)
			})
		})
	})
}
//
//func TestDaoAssetDBIns(t *testing.T) {
// convey.Convey("AssetDBIns", t, func(convCtx convey.C) {
// var (
// c = context.Background()
// name = ""
// description = ""
// dsn = ""
// )
// convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
// rows, err := d.AssetDBIns(c, name, description, dsn)
// convCtx.Convey("Then err should be nil.rows should not be nil.", func(convCtx convey.C) {
// convCtx.So(err, convey.ShouldBeNil)
// convCtx.So(rows, convey.ShouldNotBeNil)
// })
// })
// })
//}
//func TestDaoAssetTableIns(t *testing.T) {
// convey.Convey("AssetTableIns", t, func(convCtx convey.C) {
// var (
// c = context.Background()
// name = ""
// db = ""
// regex = ""
// fields = ""
// description = ""
// )
// convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
// rows, err := d.AssetTableIns(c, name, db, regex, fields, description)
// convCtx.Convey("Then err should be nil.rows should not be nil.", func(convCtx convey.C) {
// convCtx.So(err, convey.ShouldBeNil)
// convCtx.So(rows, convey.ShouldNotBeNil)
// })
// })
// })
//}
// TestDaoAsset expects a seeded gf_asset row named "bilibili_article".
func TestDaoAsset(t *testing.T) {
	convey.Convey("Asset", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			name = "bilibili_article"
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			r, err := d.Asset(c, name)
			convCtx.Convey("Then err should be nil.r should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
				convCtx.So(r, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,443 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"go-common/app/admin/main/search/model"
"go-common/library/log"
"gopkg.in/olivere/elastic.v5"
)
// SQL for loading per-appid query routing configuration from digger_app.
const (
	_queryConfSQL = `select appid,es_name,index_prefix,index_type,index_id,index_mapping,query_max_indexes from digger_app`
)
// QueryConf loads the per-appid query configuration map from digger_app.
func (d *Dao) QueryConf(ctx context.Context) (res map[string]*model.QueryConfDetail, err error) {
	rows, err := d.queryConfStmt.Query(ctx)
	if err != nil {
		log.Error("d.queryConfStmt.Query() error(%v)", err)
		return
	}
	defer rows.Close()
	res = make(map[string]*model.QueryConfDetail)
	for rows.Next() {
		var appid string
		detail := new(model.QueryConfDetail)
		if err = rows.Scan(&appid, &detail.ESCluster, &detail.IndexPrefix, &detail.IndexType, &detail.IndexID, &detail.IndexMapping, &detail.MaxIndicesNum); err != nil {
			log.Error("d.QueryConf() rows.Scan() error(%v)", err)
			return
		}
		res[appid] = detail
	}
	err = rows.Err()
	return
}
// querysModel pairs a built elastic query with the field it targets and the
// kind of where-predicate ("eq", "or", "in", "range", "like") it came from,
// so QueryBasic can decide how to merge it into the bool query.
type querysModel struct {
	field     string
	whereKind string
	esQuery   elastic.Query
}
// QueryBasic builds the shared bool query from sp's where clauses; the
// returned boolQuery is exposed so business-specific callers can append their
// own clauses before execution. qbDebug collects parse errors and, when
// sp.DebugLevel != 0, the rendered DSL.
func (d *Dao) QueryBasic(c context.Context, sp *model.QueryParams) (mixedQuery *elastic.BoolQuery, qbDebug *model.QueryDebugResult) {
	mixedQuery = elastic.NewBoolQuery()
	qbDebug = &model.QueryDebugResult{}
	querys := []*querysModel{}
	netstedQuerys := map[string]*elastic.BoolQuery{} // key: path value: boolQuery
	//fields
	if len(sp.QueryBody.Fields) == 0 {
		sp.QueryBody.Fields = []string{}
	}
	//from done
	//where
	if sp.QueryBody.Where == nil {
		sp.QueryBody.Where = &model.QueryBodyWhere{} // default value so the derefs below are safe
	}
	//where - eq: exact term matches
	for k, v := range sp.QueryBody.Where.EQ {
		querys = append(querys, &querysModel{
			field:     k,
			whereKind: "eq",
			esQuery:   elastic.NewTermQuery(k, v),
		})
	}
	//where - or: term matches merged as should clauses below
	for k, v := range sp.QueryBody.Where.Or {
		querys = append(querys, &querysModel{
			field:     k,
			whereKind: "or",
			esQuery:   elastic.NewTermQuery(k, v),
		})
	}
	//where - in: terms queries, capped at 1024 values per field
	for k, v := range sp.QueryBody.Where.In {
		if len(v) > 1024 {
			e := fmt.Sprintf("where in 超过1024 business(%s) error(%v)", sp.Business, v)
			log.Error(e)
			qbDebug.AddErrMsg(e)
			continue
		}
		querys = append(querys, &querysModel{
			field:     k,
			whereKind: "in",
			esQuery:   elastic.NewTermsQuery(k, v...),
		})
	}
	//where - range: interval strings parsed by queryBasicRange
	ranges, err := d.queryBasicRange(sp.QueryBody.Where.Range)
	if err != nil {
		qbDebug.AddErrMsg(err.Error())
	}
	for k, v := range ranges {
		querys = append(querys, &querysModel{
			field:     k,
			whereKind: "range",
			esQuery:   v,
		})
	}
	//where - combo
	for _, v := range sp.QueryBody.Where.Combo {
		// outer wrapper: bool + should + minimum_should_match
		combo := elastic.NewBoolQuery()
		// each inner group is likewise bool + should + minimum
		cmbEQ := elastic.NewBoolQuery()
		cmbIn := elastic.NewBoolQuery()
		cmbRange := elastic.NewBoolQuery()
		cmbNotEQ := elastic.NewBoolQuery()
		cmbNotIn := elastic.NewBoolQuery()
		cmbNotRange := elastic.NewBoolQuery()
		// every minimum defaults to 1
		if v.Min.Min == 0 {
			v.Min.Min = 1
		}
		if v.Min.EQ == 0 {
			v.Min.EQ = 1
		}
		if v.Min.In == 0 {
			v.Min.In = 1
		}
		if v.Min.Range == 0 {
			v.Min.Range = 1
		}
		if v.Min.NotEQ == 0 {
			v.Min.NotEQ = 1
		}
		if v.Min.NotIn == 0 {
			v.Min.NotIn = 1
		}
		if v.Min.NotRange == 0 {
			v.Min.NotRange = 1
		}
		// per-group should clauses
		for _, vEQ := range v.EQ {
			for eqK, eqV := range vEQ {
				cmbEQ.Should(elastic.NewTermQuery(eqK, eqV))
			}
		}
		for _, vIn := range v.In {
			for inK, inV := range vIn {
				cmbIn.Should(elastic.NewTermsQuery(inK, inV...))
			}
		}
		for _, vRange := range v.Range {
			ranges, _ := d.queryBasicRange(vRange)
			for _, rangeV := range ranges {
				cmbRange.Should(rangeV)
			}
		}
		for _, notEQ := range v.NotEQ {
			for k, v := range notEQ {
				cmbNotEQ.Should(elastic.NewTermQuery(k, v))
			}
		}
		for _, notIn := range v.NotIn {
			for k, v := range notIn {
				cmbNotIn.Should(elastic.NewTermsQuery(k, v...))
			}
		}
		for _, notRange := range v.NotRange {
			ranges, _ := d.queryBasicRange(notRange)
			for _, v := range ranges {
				cmbNotRange.Should(v)
			}
		}
		// per-group minimum_should_match
		if len(v.EQ) > 0 {
			combo.Should(cmbEQ.MinimumNumberShouldMatch(v.Min.EQ))
		}
		if len(v.In) > 0 {
			combo.Should(cmbIn.MinimumNumberShouldMatch(v.Min.In))
		}
		if len(v.Range) > 0 {
			combo.Should(cmbRange.MinimumNumberShouldMatch(v.Min.Range))
		}
		if len(v.NotEQ) > 0 {
			combo.MustNot(elastic.NewBoolQuery().Should(cmbNotEQ.MinimumNumberShouldMatch(v.Min.NotEQ)))
		}
		if len(v.NotIn) > 0 {
			combo.MustNot(elastic.NewBoolQuery().Should(cmbNotIn.MinimumNumberShouldMatch(v.Min.NotIn)))
		}
		if len(v.NotRange) > 0 {
			combo.MustNot(elastic.NewBoolQuery().Should(cmbNotRange.MinimumNumberShouldMatch(v.Min.NotRange)))
		}
		// merge the combo group into the main query
		mixedQuery.Filter(combo.MinimumNumberShouldMatch(v.Min.Min))
	}
	//where - like
	like, err := d.queryBasicLike(sp.QueryBody.Where.Like, sp.Business)
	if err != nil {
		qbDebug.AddErrMsg(err.Error())
	}
	for _, v := range like {
		querys = append(querys, &querysModel{
			whereKind: "like",
			esQuery:   v,
		})
	}
	//mixedQuery
	for _, q := range querys {
		// like TODO: map-typed like fields should also support must_not and nested
		if q.field == "" && q.whereKind == "like" {
			mixedQuery.Must(q.esQuery)
			continue
		}
		if q.field == "" {
			continue
		}
		// prepare nested: a DSL may carry only one nested clause per path,
		// otherwise queries misbehave — so clauses are grouped by path first
		if mapField := strings.Split(q.field, "."); len(mapField) > 1 && mapField[0] != "" {
			if _, ok := netstedQuerys[mapField[0]]; !ok {
				netstedQuerys[mapField[0]] = elastic.NewBoolQuery()
			}
			if bl, ok := sp.QueryBody.Where.Not[q.whereKind][q.field]; ok && bl {
				// mixedQuery.Must(elastic.NewNestedQuery(mapField[0], elastic.NewBoolQuery().MustNot(q.esQuery)))
				netstedQuerys[mapField[0]].MustNot(q.esQuery)
				continue
			}
			// mixedQuery.Must(elastic.NewNestedQuery(mapField[0], elastic.NewBoolQuery().Must(q.esQuery)))
			netstedQuerys[mapField[0]].Must(q.esQuery)
			continue
		}
		// must not
		if bl, ok := sp.QueryBody.Where.Not[q.whereKind][q.field]; ok && bl {
			mixedQuery.MustNot(q.esQuery)
			continue
		}
		// should
		if q.whereKind == "or" {
			mixedQuery.Should(q.esQuery)
			mixedQuery.MinimumShouldMatch("1") // fixed at 1 for now
			continue
		}
		// default
		mixedQuery.Filter(q.esQuery)
		// random order with seed
		// NOTE(review): this block sits inside the query loop, so the random
		// function-score query is appended once per "default" clause rather
		// than once per request — confirm whether it should run after the loop.
		if sp.QueryBody.OrderRandomSeed != "" {
			random := elastic.NewRandomFunction().Seed(sp.QueryBody.OrderRandomSeed)
			score := elastic.NewFunctionScoreQuery().Add(elastic.NewBoolQuery(), random)
			mixedQuery.Must(score)
		}
	}
	// insert nested
	for k, n := range netstedQuerys {
		mixedQuery.Must(elastic.NewNestedQuery(k, n))
	}
	// DSL: render the query source for debugging when requested
	if sp.DebugLevel != 0 {
		if src, e := mixedQuery.Source(); e == nil {
			if data, er := json.Marshal(src); er == nil {
				qbDebug.DSL = string(data)
			}
		}
	}
	return
}
// queryBasicRange parses interval expressions such as "[1,10)" or "(,5]"
// into elastic range queries keyed by field name. A side whose bound is
// empty is left open. A malformed entry records err and is skipped, so one
// bad interval does not abort the remaining fields.
func (d *Dao) queryBasicRange(rangeMap map[string]string) (rangeQuery map[string]*elastic.RangeQuery, err error) {
	rangeQuery = make(map[string]*elastic.RangeQuery)
	for field, expr := range rangeMap {
		trimmed := strings.Trim(expr, " ")
		if trimmed == "" {
			continue
		}
		runes := []rune(trimmed)
		if len(runes) <= 3 {
			// too short to hold brackets plus a comma — broken interval format
			err = fmt.Errorf("sp.QueryBody.Where.Range range format err. error(%v)", expr)
			continue
		}
		opening := string(runes[0])
		closing := string(runes[len(runes)-1])
		bounds := strings.Split(strings.Trim(expr, "[]() "), ",")
		if len(bounds) != 2 {
			err = fmt.Errorf("sp.QueryBody.Where.Range Fromto err")
			continue
		}
		lower := strings.Trim(bounds[0], " ")
		upper := strings.Trim(bounds[1], " ")
		rq := elastic.NewRangeQuery(field)
		applied := 0
		if lower != "" {
			switch opening {
			case "(":
				rq.Gt(lower)
				applied++
			case "[":
				rq.Gte(lower)
				applied++
			}
		}
		if upper != "" {
			switch closing {
			case ")":
				rq.Lt(upper)
				applied++
			case "]":
				rq.Lte(upper)
				applied++
			}
		}
		// keep the query only when at least one bound was actually applied
		if applied > 0 {
			rangeQuery[field] = rq
		}
	}
	return
}
// queryBasicLike builds keyword-match queries for the "like" section of a
// query body. Three match levels are supported: high (per-rune wildcard
// query_string), middle (2-gram term queries), and low/default (multi_match
// best_fields). The business name tunes minimum_should_match for a few
// hard-coded businesses. err is currently never set.
func (d *Dao) queryBasicLike(likeMap []model.QueryBodyWhereLike, business string) (likeQuery []elastic.Query, err error) {
	for _, v := range likeMap {
		if len(v.KW) == 0 {
			continue
		}
		switch v.Level {
		case model.LikeLevelHigh:
			// build "*a* *b*..." wildcard tokens from each rune of the first keyword
			kw := []string{}
			r := []rune(v.KW[0])
			for i := 0; i < len(r); i++ {
				if k := string(r[i : i+1]); !strings.ContainsAny(k, "~[](){}^?:\"\\/!+-=&* ") { // strip query_string special symbols
					kw = append(kw, k)
				} else if len(kw) > 1 && kw[len(kw)-1:][0] != "*" {
					// a special symbol splits the pattern: close the current token and open a new one
					kw = append(kw, "*", " ", "*")
				}
			}
			if len(kw) == 0 || strings.Join(kw, "") == "* *" {
				continue
			}
			qs := elastic.NewQueryStringQuery("*" + strings.Trim(strings.Join(kw, ""), "* ") + "*").AllowLeadingWildcard(true) // default operator is OR
			if !v.Or {
				qs.DefaultOperator("AND")
			}
			for _, v := range v.KWFields {
				qs.Field(v)
			}
			likeQuery = append(likeQuery, qs)
		case model.LikeLevelMiddel:
			// a single-rune keyword needs special handling (no 2-gram possible)
			if r := []rune(v.KW[0]); len(r) == 1 && len(v.KW) == 1 {
				qs := elastic.NewQueryStringQuery("*" + string(r[:]) + "*").AllowLeadingWildcard(true) // default operator is OR
				if !v.Or {
					qs.DefaultOperator("AND")
				}
				for _, v := range v.KWFields {
					qs.Field(v)
				}
				likeQuery = append(likeQuery, qs)
				continue
			}
			// with a custom analyzer multi_match cannot apply minimum_should_match
			// (defaults to "at least one matches"), which keeps the result set too
			// large — emit explicit ngram(2,2) term queries instead
			for _, kw := range v.KW {
				rn := []rune(kw)
				for i := 0; i+1 < len(rn); i++ {
					kwStr := string(rn[i : i+2])
					for _, kwField := range v.KWFields {
						likeQuery = append(likeQuery, elastic.NewTermQuery(kwField, kwStr))
					}
				}
			}
		case "", model.LikeLevelLow:
			qs := elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("90%") // default operator is AND
			// TODO business-specific match tuning should move to configuration
			if business == "copyright" {
				qs.MinimumShouldMatch("10%")
			}
			if business == "academy_archive" {
				qs.MinimumShouldMatch("50%")
			}
			if v.Or {
				qs.Operator("OR")
			}
			likeQuery = append(likeQuery, qs)
		}
	}
	return
}
// Scroll pages through sp.QueryBody.From with the ES scroll API (batches of
// 5000, 1m cursor TTL) and collects at most Pn*Ps raw documents into
// res.Result. The server-side cursor is cleared asynchronously on exit.
// NOTE(review): debug is never populated, and errors inside the scroll loop
// are only reported to prometheus — they are not returned to the caller.
func (d *Dao) Scroll(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	var (
		tList    []json.RawMessage
		tLen     int
		ScrollID = ""
	)
	res = &model.QueryResult{}
	esCluster := sp.AppIDConf.ESCluster
	query, _ := d.QueryBasic(c, sp)
	eSearch, ok := d.esPool[esCluster]
	if !ok {
		PromError(fmt.Sprintf("es:集群不存在%s", esCluster), "s.dao.searchResult indexName:%s", esCluster)
		return
	}
	fsc := elastic.NewFetchSourceContext(true).Include(sp.QueryBody.Fields...)
	// multi sort: score placed first or last depending on OrderScoreFirst
	sorterSlice := []elastic.Sorter{}
	if len(sp.QueryBody.Where.Like) > 0 && sp.QueryBody.OrderScoreFirst { // NOTE(review): a non-empty Like slice holding only empty entries still triggers this
		sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
	}
	for _, i := range sp.QueryBody.Order {
		for k, v := range i {
			if v == "asc" {
				sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Asc())
			} else {
				sorterSlice = append(sorterSlice, elastic.NewFieldSort(k).Desc())
			}
		}
	}
	if len(sp.QueryBody.Where.Like) > 0 && !sp.QueryBody.OrderScoreFirst {
		sorterSlice = append(sorterSlice, elastic.NewScoreSort().Desc())
	}
	for {
		// the first iteration passes an empty ScrollId, which opens the scroll
		searchResult, err := eSearch.Scroll().Index(sp.QueryBody.From).Type("base").
			Query(query).FetchSourceContext(fsc).Size(5000).Scroll("1m").ScrollId(ScrollID).SortBy(sorterSlice...).Do(c)
		if err == io.EOF {
			// scroll exhausted — normal termination
			break
		} else if err != nil {
			// err here shadows the named return: the failure is swallowed on purpose
			PromError(fmt.Sprintf("es:执行查询失败%s ", "Scroll"), "es:执行查询失败%v", err)
			break
		}
		ScrollID = searchResult.ScrollId
		for _, hit := range searchResult.Hits.Hits {
			var t json.RawMessage
			if err = json.Unmarshal(*hit.Source, &t); err != nil {
				PromError(fmt.Sprintf("es:Unmarshal%s ", "Scroll"), "es:Unmarshal%v", err)
				break
			}
			tList = append(tList, t)
			tLen++
			// stop once enough documents for the requested page span are collected
			if tLen >= sp.QueryBody.Pn*sp.QueryBody.Ps {
				goto ClearScroll
			}
		}
	}
ClearScroll:
	// release the server-side cursor asynchronously; best effort, result ignored
	go eSearch.ClearScroll().ScrollId(ScrollID).Do(context.Background())
	if res.Result, err = json.Marshal(tList); err != nil {
		PromError(fmt.Sprintf("es:Unmarshal%s ", "Scroll"), "es:Unmarshal%v", err)
		return
	}
	return
}

View File

@@ -0,0 +1,413 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"go-common/app/admin/main/search/model"
"go-common/library/ecode"
"go-common/library/log"
"gopkg.in/olivere/elastic.v5"
)
// ArchiveVideoScore orders first-round archive review results by a decayed
// function score: documents with user_type==1 get a heavy boost, and both
// groups decay exponentially by arc_senddate. Explicit ordering is dropped
// so the score decides the result order.
func (d *Dao) ArchiveVideoScore(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	query, qbDebug := d.QueryBasic(c, sp)
	// decay scale = whole days elapsed since epoch 1420041600 (2015-01-01 +08:00)
	elapsed := time.Now().Unix() - 1420041600
	scale := fmt.Sprintf("%dd", elapsed/(3600*24))
	decay := func(weight float64) elastic.ScoreFunction {
		return elastic.NewExponentialDecayFunction().FieldName("arc_senddate").Origin("2015-01-01 00:00:00").Scale(scale).Offset("1d").Decay(0.8).Weight(weight)
	}
	score := elastic.NewFunctionScoreQuery().
		Add(elastic.NewTermQuery("user_type", 1), decay(10000)).
		Add(nil, decay(1))
	query = query.Must(score)
	sp.QueryBody.Order = []map[string]string{}
	if res, debug, err = d.QueryResult(c, query, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
	}
	return
}
// ArchiveScore orders second-round archive review results by a decayed
// function score: documents with user_type==1 get a heavy boost, and both
// groups decay exponentially by ctime. Explicit ordering is dropped so the
// score decides the result order.
func (d *Dao) ArchiveScore(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	query, qbDebug := d.QueryBasic(c, sp)
	// decay scale = whole days elapsed since epoch 1420041600 (2015-01-01 +08:00)
	elapsed := time.Now().Unix() - 1420041600
	scale := fmt.Sprintf("%dd", elapsed/(3600*24))
	decay := func(weight float64) elastic.ScoreFunction {
		return elastic.NewExponentialDecayFunction().FieldName("ctime").Origin("2015-01-01 00:00:00").Scale(scale).Offset("1d").Decay(0.8).Weight(weight)
	}
	score := elastic.NewFunctionScoreQuery().
		Add(elastic.NewTermQuery("user_type", 1), decay(10000)).
		Add(nil, decay(1))
	query = query.Must(score)
	sp.QueryBody.Order = []map[string]string{}
	if res, debug, err = d.QueryResult(c, query, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
	}
	return
}
// TaskQaRandom returns a pseudo-randomly ordered result set. An optional
// "seed" EQ condition makes the ordering reproducible; it is consumed here
// (removed from the EQ map) so it never reaches the query itself.
func (d *Dao) TaskQaRandom(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	random := elastic.NewRandomFunction()
	if sp != nil && sp.QueryBody != nil && sp.QueryBody.Where != nil && sp.QueryBody.Where.EQ != nil {
		if seed, ok := sp.QueryBody.Where.EQ["seed"]; ok {
			random = elastic.NewRandomFunction().Seed(seed)
			delete(sp.QueryBody.Where.EQ, "seed")
		}
	}
	query, qbDebug := d.QueryBasic(c, sp)
	// fix: removed the dead `if err != nil` check that followed QueryBasic —
	// QueryBasic returns no error, so err was always nil there.
	// wrap a match-all bool query with the random function so ES orders hits
	// pseudo-randomly (stable for a given seed)
	score := elastic.NewFunctionScoreQuery().Add(elastic.NewBoolQuery(), random)
	qy := elastic.NewBoolQuery().Must(query, score)
	if res, debug, err = d.QueryResult(c, qy, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
	}
	return
}
// EsportsContestsDate drives the e-sports side-calendar linkage: it buckets
// contests by stime with a terms aggregation, collects each bucketed doc's
// "ids" field via a top_hits sub-aggregation, deduplicates them per date,
// and writes a map of date -> distinct-id count into res.Result.
func (d *Dao) EsportsContestsDate(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	res = &model.QueryResult{}
	// query basic
	query, qbDebug := d.QueryBasic(c, sp)
	debug = qbDebug
	esCluster := sp.AppIDConf.ESCluster
	if _, ok := d.esPool[esCluster]; !ok {
		debug.AddErrMsg("es:集群不存在" + esCluster)
		return
	}
	// group by stime; fetch only the "ids" source field, up to 1000 docs/bucket
	aggs := elastic.NewTermsAggregation()
	fsc := elastic.NewFetchSourceContext(true).Include("ids")
	aggs = aggs.Field("stime").Size(1000).SubAggregation("top_ids_hits", elastic.NewTopHitsAggregation().FetchSourceContext(fsc).Size(1000))
	searchPrepare := d.esPool[esCluster].Search().Index(sp.QueryBody.From).Query(query).Aggregation("group_by_stime", aggs).Size(0)
	if sp.DebugLevel == 2 {
		searchPrepare.Profile(true)
	}
	// fix: use the caller's context (was context.Background()), so request
	// cancellation and deadlines propagate to the ES round trip.
	searchResult, err := searchPrepare.Do(c)
	if err != nil {
		debug.AddErrMsg(fmt.Sprintf("es:执行查询失败%s. %v", esCluster, err))
		PromError(fmt.Sprintf("es:执行查询失败%s ", esCluster), "%v", err)
		return
	}
	result, ok := searchResult.Aggregations.Terms("group_by_stime")
	if !ok {
		return
	}
	// hitDoc mirrors the "hits" payload of the top_hits sub-aggregation
	type hitDoc struct {
		Hits []struct {
			Source struct {
				IDs []string `json:"ids"`
			} `json:"_source"`
		} `json:"hits"`
	}
	type idsRes struct {
		Date string
		IDs  []string
	}
	ids := []idsRes{}
	for _, b := range result.Buckets {
		if b.KeyAsString == nil {
			// fix: guard against a missing key_as_string instead of dereferencing
			// nil. Presence depends on the stime mapping — TODO confirm it is
			// always set for this index.
			continue
		}
		var hit hitDoc
		if list, ok := b.Terms("top_ids_hits"); ok {
			a, _ := list.Aggregations["hits"].MarshalJSON()
			if err = json.Unmarshal(a, &hit); err != nil {
				return
			}
			for _, h := range hit.Hits {
				ids = append(ids, idsRes{
					Date: *b.KeyAsString,
					IDs:  h.Source.IDs,
				})
			}
		}
	}
	// dedupe ids per date, then count distinct ids for each date
	resDoc := map[string]int{}
	resDocTmp := map[string]map[string]bool{}
	for _, v := range ids {
		if _, ok := resDocTmp[v.Date]; !ok {
			resDocTmp[v.Date] = map[string]bool{}
		}
		for _, id := range v.IDs {
			resDocTmp[v.Date][id] = true
		}
	}
	for date, idList := range resDocTmp {
		resDoc[date] = len(idList)
	}
	if doc, er := json.Marshal(resDoc); er != nil {
		debug.AddErrMsg(fmt.Sprintf("es:Unmarshal docBuckets es:Unmarshal%v", er))
	} else {
		res.Result = doc
	}
	return
}
// Archive state groups consumed by the creative-center handlers below: the
// symbolic filters "pubed" / "not_pubed" / "is_pubing" are translated into
// these concrete state lists (see the switch statements in
// CreativeArchiveSearch/Staff). Exact per-state semantics live in the
// archive service — TODO confirm against its state table.
var (
	_pubed    = []interface{}{-40, 0, 10000, 1, 1001, 15000, 20000, 30000}
	_notpubed = []interface{}{-2, -4, -5, -11, -12, -16}
	_ispubing = []interface{}{-1, -6, -7, -8, -9, -10, -13, -15, -30}
	// union of the three groups; append on the full-capacity literals
	// allocates fresh backing arrays, so _pubed is not aliased
	_all = append(append(_pubed, _notpubed...), _ispubing...)
)
// CreativeArchiveSearch serves the creative-center archive list: it returns
// the filtered video list ("vlist") together with a per-type aggregation
// ("tlist", grouped by pid) and a per-publish-state aggregation ("plist",
// pubed / not_pubed / is_pubing) in a single payload.
func (d *Dao) CreativeArchiveSearch(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	var (
		mid interface{}
		ok  bool
	)
	docBuckets := map[string]interface{}{}
	// fix: the guard used && which both defeated the validation and could
	// dereference a nil sp — it must be || (as in CreativeArchiveStaff/Apply).
	if sp == nil || sp.QueryBody == nil || sp.QueryBody.Where == nil || sp.QueryBody.Where.EQ == nil {
		return res, debug, ecode.RequestErr
	}
	if mid, ok = sp.QueryBody.Where.EQ["mid"]; !ok {
		return res, debug, ecode.RequestErr
	}
	// list: map the symbolic "state" value onto the concrete state lists
	if state, ok := sp.QueryBody.Where.EQ["state"]; ok {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		switch state {
		case "pubed":
			sp.QueryBody.Where.In["state"] = _pubed
		case "not_pubed":
			sp.QueryBody.Where.In["state"] = _notpubed
		case "is_pubing":
			sp.QueryBody.Where.In["state"] = _ispubing
		default:
			sp.QueryBody.Where.In["state"] = _all
		}
		delete(sp.QueryBody.Where.EQ, "state")
	} else {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		sp.QueryBody.Where.In["state"] = _all
	}
	query, qbDebug := d.QueryBasic(c, sp)
	if res, debug, err = d.QueryResult(c, query, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	docBuckets["vlist"] = res.Result
	// type aggregation: per-pid counts across all states
	// (Filter instead of Must for mid: no scoring needed, consistent with siblings)
	typeFilter := elastic.NewBoolQuery().Filter(elastic.NewTermsQuery("mid", mid))
	typeFilter = typeFilter.Filter(elastic.NewTermsQuery("state", _all...))
	for _, v := range sp.QueryBody.Where.Like {
		typeFilter = typeFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("100%"))
	}
	typeAgg := elastic.NewTermsAggregation().Field("pid")
	// Size(0): only aggregations are consumed, no hits needed (matches Staff/Apply)
	request1 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(typeFilter).Aggregation("pid", typeAgg).Size(0))
	// state aggregation: pubed / not_pubed / is_pubing counts
	stateFilter := elastic.NewBoolQuery().Filter(elastic.NewTermsQuery("mid", mid))
	if pid, ok := sp.QueryBody.Where.EQ["pid"]; ok {
		stateFilter = stateFilter.Filter(elastic.NewTermsQuery("pid", pid))
	}
	for _, v := range sp.QueryBody.Where.Like {
		// fix: was "stateFilter = typeFilter.Filter(...)", which both polluted
		// typeFilter and discarded the mid/pid filters built above
		stateFilter = stateFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("100%"))
	}
	stateAgg := elastic.NewFiltersAggregation().
		FilterWithName("pubed", elastic.NewTermsQuery("state", _pubed...)).
		FilterWithName("not_pubed", elastic.NewTermsQuery("state", _notpubed...)).
		FilterWithName("is_pubing", elastic.NewTermsQuery("state", _ispubing...))
	request2 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(stateFilter).Aggregation("state", stateAgg).Size(0))
	MultiRes, err := d.esPool[sp.AppIDConf.ESCluster].MultiSearch().Add(request1, request2).Do(c)
	if err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	// extract the raw aggregation buckets; unmarshal errors are tolerated —
	// a missing bucket simply leaves the corresponding list empty
	tmp := map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[0].Aggregations["pid"], &tmp)
	docBuckets["tlist"] = tmp["buckets"]
	tmp = map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[1].Aggregations["state"], &tmp)
	docBuckets["plist"] = tmp["buckets"]
	if resResult, e := json.Marshal(docBuckets); e != nil {
		log.Error("CreativeArchiveSearch.json.error(%v)", e)
	} else {
		res.Result = resResult
	}
	return
}
// CreativeArchiveStaff serves the creative-center staff view: like
// CreativeArchiveSearch, but the archive list is scoped by the single
// Where.Combo condition instead of a plain mid EQ. Returns "vlist"
// (documents), "tlist" (per-pid counts) and "plist" (publish-state counts).
func (d *Dao) CreativeArchiveStaff(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	docBuckets := map[string]interface{}{}
	if sp == nil || sp.QueryBody == nil || sp.QueryBody.Where == nil || sp.QueryBody.Where.Combo == nil || len(sp.QueryBody.Where.Combo) != 1 {
		return res, debug, ecode.RequestErr
	}
	combo := sp.QueryBody.Where.Combo[0]
	if len(combo.EQ) == 0 {
		return res, debug, ecode.RequestErr
	}
	// base filter reused by both aggregations below: only the combo condition
	queryListParams := &model.QueryParams{
		QueryBody: &model.QueryBody{
			Where: &model.QueryBodyWhere{
				Combo: sp.QueryBody.Where.Combo,
			},
		},
	}
	queryList, _ := d.QueryBasic(c, queryListParams)
	// list: map the symbolic "state" value onto the concrete state lists
	if state, ok := sp.QueryBody.Where.EQ["state"]; ok {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		switch state {
		case "pubed":
			sp.QueryBody.Where.In["state"] = _pubed
		case "not_pubed":
			sp.QueryBody.Where.In["state"] = _notpubed
		case "is_pubing":
			sp.QueryBody.Where.In["state"] = _ispubing
		default:
			sp.QueryBody.Where.In["state"] = _all
		}
		delete(sp.QueryBody.Where.EQ, "state")
	} else {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		sp.QueryBody.Where.In["state"] = _all
	}
	query, qbDebug := d.QueryBasic(c, sp)
	if res, debug, err = d.QueryResult(c, query, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	docBuckets["vlist"] = res.Result
	// type aggregation: per-pid counts across all states
	typeFilter := elastic.NewBoolQuery().Filter(queryList)
	typeFilter = typeFilter.Filter(elastic.NewTermsQuery("state", _all...))
	for _, v := range sp.QueryBody.Where.Like {
		typeFilter = typeFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("90%"))
	}
	typeAgg := elastic.NewTermsAggregation().Field("pid")
	request1 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(typeFilter).Aggregation("pid", typeAgg).Size(0))
	// state aggregation: pubed / not_pubed / is_pubing counts
	stateFilter := elastic.NewBoolQuery().Filter(queryList)
	if pid, ok := sp.QueryBody.Where.EQ["pid"]; ok {
		stateFilter = stateFilter.Filter(elastic.NewTermsQuery("pid", pid))
	}
	for _, v := range sp.QueryBody.Where.Like {
		// fix: was "stateFilter = typeFilter.Filter(...)", which both polluted
		// typeFilter and discarded the pid filter built above
		stateFilter = stateFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("90%"))
	}
	stateAgg := elastic.NewFiltersAggregation().
		// archive publish-state buckets
		FilterWithName("pubed", elastic.NewTermsQuery("state", _pubed...)).
		FilterWithName("not_pubed", elastic.NewTermsQuery("state", _notpubed...)).
		FilterWithName("is_pubing", elastic.NewTermsQuery("state", _ispubing...))
	request2 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(stateFilter).Aggregation("state", stateAgg).Size(0))
	MultiRes, err := d.esPool[sp.AppIDConf.ESCluster].MultiSearch().Add(request1, request2).Do(c)
	if err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	// extract the raw aggregation buckets; unmarshal errors are tolerated —
	// a missing bucket simply leaves the corresponding list empty
	tmp := map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[0].Aggregations["pid"], &tmp)
	docBuckets["tlist"] = tmp["buckets"]
	tmp = map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[1].Aggregations["state"], &tmp)
	docBuckets["plist"] = tmp["buckets"]
	if resResult, e := json.Marshal(docBuckets); e != nil {
		// fix: log tag previously said CreativeArchiveSearch
		log.Error("CreativeArchiveStaff.json.error(%v)", e)
	} else {
		res.Result = resResult
	}
	return
}
// CreativeArchiveApply serves the creative-center staff-application view:
// archives on which the given staff mid (apply_staff.apply_staff_mid) has
// an application, filterable by deal state (pending/processed/neglected).
// Returns "vlist", "tlist" (per-pid counts) and "plist" (deal-state counts).
// (fix: the doc comment previously named CreativeArchiveStaff.)
func (d *Dao) CreativeArchiveApply(c context.Context, sp *model.QueryParams) (res *model.QueryResult, debug *model.QueryDebugResult, err error) {
	var (
		applyStaffMid interface{}
		ok            bool
	)
	docBuckets := map[string]interface{}{}
	if sp == nil || sp.QueryBody == nil || sp.QueryBody.Where == nil || sp.QueryBody.Where.EQ == nil {
		return res, debug, ecode.RequestErr
	}
	if applyStaffMid, ok = sp.QueryBody.Where.EQ["apply_staff.apply_staff_mid"]; !ok {
		return res, debug, ecode.RequestErr
	}
	// list: map the symbolic deal state onto its concrete values
	if state, ok := sp.QueryBody.Where.EQ["apply_staff.deal_state"]; ok {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		switch state {
		case "pending": // awaiting processing
			sp.QueryBody.Where.In["apply_staff.deal_state"] = []interface{}{1}
		case "processed": // handled
			sp.QueryBody.Where.In["apply_staff.deal_state"] = []interface{}{2}
		case "neglected": // ignored
			sp.QueryBody.Where.In["apply_staff.deal_state"] = []interface{}{3}
		default:
			sp.QueryBody.Where.In["apply_staff.deal_state"] = []interface{}{1, 2, 3}
		}
		delete(sp.QueryBody.Where.EQ, "apply_staff.deal_state")
	} else {
		if sp.QueryBody.Where.In == nil {
			sp.QueryBody.Where.In = map[string][]interface{}{}
		}
		sp.QueryBody.Where.In["apply_staff.deal_state"] = []interface{}{1, 2, 3}
	}
	sp.QueryBody.Where.In["state"] = _all
	query, qbDebug := d.QueryBasic(c, sp)
	if res, debug, err = d.QueryResult(c, query, sp, qbDebug); err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	docBuckets["vlist"] = res.Result
	// type aggregation: per-pid counts over every deal state
	typeFilter := elastic.NewBoolQuery().Filter(
		elastic.NewTermsQuery("state", _all...),
		elastic.NewNestedQuery("apply_staff", elastic.NewBoolQuery().Must(
			elastic.NewTermQuery("apply_staff.apply_staff_mid", applyStaffMid),
			elastic.NewTermsQuery("apply_staff.deal_state", []interface{}{1, 2, 3}...),
		)),
	)
	for _, v := range sp.QueryBody.Where.Like {
		typeFilter = typeFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("90%"))
	}
	typeAgg := elastic.NewTermsAggregation().Field("pid")
	request1 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(typeFilter).Aggregation("pid", typeAgg).Size(0))
	// state aggregation: pending / processed / neglected counts
	stateFilter := elastic.NewBoolQuery().Filter(
		elastic.NewTermsQuery("state", _all...),
		elastic.NewNestedQuery("apply_staff", elastic.NewBoolQuery().Must(elastic.NewTermQuery("apply_staff.apply_staff_mid", applyStaffMid))),
	)
	if pid, ok := sp.QueryBody.Where.EQ["pid"]; ok {
		stateFilter = stateFilter.Filter(elastic.NewTermsQuery("pid", pid))
	}
	for _, v := range sp.QueryBody.Where.Like {
		// fix: was "stateFilter = typeFilter.Filter(...)", which both polluted
		// typeFilter and discarded the pid filter built above
		stateFilter = stateFilter.Filter(elastic.NewMultiMatchQuery(strings.Join(v.KW, " "), v.KWFields...).Type("best_fields").TieBreaker(0.6).MinimumShouldMatch("90%"))
	}
	stateAgg := elastic.NewFiltersAggregation().
		FilterWithName("pending", elastic.NewNestedQuery("apply_staff", elastic.NewBoolQuery().Must(elastic.NewTermQuery("apply_staff.apply_staff_mid", applyStaffMid), elastic.NewTermQuery("apply_staff.deal_state", 1)))).
		FilterWithName("processed", elastic.NewNestedQuery("apply_staff", elastic.NewBoolQuery().Must(elastic.NewTermQuery("apply_staff.apply_staff_mid", applyStaffMid), elastic.NewTermQuery("apply_staff.deal_state", 2)))).
		FilterWithName("neglected", elastic.NewNestedQuery("apply_staff", elastic.NewBoolQuery().Must(elastic.NewTermQuery("apply_staff.apply_staff_mid", applyStaffMid), elastic.NewTermQuery("apply_staff.deal_state", 3))))
	request2 := elastic.NewSearchRequest().Index(sp.QueryBody.From).Type("base").Source(elastic.NewSearchSource().Query(stateFilter).Aggregation("state", stateAgg).Size(0))
	MultiRes, err := d.esPool[sp.AppIDConf.ESCluster].MultiSearch().Add(request1, request2).Do(c)
	if err != nil {
		PromError(fmt.Sprintf("es:%s ", sp.Business), "%v", err)
		return
	}
	// extract the raw aggregation buckets; unmarshal errors are tolerated —
	// a missing bucket simply leaves the corresponding list empty
	tmp := map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[0].Aggregations["pid"], &tmp)
	docBuckets["tlist"] = tmp["buckets"]
	tmp = map[string]interface{}{}
	json.Unmarshal(*MultiRes.Responses[1].Aggregations["state"], &tmp)
	docBuckets["plist"] = tmp["buckets"]
	if resResult, e := json.Marshal(docBuckets); e != nil {
		// fix: log tag previously said CreativeArchiveSearch
		log.Error("CreativeArchiveApply.json.error(%v)", e)
	} else {
		res.Result = resResult
	}
	return
}

View File

@@ -0,0 +1,191 @@
package dao
import (
"context"
"go-common/app/admin/main/search/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoArchiveVideoScore smoke-tests ArchiveVideoScore with an empty query
// body against the default (empty-name) ES cluster.
func TestDaoArchiveVideoScore(t *testing.T) {
	convey.Convey("ArchiveVideoScore", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, debug, err := d.ArchiveVideoScore(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoArchiveScore smoke-tests ArchiveScore with an empty query body
// against the default (empty-name) ES cluster.
func TestDaoArchiveScore(t *testing.T) {
	convey.Convey("ArchiveScore", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, debug, err := d.ArchiveScore(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoTaskQaRandom smoke-tests TaskQaRandom with an empty query body
// against the default (empty-name) ES cluster.
func TestDaoTaskQaRandom(t *testing.T) {
	convey.Convey("TaskQaRandom", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, debug, err := d.TaskQaRandom(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoEsportsContestsDate smoke-tests EsportsContestsDate against the
// pcie_pub_out01 cluster; assertions are disabled (environment-dependent).
func TestDaoEsportsContestsDate(t *testing.T) {
	convey.Convey("EsportsContestsDate", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "pcie_pub_out01",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, debug, err :=
			d.EsportsContestsDate(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoCreativeArchiveSearch smoke-tests CreativeArchiveSearch for mid=1
// against the ssd_pub_in01 cluster; assertions disabled (environment-dependent).
func TestDaoCreativeArchiveSearch(t *testing.T) {
	convey.Convey("CreativeArchiveSearch", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{
					Where: &model.QueryBodyWhere{
						EQ: map[string]interface{}{"mid": 1},
					},
				},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "ssd_pub_in01",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, debug, err :=
			d.CreativeArchiveSearch(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoCreativeArchiveStaff smoke-tests CreativeArchiveStaff with a single
// combo condition (mid=1) and a title Like filter against ssd_pub_in02;
// assertions disabled (environment-dependent).
func TestDaoCreativeArchiveStaff(t *testing.T) {
	convey.Convey("CreativeArchiveStaff", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{
					Where: &model.QueryBodyWhere{
						Combo: []model.QueryBodyWhereCombo{
							{
								EQ: []map[string]interface{}{{"mid": 1}},
							},
						},
						Like: []model.QueryBodyWhereLike{{
							KWFields: []string{"title"},
							KW:       []string{"title"},
						}},
					},
				},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "ssd_pub_in02",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, debug, err :=
			d.CreativeArchiveStaff(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoCreativeArchiveApply smoke-tests CreativeArchiveApply with an
// apply_staff mid and a title Like filter against ssd_pub_in02; assertions
// disabled (environment-dependent).
func TestDaoCreativeArchiveApply(t *testing.T) {
	convey.Convey("CreativeArchiveApply", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{
					Where: &model.QueryBodyWhere{
						EQ: map[string]interface{}{"apply_staff.apply_staff_mid": "1"},
						Like: []model.QueryBodyWhereLike{{
							KWFields: []string{"title"},
							KW:       []string{"title"},
						}},
					},
				},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "ssd_pub_in02",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, debug, err :=
			d.CreativeArchiveApply(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,96 @@
package dao
import (
"context"
"go-common/app/admin/main/search/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoQueryConf verifies QueryConf returns a non-nil configuration set.
func TestDaoQueryConf(t *testing.T) {
	convey.Convey("QueryConf", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			res, err := d.QueryConf(c)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoQueryBasic verifies QueryBasic yields a non-nil query and debug
// object for an empty query body.
func TestDaoQueryBasic(t *testing.T) {
	convey.Convey("QueryBasic", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			mixedQuery, qbDebug := d.QueryBasic(c, sp)
			ctx.Convey("Then mixedQuery,qbDebug should not be nil.", func(ctx convey.C) {
				ctx.So(qbDebug, convey.ShouldNotBeNil)
				ctx.So(mixedQuery, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoqueryBasicRange verifies queryBasicRange on a nil map: it must
// return a non-nil (empty) query map and no error.
func TestDaoqueryBasicRange(t *testing.T) {
	convey.Convey("queryBasicRange", t, func(ctx convey.C) {
		var (
			rangeMap map[string]string
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			rangeQuery, err := d.queryBasicRange(rangeMap)
			ctx.Convey("Then err should be nil.rangeQuery should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(rangeQuery, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoqueryBasicLike verifies queryBasicLike on an empty like list: it
// must return no error (the resulting query slice may be nil).
func TestDaoqueryBasicLike(t *testing.T) {
	convey.Convey("queryBasicLike", t, func(ctx convey.C) {
		var (
			likeMap  = []model.QueryBodyWhereLike{}
			business = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.queryBasicLike(likeMap, business)
			ctx.Convey("Then err should be nil.likeQuery should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				//ctx.So(likeQuery, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoScroll smoke-tests Scroll with an empty query body against the
// default (empty-name) ES cluster; assertions disabled (environment-dependent).
func TestDaoScroll(t *testing.T) {
	convey.Convey("Scroll", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			sp = &model.QueryParams{
				QueryBody: &model.QueryBody{},
				AppIDConf: &model.QueryConfDetail{
					ESCluster: "",
				},
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			//res, debug, err :=
			d.Scroll(c, sp)
			ctx.Convey("Then err should be nil.res,debug should not be nil.", func(ctx convey.C) {
				//ctx.So(err, convey.ShouldBeNil)
				//ctx.So(debug, convey.ShouldNotBeNil)
				//ctx.So(res, convey.ShouldNotBeNil)
			})
		})
	})
}