Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,77 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"app_multiple_databus_test.go",
"app_single_test.go",
"config_offset_test.go",
"dao_test.go",
"es_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/model:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"app_databus.go",
"app_multiple.go",
"app_multiple_databus.go",
"app_single.go",
"config_asset.go",
"config_attr.go",
"config_business.go",
"config_offset.go",
"dao.go",
"es.go",
"hbase.go",
"sms.go",
],
importpath = "go-common/app/job/main/search/dao",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/model:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/log/infoc:go_default_library",
"//library/queue/databus:go_default_library",
"//library/stat/prom:go_default_library",
"//library/time:go_default_library",
"//library/xstr:go_default_library",
"//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/search/dao/base:all-srcs",
"//app/job/main/search/dao/business:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,197 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppDatabus consumes a single table's databus binlog stream.
type AppDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offset *model.LoopOffset
mapData []model.MapData
commits map[int32]*databus.Message
}
// NewAppDatabus .
func NewAppDatabus(d *Dao, appid string) (a *AppDatabus) {
a = &AppDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *AppDatabus) Business() string {
return a.attrs.Business
}
// InitIndex init index.
func (a *AppDatabus) InitIndex(c context.Context) {
if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
} else {
a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (a *AppDatabus) InitOffset(c context.Context) {
a.d.InitOffset(c, a.offset, a.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
a.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (a *AppDatabus) Offset(c context.Context) {
for {
offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
if err != nil {
log.Error("a.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (a *AppDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
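// IncrMessages drains the databus channel, collecting insert/update rows for the
// configured table until Databus.AggCount rows accumulate or the ticker fires,
// then enriches the batch via ExtraData. A closed channel ends the loop.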
func (a *AppDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-a.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus)
break
}
m := &model.Message{}
a.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if m.Table == a.attrs.Table.TablePrefix {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = a.d.JSON2map(m.New)
if err != nil {
log.Error("a.d.JSON2map error(%v)", err)
continue
}
log.Info("%v: %+v", a.attrs.AppID, parseMap)
a.mapData = append(a.mapData, parseMap)
}
}
if len(a.mapData) < a.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{})
}
length = len(a.mapData)
return
}
// AllMessages .
func (a *AppDatabus) AllMessages(c context.Context) (length int, err error) {
rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.OffsetID, a.attrs.Other.Size)
log.Info("appid: %s allMessages Current offsetID: %d", a.appid, a.offset.OffsetID)
if err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := InitMapData(a.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
return
}
a.mapData = append(a.mapData, item)
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{})
// offset
if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok && v != nil {
if v2, ok := v.(int64); ok {
a.offset.SetTempOffset(v2, "")
a.offset.SetRecoverTempOffset(v2, "")
} else {
log.Error("dtb.all._id is not int64")
}
} else {
log.Error("dtb.all._id nil error")
}
}
length = len(a.mapData)
return
}
// BulkIndex .
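// BulkIndex writes mapData[start:end] to ES: BulkDBData when conf Business.Index
// is set (the batch came from MySQL), otherwise BulkDatabusData (it came from databus).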
func (a *AppDatabus) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
partData := a.mapData[start:end]
if a.d.c.Business.Index {
err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
} else {
err = a.d.BulkDatabusData(c, a.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (a *AppDatabus) Commit(c context.Context) (err error) {
if a.d.c.Business.Index {
err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
} else {
for k, cos := range a.commits {
if err = cos.Commit(); err != nil {
log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
continue
}
delete(a.commits, k)
}
}
a.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (a *AppDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}
// Size return size.
func (a *AppDatabus) Size(c context.Context) int {
return a.attrs.Other.Size
}

View File

@@ -0,0 +1,185 @@
package dao
import (
"context"
"fmt"
"strconv"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// AppMultiple .
type AppMultiple struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
offsets model.LoopOffsets
mapData []model.MapData
}
// NewAppMultiple .
func NewAppMultiple(d *Dao, appid string) (am *AppMultiple) {
am = &AppMultiple{
d: d,
appid: appid,
db: d.DBPool[d.AttrPool[appid].DBName],
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
}
for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
am.offsets[i] = &model.LoopOffset{}
}
return
}
// Business return business
func (am *AppMultiple) Business() string {
return am.attrs.Business
}
// InitIndex .
func (am *AppMultiple) InitIndex(c context.Context) {
var (
indexAliasName string
indexEntityName string
)
aliases, err := am.d.GetAliases(am.attrs.ESName, am.attrs.Index.IndexAliasPrefix)
for i := am.attrs.Index.IndexFrom; i <= am.attrs.Index.IndexTo; i++ {
indexAliasName = fmt.Sprintf("%s%0"+am.attrs.Index.IndexZero+"d", am.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+am.attrs.Index.IndexZero+"d", am.attrs.Index.IndexEntityPrefix, i)
if err != nil {
am.d.InitIndex(c, nil, am.attrs.ESName, indexAliasName, indexEntityName, am.attrs.Index.IndexMapping)
} else {
am.d.InitIndex(c, aliases, am.attrs.ESName, indexAliasName, indexEntityName, am.attrs.Index.IndexMapping)
}
}
}
// InitOffset insert init value to offset.
func (am *AppMultiple) InitOffset(c context.Context) {
am.d.InitOffset(c, am.offsets[0], am.attrs, []string{})
}
// Offset .
func (am *AppMultiple) Offset(c context.Context) {
for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
offset, err := am.d.Offset(c, am.attrs.AppID, am.attrs.Table.TablePrefix+strconv.Itoa(i))
if err != nil {
log.Error("am.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
am.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
am.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (am *AppMultiple) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
am.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
func (am *AppMultiple) IncrMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
am.mapData = []model.MapData{}
for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
if !am.offsets[i].IsLoop {
rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByMTime, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetTime, am.attrs.Other.Size)
} else {
rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByIDMTime, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetID, am.offsets[i].OffsetTime, am.attrs.Other.Size)
}
if err != nil {
log.Error("db.Query error(%v)", err)
continue
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(am.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("rows.Scan() error(%v)", err)
continue
}
tempList = append(tempList, item)
am.mapData = append(am.mapData, item)
}
rows.Close()
if len(tempList) > 0 {
UpdateOffsetByMap(am.offsets[i], tempList...)
}
}
if len(am.mapData) > 0 {
//fmt.Println("before", am.mapData)
am.mapData, err = am.d.ExtraData(c, am.mapData, am.attrs, "db", []string{})
//fmt.Println("after", am.mapData)
}
length = len(am.mapData)
return
}
// AllMessages .
func (am *AppMultiple) AllMessages(c context.Context) (length int, err error) {
am.mapData = []model.MapData{}
for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
var rows *xsql.Rows
if rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByID, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetID, am.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(am.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("IncrMessages rows.Scan() error(%v)", err)
continue
}
tempList = append(tempList, item)
am.mapData = append(am.mapData, item)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
am.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(am.mapData) > 0 {
am.mapData, err = am.d.ExtraData(c, am.mapData, am.attrs, "db", []string{})
}
length = len(am.mapData)
return
}
// BulkIndex .
func (am *AppMultiple) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
if len(am.mapData) >= (start+1) && len(am.mapData) >= end {
partData := am.mapData[start:end]
err = am.d.BulkDBData(c, am.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (am *AppMultiple) Commit(c context.Context) (err error) {
for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
if err = am.d.CommitOffset(c, am.offsets[i], am.attrs.AppID, am.attrs.Table.TablePrefix+strconv.Itoa(i)); err != nil {
log.Error("Commit error(%v)", err)
continue
}
}
am.mapData = []model.MapData{}
return
}
// Sleep .
func (am *AppMultiple) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(am.attrs.Other.Sleep))
}
// Size .
func (am *AppMultiple) Size(c context.Context) (size int) {
size = am.attrs.Other.Size
return
}

View File

@@ -0,0 +1,451 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus .
type AppMultipleDatabus struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
tableName []string
indexNameSuffix []string
commits map[int32]*databus.Message
}
// IndexNameSuffix .
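// IndexNameSuffix enumerates the distinct index-name suffixes between startDate
// and now by stepping backwards from the current time: daily when the layout
// contains "02", weekly for "week", monthly for "01", yearly for "2006". Monthly
// and yearly steps are anchored to the 1st to avoid AddDate normalization artifacts.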
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
var (
sTime time.Time
eTime = time.Now()
)
sTime, err = time.Parse(format, startDate)
if err != nil {
log.Error("d.LogAuditIndexName(%v)", startDate)
return
}
resDict := map[string]bool{}
if strings.Contains(format, "02") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -1)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "week") {
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, 0, -7)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "01") {
// On month-end days such as Jan 31, AddDate(0, -1, 0) mis-normalizes the date, so anchor eTime to the 1st of the month.
year, month, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(0, -1, 0)
if sTime.After(eTime) {
break
}
}
} else if strings.Contains(format, "2006") {
// On Feb 29, AddDate(-1, 0, 0) mis-normalizes the date, so anchor eTime to Jan 1.
year, _, _ := eTime.Date()
hour, min, sec := eTime.Clock()
eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
for {
resDict[amd.getIndexName(format, eTime)] = true
eTime = eTime.AddDate(-1, 0, 0)
if sTime.After(eTime) {
break
}
}
}
for k := range resDict {
res = append(res, k)
}
return
}
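// getIndexName renders the layout for t, substituting the literal "week" with a
// day-of-month bucket (1-8 -> "0108", 9-16 -> "0916", 17-24 -> "1724",
// 25-31 -> "2531"). For a hypothetical layout "2006_01week", 2019-04-22 yields
// "2019_041724".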
func (amd *AppMultipleDatabus) getIndexName(format string, t time.Time) (index string) {
var (
week = map[int]string{
0: "0108",
1: "0916",
2: "1724",
3: "2531",
}
)
// bucket the day of month into 1-8, 9-16, 17-24 and 25-31
return strings.Replace(t.Format(format), "week", week[(t.Day()-1)/8], -1)
}
// NewAppMultipleDatabus .
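// NewAppMultipleDatabus derives the table set: for "int"/"single" splits the
// names are TablePrefix plus a zero-padded counter in [TableFrom, TableTo];
// otherwise TableFormat ("layout,startDate") is expanded via IndexNameSuffix and
// each suffix is appended to TablePrefix. One LoopOffset is tracked per table.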
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
var err error
amd = &AppMultipleDatabus{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
tableName: []string{},
indexNameSuffix: []string{},
commits: make(map[int32]*databus.Message),
}
amd.db = d.DBPool[amd.attrs.DBName]
amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
amd.tableName = append(amd.tableName, tableName)
amd.offsets[i] = &model.LoopOffset{}
}
} else {
var tableNameSuffix []string
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range tableNameSuffix {
amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
}
for i := range amd.tableName {
amd.offsets[i] = &model.LoopOffset{}
}
}
return
}
// Business return business.
func (amd *AppMultipleDatabus) Business() string {
return amd.attrs.Business
}
// InitIndex .
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
var (
err error
indexAliasName string
indexEntityName string
)
indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
if indexFormat[0] == "int" || indexFormat[0] == "single" {
for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
// == "0" 有问题,不通用
if amd.attrs.Index.IndexZero == "0" {
indexAliasName = amd.attrs.Index.IndexAliasPrefix
indexEntityName = amd.attrs.Index.IndexEntityPrefix
} else {
indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
}
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
}
}
} else {
if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
log.Error("amd.IndexNameSuffix(%v)", err)
return
}
for _, v := range amd.indexNameSuffix {
if aliasErr != nil {
amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
} else {
amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
}
}
}
}
// InitOffset insert init value to offset.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset .
func (amd *AppMultipleDatabus) Offset(c context.Context) {
for i, v := range amd.tableName {
offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
if err != nil {
log.Error("amd.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages .
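// IncrMessages consumes binlog messages until Databus.AggCount rows accumulate
// or the ticker fires. Only insert/update actions on matching tables survive,
// with per-app filters: creative_reply requires a reply_\d+ table, esports_fav
// keeps type == 10, and fav_playlist keeps type == 2 subject to attr bit checks.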
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-amd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
break
}
m := &model.Message{}
amd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if amd.attrs.Business == "creative_reply" {
r, _ := regexp.Compile("reply_\\d+")
if !r.MatchString(m.Table) {
continue
}
}
if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = amd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
// esports fav type filter
if amd.attrs.AppID == "esports_fav" {
if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
continue
}
}
// playlist fav type and attr filter
if amd.attrs.AppID == "fav_playlist" {
if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
continue
}
if t, ok := parseMap["attr"]; ok {
if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
continue
}
}
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
if err != nil {
if amd.attrs.AppID == "creative_reply" {
continue
}
log.Error("amd.newParseMap error(%v)", err)
continue
}
amd.mapData = append(amd.mapData, newParseMap)
}
}
if len(amd.mapData) < amd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "dtb")
return
}
// AllMessages .
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
amd.mapData = []model.MapData{}
for i, v := range amd.tableName {
var (
rows *xsql.Rows
sql string
)
tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // 兼容只传后缀,不传表名
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
} else {
sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
}
if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
var newParseMap map[string]interface{}
newParseMap, err = amd.newParseMap(c, v, item)
if err != nil {
log.Error("amd.newParseMap error(%v)", err)
continue
}
tempList = append(tempList, newParseMap)
amd.mapData = append(amd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
}
}
if len(amd.mapData) > 0 {
amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
}
length = len(amd.mapData)
//amd.d.extraData(c, amd, "db")
return
}
// BulkIndex .
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := amd.mapData[start:end]
if amd.d.c.Business.Index {
err = amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, partData...)
} else {
err = amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, partData...)
}
return
}
// Commit .
func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) {
if amd.d.c.Business.Index {
if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // 兼容只传后缀,不传表名
for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for i, v := range amd.indexNameSuffix {
if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
log.Error("Commit error(%v)", err)
continue
}
}
}
} else {
for k, c := range amd.commits {
if err = c.Commit(); err != nil {
log.Error("AppMultipleDatabus.Commit error(%v)", err)
continue
}
delete(amd.commits, k)
}
}
amd.mapData = []model.MapData{}
return
}
// Sleep .
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(amd.attrs.Other.Sleep))
}
// Size .
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
return amd.attrs.Other.Size
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap .
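// newParseMap decides, per appid, which ES index (index_name) and document id
// (index_id) a row belongs to: dm/dm_search route by the table suffix, dmreport
// by the year of ctime, favorite by a "fid_oid" compound id; otherwise the
// suffix after TablePrefix picks the index.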
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
res = parseMap
// TODO: the entity index cannot be written.
if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
indexSuffix := strings.Split(table, "_")[2]
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
if _, ok := res["msg"]; ok {
// dm_content_
res["index_field"] = true // 删除ctime
res["index_id"] = fmt.Sprintf("%v", res["dmid"])
} else {
// dm_index_
res["index_id"] = fmt.Sprintf("%v", res["id"])
}
} else if amd.attrs.AppID == "dmreport" {
if ztime, ok := res["ctime"].(*interface{}); ok { // 数据库
if ctime, cok := (*ztime).(time.Time); cok {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
} else if ztime, ok := res["ctime"].(string); ok { // databus
var ctime time.Time
if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
}
}
} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
if replyType, ok := res["type"].(int64); ok {
if replyType == 1 || replyType == 12 || replyType == 14 {
} else {
err = fmt.Errorf("多余数据")
}
} else {
err = fmt.Errorf("错误数据")
}
} else if amd.attrs.Index.IndexSplit == "single" {
res["index_name"] = amd.attrs.Index.IndexAliasPrefix
} else {
indexSuffix := strings.TrimPrefix(table, amd.attrs.Table.TablePrefix)
res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
}
//dtb index_id
if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
if fid, ok := res["fid"].(int64); ok {
if oid, ok := res["oid"].(int64); ok {
res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
return
}
}
res["index_id"] = "err"
res["indexName"] = ""
}
return
}

View File

@@ -0,0 +1 @@
package dao

View File

@@ -0,0 +1,169 @@
package dao
import (
"context"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// AppSingle .
type AppSingle struct {
d *Dao
appid string
attrs *model.Attrs
db *xsql.DB
offset *model.LoopOffset
mapData []model.MapData
}
// NewAppSingle .
func NewAppSingle(d *Dao, appid string) (as *AppSingle) {
as = &AppSingle{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
}
return
}
// Business return business.
func (as *AppSingle) Business() string {
return as.attrs.Business
}
// InitIndex init index.
func (as *AppSingle) InitIndex(c context.Context) {
if aliases, err := as.d.GetAliases(as.attrs.ESName, as.attrs.Index.IndexAliasPrefix); err != nil {
as.d.InitIndex(c, nil, as.attrs.ESName, as.attrs.Index.IndexAliasPrefix, as.attrs.Index.IndexEntityPrefix, as.attrs.Index.IndexMapping)
} else {
as.d.InitIndex(c, aliases, as.attrs.ESName, as.attrs.Index.IndexAliasPrefix, as.attrs.Index.IndexEntityPrefix, as.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (as *AppSingle) InitOffset(c context.Context) {
as.d.InitOffset(c, as.offset, as.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
as.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (as *AppSingle) Offset(c context.Context) {
for {
offset, err := as.d.Offset(c, as.appid, as.attrs.Table.TablePrefix)
if err != nil {
log.Error("ac.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
as.offset.SetReview(offset.ReviewID, offset.ReviewTime)
as.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (as *AppSingle) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
as.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (as *AppSingle) IncrMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
//fmt.Println("start", as.offset.OffsetTime)
if !as.offset.IsLoop {
rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByMTime, as.offset.OffsetTime, as.attrs.Other.Size)
} else {
rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByIDMTime, as.offset.OffsetID, as.offset.OffsetTime, as.attrs.Other.Size)
}
if err != nil {
log.Error("db.Query error(%v)", err)
return
}
defer rows.Close()
tempPartList := []model.MapData{}
for rows.Next() {
item, row := InitMapData(as.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("IncrMessages rows.Scan() error(%v)", err)
return
}
as.mapData = append(as.mapData, item)
tempPartList = append(tempPartList, item)
}
if len(as.mapData) > 0 {
// extra relevant data
as.mapData, err = as.d.ExtraData(c, as.mapData, as.attrs, "db", []string{})
// offset
UpdateOffsetByMap(as.offset, tempPartList...)
}
length = len(as.mapData)
return
}
// AllMessages .
func (as *AppSingle) AllMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
if rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByID, as.offset.RecoverID, as.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := InitMapData(as.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
continue
}
as.mapData = append(as.mapData, item)
}
if len(as.mapData) > 0 {
// extra relevant data
as.mapData, err = as.d.ExtraData(c, as.mapData, as.attrs, "db", []string{})
// offset
if v, ok := as.mapData[len(as.mapData)-1]["_id"]; ok && v != nil {
if v2, ok := v.(int64); ok {
as.offset.SetTempOffset(v2, "")
as.offset.SetRecoverTempOffset(v2, "")
} else {
log.Error("single.all._id is not int64")
}
} else {
log.Error("single.all._id nil error")
}
}
length = len(as.mapData)
return
}
// BulkIndex .
func (as *AppSingle) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
if len(as.mapData) >= (start+1) && len(as.mapData) >= end {
partData := as.mapData[start:end]
err = as.d.BulkDBData(c, as.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (as *AppSingle) Commit(c context.Context) (err error) {
err = as.d.CommitOffset(c, as.offset, as.attrs.AppID, as.attrs.Table.TablePrefix)
as.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (as *AppSingle) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(as.attrs.Other.Sleep))
}
// Size return size.
func (as *AppSingle) Size(c context.Context) int {
return as.attrs.Other.Size
}

View File

@@ -0,0 +1 @@
package dao

View File

@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["base_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["base.go"],
importpath = "go-common/app/job/main/search/dao/base",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/dao:go_default_library",
"//app/job/main/search/dao/business:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,57 @@
package base
import (
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
bsn "go-common/app/job/main/search/dao/business"
)
// Base .
type Base struct {
D *dao.Dao
C *conf.Config
}
// NewBase .
func NewBase(c *conf.Config) (b *Base) {
b = &Base{
C: c,
D: dao.New(c),
}
b.D.AppPool = b.newAppPool(b.D)
return
}
// newAppPool .
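// newAppPool builds one dao.App per configured business, dispatching on its
// IncrWay: "single"/"multiple" poll MySQL, "dtb"/"multipleDtb" consume databus
// binlogs, and "business" selects a bespoke implementation by appid. A minimal
// sketch of driving the pool (hypothetical caller, assuming dao.App exposes
// IncrMessages as used elsewhere in this commit):
//
//	for appid, app := range b.D.AppPool {
//		go func(id string, a dao.App) {
//			if _, err := a.IncrMessages(context.TODO()); err != nil {
//				log.Error("appid(%s) IncrMessages error(%v)", id, err)
//			}
//		}(appid, app)
//	}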
func (b *Base) newAppPool(d *dao.Dao) (pool map[string]dao.App) {
pool = make(map[string]dao.App)
for k, v := range d.BusinessPool {
switch v.IncrWay {
case "single":
pool[k] = dao.NewAppSingle(d, k)
case "multiple":
pool[k] = dao.NewAppMultiple(d, k)
case "dtb":
pool[k] = dao.NewAppDatabus(d, k)
case "multipleDtb":
pool[k] = dao.NewAppMultipleDatabus(d, k)
case "business":
switch k {
case "archive_video":
pool[k] = bsn.NewAvr(d, k, b.C)
case "avr_archive", "avr_video":
pool[k] = bsn.NewAvrArchive(d, k)
case "log_audit", "log_user_action":
pool[k] = bsn.NewLog(d, k)
case "dm_date":
pool[k] = bsn.NewDmDate(d, k)
case "aegis_resource":
pool[k] = bsn.NewAegisResource(d, k, b.C)
}
default:
// to do other thing
}
}
//fmt.Println("strace:app-pool>", pool)
return
}

View File

@@ -0,0 +1,30 @@
package base
import (
"flag"
"fmt"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
. "github.com/smartystreets/goconvey/convey"
)
func WithBase(f func(b *Base)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := NewBase(conf.Conf)
f(d)
}
}
func Test_NewAppPool(t *testing.T) {
Convey("newAppPool", t, WithBase(func(b *Base) {
pool := b.newAppPool(b.D)
fmt.Println(pool)
}))
}

View File

@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"aegis_resource.go",
"archive_video_relation.go",
"avr_archive.go",
"dm_date.go",
"log.go",
],
importpath = "go-common/app/job/main/search/dao/business",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/dao:go_default_library",
"//app/job/main/search/model:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/log/infoc:go_default_library",
"//library/queue/databus:go_default_library",
"//library/xstr:go_default_library",
"//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["business_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/dao:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)

View File

@@ -0,0 +1,208 @@
package business
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AegisResource consumes the aegis resource tables' databus binlog stream.
type AegisResource struct {
d *dao.Dao
c *conf.Config
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offset *model.LoopOffset
mapData []model.MapData
commits map[int32]*databus.Message
}
// NewAegisResource .
func NewAegisResource(d *dao.Dao, appid string, c *conf.Config) (a *AegisResource) {
a = &AegisResource{
d: d,
c: c,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *AegisResource) Business() string {
return a.attrs.Business
}
// InitIndex init index.
func (a *AegisResource) InitIndex(c context.Context) {
if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
} else {
a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (a *AegisResource) InitOffset(c context.Context) {
a.d.InitOffset(c, a.offset, a.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
a.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (a *AegisResource) Offset(c context.Context) {
for {
offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
if err != nil {
log.Error("a.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (a *AegisResource) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
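// IncrMessages consumes binlog rows from the resource, resource_result and
// net_flow_resource tables, copying rid into _id/id and dropping state for
// non-result tables before batching.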
func (a *AegisResource) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-a.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus)
break
}
m := &model.Message{}
a.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if m.Table == "resource" || m.Table == "resource_result" || m.Table == "net_flow_resource" {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = a.d.JSON2map(m.New)
if err != nil {
log.Error("a.d.JSON2map error(%v)", err)
continue
}
if _, ok := parseMap["rid"]; ok {
parseMap["_id"] = parseMap["rid"]
parseMap["id"] = parseMap["rid"]
}
if _, sok := parseMap["state"]; m.Table != "resource_result" && sok {
delete(parseMap, "state")
}
log.Info("%v: %+v", a.attrs.AppID, parseMap)
a.mapData = append(a.mapData, parseMap)
}
}
if len(a.mapData) < a.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{})
}
length = len(a.mapData)
return
}
// AllMessages .
func (a *AegisResource) AllMessages(c context.Context) (length int, err error) {
rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.OffsetID, a.attrs.Other.Size)
log.Info("appid: %s allMessages Current offsetID: %d", a.appid, a.offset.OffsetID)
if err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
return
}
a.mapData = append(a.mapData, item)
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{})
// offset
if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok && v != nil {
if v2, ok := v.(int64); ok {
a.offset.SetTempOffset(v2, "")
a.offset.SetRecoverTempOffset(v2, "")
} else {
log.Error("dtb.all._id is not int64")
}
} else {
log.Error("dtb.all._id nil error")
}
}
length = len(a.mapData)
return
}
// BulkIndex .
func (a *AegisResource) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
partData := a.mapData[start:end]
if a.c.Business.Index {
err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
} else {
err = a.d.BulkDatabusData(c, a.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (a *AegisResource) Commit(c context.Context) (err error) {
if a.c.Business.Index {
err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
} else {
for k, cos := range a.commits {
if err = cos.Commit(); err != nil {
log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
continue
}
delete(a.commits, k)
}
}
a.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (a *AegisResource) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}
// Size return size.
func (a *AegisResource) Size(c context.Context) int {
return a.attrs.Other.Size
}

View File

@@ -0,0 +1,197 @@
package business
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// Avr consumes a single table's databus binlog stream.
type Avr struct {
c *conf.Config
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offset *model.LoopOffset
mapData []model.MapData
commits map[int32]*databus.Message
}
// NewAvr .
func NewAvr(d *dao.Dao, appid string, c *conf.Config) (a *Avr) {
a = &Avr{
c: c,
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *Avr) Business() string {
return a.attrs.Business
}
// InitIndex init index.
func (a *Avr) InitIndex(c context.Context) {
if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
} else {
a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (a *Avr) InitOffset(c context.Context) {
a.d.InitOffset(c, a.offset, a.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
a.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (a *Avr) Offset(c context.Context) {
for {
offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
if err != nil {
log.Error("a.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (a *Avr) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (a *Avr) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-a.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus)
break
}
m := &model.Message{}
a.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
//fmt.Println("origin msg", m)
//log.Info("origin msg: (%v)", m.Table, a.mapData)
if m.Table == a.attrs.Table.TablePrefix {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = a.d.JSON2map(m.New)
if err != nil {
log.Error("a.JSON2map error(%v)", err)
continue
}
a.mapData = append(a.mapData, parseMap)
}
}
if len(a.mapData) < a.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
//log.Info("origin msg: (%v)", a.mapData)
if len(a.mapData) > 0 {
//fmt.Println("before", a.mapData)
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{"archive", "video", "audit", "ups"})
//fmt.Println("after", a.mapData)
log.Info("dtb msg: (%v)", a.mapData)
}
length = len(a.mapData)
return
}
// AllMessages .
func (a *Avr) AllMessages(c context.Context) (length int, err error) {
rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.RecoverID, a.attrs.Other.Size)
log.Info("appid: %s allMessages Current RecoverID: %d", a.appid, a.offset.RecoverID)
if err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
return
}
a.mapData = append(a.mapData, item)
}
if len(a.mapData) > 0 {
//fmt.Println("before", a.mapData)
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{"audit", "ups"})
if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok {
a.offset.SetTempOffset(v.(int64), "")
a.offset.SetRecoverTempOffset(v.(int64), "")
}
}
length = len(a.mapData)
return
}
// BulkIndex .
func (a *Avr) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
if len(a.mapData) > 0 {
partData := a.mapData[start:end]
err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (a *Avr) Commit(c context.Context) (err error) {
if a.c.Business.Index {
err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
} else {
for k, cos := range a.commits {
if err = cos.Commit(); err != nil {
log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
continue
}
delete(a.commits, k)
}
}
a.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (a *Avr) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}
// Size return size.
func (a *Avr) Size(c context.Context) int {
return a.attrs.Other.Size
}

View File

@@ -0,0 +1,259 @@
package business
import (
"context"
"fmt"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/xstr"
)
// AvrArchive .
type AvrArchive struct {
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
offset *model.LoopOffset
mapData []model.MapData
}
// NewAvrArchive .
func NewAvrArchive(d *dao.Dao, appid string) (av *AvrArchive) {
av = &AvrArchive{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
}
return
}
// Business return business.
func (av *AvrArchive) Business() string {
return av.attrs.Business
}
// InitIndex init index.
func (av *AvrArchive) InitIndex(c context.Context) {
if aliases, err := av.d.GetAliases(av.attrs.ESName, av.attrs.Index.IndexAliasPrefix); err != nil {
av.d.InitIndex(c, nil, av.attrs.ESName, av.attrs.Index.IndexAliasPrefix, av.attrs.Index.IndexEntityPrefix, av.attrs.Index.IndexMapping)
} else {
av.d.InitIndex(c, aliases, av.attrs.ESName, av.attrs.Index.IndexAliasPrefix, av.attrs.Index.IndexEntityPrefix, av.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (av *AvrArchive) InitOffset(c context.Context) {
av.d.InitOffset(c, av.offset, av.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
av.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (av *AvrArchive) Offset(c context.Context) {
for {
offset, err := av.d.Offset(c, av.appid, av.attrs.Table.TablePrefix)
if err != nil {
log.Error("ac.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
av.offset.SetReview(offset.ReviewID, offset.ReviewTime)
av.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (av *AvrArchive) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
av.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (av *AvrArchive) IncrMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
log.Info("appid: %s IncrMessages Current OffsetTime: %s, OffsetID: %d", av.appid, av.offset.OffsetTime, av.offset.OffsetID)
if !av.offset.IsLoop {
rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByMTime, av.offset.OffsetTime, av.attrs.Other.Size)
} else {
rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByIDMTime, av.offset.OffsetID, av.offset.OffsetTime, av.attrs.Other.Size)
}
if err != nil {
log.Error("db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("IncrMessages rows.Scan() error(%v)", err)
return
}
av.mapData = append(av.mapData, item)
}
length = len(av.mapData)
if length > 0 {
// offset
dao.UpdateOffsetByMap(av.offset, av.mapData...)
// extra relevant data
length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
}
return
}
// AllMessages .
func (av *AvrArchive) AllMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
log.Info("appid: %s allMessages Current RecoverID: %d", av.appid, av.offset.RecoverID)
if rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByID, av.offset.RecoverID, av.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
continue
}
av.mapData = append(av.mapData, item)
}
length = len(av.mapData)
if length > 0 {
// offset
if av.mapData[length-1]["_id"] != nil {
v := av.mapData[length-1]["_id"]
if v2, ok := v.(*interface{}); ok {
av.offset.SetTempOffset((*v2).(int64), "")
av.offset.SetRecoverTempOffset((*v2).(int64), "")
}
}
// extra relevant data
length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
}
return
}
// extraData extra data for appid
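// extraData first normalizes rows in place (TransData for DB rows, TransDtb for
// databus rows), then runs each configured DataExtras entry whose tag was
// selected, merging the extra columns back into mapData.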
func (av *AvrArchive) extraData(c context.Context, way string, tags map[string]bool) (length int, err error) {
switch way {
case "db":
for i, item := range av.mapData {
item.TransData(av.attrs)
for k, v := range item {
av.mapData[i][k] = v
}
}
case "dtb":
for i, item := range av.mapData {
item.TransDtb(av.attrs)
av.mapData[i] = model.MapData{}
for k, v := range item {
av.mapData[i][k] = v
}
}
}
for _, ex := range av.attrs.DataExtras {
if _, ok := tags[ex.Tag]; !ok {
continue
}
switch ex.Type {
case "slice":
continue
//av.extraDataSlice(c, ex)
default:
length, _ = av.extraDataDefault(c, ex)
}
}
return
}
// extraData-default
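// extraDataDefault collects the in_field ids from the current batch, fetches the
// extra rows in a single IN query against ex.DBName, and keeps only the batch
// entries that matched, copying over the fields listed in fds.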
func (av *AvrArchive) extraDataDefault(c context.Context, ex model.AttrDataExtra) (length int, err error) {
// filter ids from in_fields
var (
ids []int64
items map[int64]model.MapData
temp map[int64]model.MapData
)
cdtInField := ex.Condition["in_field"]
items = make(map[int64]model.MapData)
temp = make(map[int64]model.MapData)
for _, md := range av.mapData {
if v, ok := md[cdtInField]; ok {
ids = append(ids, v.(int64)) // TODO: deduplicate ids
temp[v.(int64)] = md
}
}
// query extra data
if len(ids) > 0 {
var rows *xsql.Rows
rows, err = av.d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(ids))+" and 1 = ? ", 1)
if err != nil {
log.Error("extraDataDefault db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := dao.InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataDefault rows.Scan() error(%v)", err)
continue
}
if v, ok := item[ex.InField]; ok {
if v2, ok := v.(*interface{}); ok {
item.TransData(av.attrs)
items[(*v2).(int64)] = item
}
}
}
rows.Close()
}
//fmt.Println("a.mapData", av.mapData, "ids", ids, "items", items)
// merge data
fds := []string{"_id", "cid", "vid", "aid", "v_ctime"}
av.mapData = []model.MapData{}
for k, item := range items {
if v, ok := temp[k]; ok {
for _, fd := range fds {
if f, ok := item[fd]; ok {
v[fd] = f
}
}
av.mapData = append(av.mapData, v)
}
}
length = len(av.mapData)
//fmt.Println("a.mapData:after", av.mapData)
return
}
// BulkIndex .
func (av *AvrArchive) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := av.mapData[start:end]
err = av.d.BulkDBData(c, av.attrs, writeEntityIndex, partData...)
return
}
// Commit commit offset.
func (av *AvrArchive) Commit(c context.Context) (err error) {
err = av.d.CommitOffset(c, av.offset, av.attrs.AppID, av.attrs.Table.TablePrefix)
av.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (av *AvrArchive) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(av.attrs.Other.Sleep))
}
// Size return size.
func (av *AvrArchive) Size(c context.Context) int {
return av.attrs.Other.Size
}

View File

@@ -0,0 +1,124 @@
package business
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
. "github.com/smartystreets/goconvey/convey"
)
func WithBusinessArv(f func(d *Avr)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewAvr(d, "archive_video", conf.Conf)
f(bsn)
}
}
func Test_AvrRecover(t *testing.T) {
Convey("set recover", t, WithBusinessArv(func(d *Avr) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_AvrInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessArv(func(d *Avr) {
d.InitOffset(context.TODO())
}))
}
func WithBusinessDmDate(f func(d *DmDate)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewDmDate(d, "dm_search")
f(bsn)
}
}
func Test_DmDateRecover(t *testing.T) {
Convey("set recover", t, WithBusinessDmDate(func(d *DmDate) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_DmDateInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessDmDate(func(d *DmDate) {
d.InitOffset(context.TODO())
}))
}
func WithBusinessLog(f func(d *Log)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewLog(d, "log_audit")
f(bsn)
}
}
func Test_LogRecover(t *testing.T) {
Convey("set recover", t, WithBusinessLog(func(d *Log) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_LogInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessLog(func(d *Log) {
d.InitOffset(context.TODO())
}))
}
func Test_LogInitIndex(t *testing.T) {
Convey("test init index", t, WithBusinessLog(func(d *Log) {
d.InitIndex(context.TODO())
}))
}
func Test_LogOffset(t *testing.T) {
Convey("test offset", t, WithBusinessLog(func(d *Log) {
d.Offset(context.TODO())
}))
}
func Test_LogSetRecover(t *testing.T) {
Convey("test set recover", t, WithBusinessLog(func(d *Log) {
d.SetRecover(context.TODO(), 0, "", 0)
}))
}
func Test_LogAllMessages(t *testing.T) {
Convey("test set recover", t, WithBusinessLog(func(d *Log) {
d.AllMessages(context.TODO())
}))
}

View File

@@ -0,0 +1,352 @@
package business
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
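// minIDSQL finds the smallest dm_index id created after a given date; used to
// seed each table's starting offset.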
minIDSQL = "SELECT id FROM dm_index_%03d WHERE ctime > ? ORDER BY id ASC LIMIT 1"
)
// DmDate .
type DmDate struct {
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
commits map[int32]*databus.Message
frontTwelveMonthDate string
tableName []string
oidDayMap map[string]string
}
// NewDmDate .
func NewDmDate(d *dao.Dao, appid string) (dd *DmDate) {
dd = &DmDate{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
commits: make(map[int32]*databus.Message),
frontTwelveMonthDate: "2017-08-01",
oidDayMap: make(map[string]string),
}
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
dd.offsets[i] = &model.LoopOffset{}
}
dd.db = d.DBPool[dd.attrs.DBName]
dd.dtb = d.DatabusPool[dd.attrs.Databus.Databus]
return
}
// Business return business.
func (dd *DmDate) Business() string {
return dd.attrs.Business
}
// InitIndex init index.
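// InitIndex pre-creates monthly indices from twelve months back to eighteen
// months ahead, naming them IndexAliasPrefix/IndexEntityPrefix plus "YYYY_MM".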
func (dd *DmDate) InitIndex(c context.Context) {
var (
indexAliasName string
indexEntityName string
)
aliases, err := dd.d.GetAliases(dd.attrs.ESName, dd.attrs.Index.IndexAliasPrefix)
now := time.Now()
for i := -12; i < 18; i++ {
newDate := now.AddDate(0, i, 0).Format("2006-01")
indexAliasName = dd.attrs.Index.IndexAliasPrefix + strings.Replace(newDate, "-", "_", -1)
indexEntityName = dd.attrs.Index.IndexEntityPrefix + strings.Replace(newDate, "-", "_", -1)
if err != nil {
dd.d.InitIndex(c, nil, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
} else {
dd.d.InitIndex(c, aliases, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
}
}
}
// InitOffset .
func (dd *DmDate) InitOffset(c context.Context) {
dd.d.InitOffset(c, dd.offsets[0], dd.attrs, dd.tableName)
log.Info("in InitOffset")
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
var (
id int64
err error
row *xsql.Row
)
row = dd.db.QueryRow(c, fmt.Sprintf(minIDSQL, i), dd.frontTwelveMonthDate)
if err = row.Scan(&id); err != nil {
if err == xsql.ErrNoRows {
log.Info("in ErrNoRows")
err = nil
} else {
log.Info("row.Scan error(%v)", err)
log.Error("row.Scan error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
}
log.Info("here i am %d", i)
dd.offsets[i] = &model.LoopOffset{}
dd.offsets[i].OffsetID = id
}
log.Info("InitOffset over")
}
// Offset get offset.
func (dd *DmDate) Offset(c context.Context) {
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
offset, err := dd.d.Offset(c, dd.attrs.AppID, tableName)
if err != nil {
log.Error("dd.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
}
dd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
dd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
}
}
// SetRecover set recover
func (dd *DmDate) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}
// IncrMessages .
func (dd *DmDate) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(dd.attrs.Databus.Ticker))
defer ticker.Stop()
timeStr := time.Now().Format("2006-01-02")
t, _ := time.ParseInLocation("2006-01-02", timeStr, time.Local)
tomorrowZeroTimestamp := t.AddDate(0, 0, 1).Unix()
nowTimestamp := time.Now().Unix()
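// within 180s of midnight, reset the per-day oid dedup map so the next day's rows are indexed again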
if tomorrowZeroTimestamp-nowTimestamp < 180 {
dd.oidDayMap = make(map[string]string)
}
for {
select {
case msg, ok := <-dd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", dd.attrs.Databus)
break
}
m := &model.Message{}
dd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if m.Action == "insert" && strings.HasPrefix(m.Table, "dm_index") {
var parseMap map[string]interface{}
parseMap, err = dd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
newParseMap := dd.newDtbParseMap(c, parseMap)
indexID := newParseMap["index_id"].(string)
indexName := newParseMap["index_name"].(string)
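// dedup: index at most one row per oid per day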
if _, exists := dd.oidDayMap[indexID]; exists {
continue
}
dd.oidDayMap[indexID] = indexName
dd.mapData = append(dd.mapData, newParseMap)
}
if len(dd.mapData) < dd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(dd.mapData) > 0 {
dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "dtb", []string{})
}
length = len(dd.mapData)
return
}
// AllMessages .
func (dd *DmDate) AllMessages(c context.Context) (length int, err error) {
dd.mapData = []model.MapData{}
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
var rows *xsql.Rows
if dd.offsets[i].OffsetID == 0 {
continue
}
if rows, err = dd.db.Query(c, fmt.Sprintf(dd.attrs.DataSQL.SQLByID, dd.attrs.DataSQL.SQLFields, i), dd.offsets[i].OffsetID, dd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := dao.InitMapData(dd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("appMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
newParseMap := dd.newParseMap(c, item)
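// skip rows created before frontTwelveMonthDate: only roughly the last twelve months are indexed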
ctime, ok := newParseMap["ctime"].(*interface{})
if ok {
dbTime := (*ctime).(time.Time)
dbTimeStr := dbTime.Format("2006-01-02")
t1, err1 := time.Parse("2006-01-02", dd.frontTwelveMonthDate)
t2, err2 := time.Parse("2006-01-02", dbTimeStr)
if err1 != nil || err2 != nil || t1.After(t2) {
continue
}
} else {
continue
}
tempList = append(tempList, newParseMap)
dd.mapData = append(dd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
dd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrCTime())
}
}
length = len(dd.mapData)
if length > 0 {
dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "db", []string{})
}
log.Info("length is %d", length)
return
}
// BulkIndex .
func (dd *DmDate) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := dd.mapData[start:end]
// if dd.d.GetConfig(c).Business.Index {
// err = dd.d.BulkDBData(c, dd.attrs, partData...)
// } else {
// err = dd.d.BulkDatabusData(c, dd.attrs, partData...)
// }
err = dd.d.BulkDBData(c, dd.attrs, writeEntityIndex, partData...)
return
}
// Commit commit offset.
func (dd *DmDate) Commit(c context.Context) (err error) {
if dd.d.GetConfig(c).Business.Index {
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
tOffset := dd.offsets[i]
if tOffset.TempOffsetID != 0 {
tOffset.OffsetID = tOffset.TempOffsetID
}
if tOffset.TempOffsetTime != "" {
tOffset.OffsetTime = tOffset.TempOffsetTime
}
tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
if err = dd.d.CommitOffset(c, tOffset, dd.attrs.AppID, tableName); err != nil {
log.Error("appMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for k, c := range dd.commits {
if err = c.Commit(); err != nil {
log.Error("appMultipleDatabus.Commit error(%v)", err)
continue
}
delete(dd.commits, k)
}
}
dd.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (dd *DmDate) Sleep(c context.Context) {
}
// Size return size.
func (dd *DmDate) Size(c context.Context) int {
return 0
}
// newParseMap derives index_name ("dm_date_" + month) and index_id (oid + "_" + date) from a scanned DB row.
func (dd *DmDate) newParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
res = parseMap
indexName, strID := "", ""
if res["month"] != nil {
if month, ok := res["month"].(*interface{}); ok {
mth := strings.Replace(dd.b2s((*month).([]uint8)), "-", "_", -1)
indexName = "dm_date_" + mth
}
}
if res["date"] != nil {
if date, ok := res["date"].(*interface{}); ok {
dte := strings.Replace(dd.b2s((*date).([]uint8)), "-", "_", -1)
if oid, ok := res["oid"].(*interface{}); ok {
strID = strconv.FormatInt((*oid).(int64), 10) + "_" + dte
}
}
}
res["index_name"] = indexName
res["index_id"] = strID
return
}
// newDtbParseMap reduces a databus row to id/oid plus derived month, date, index_name and index_id.
func (dd *DmDate) newDtbParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
res = parseMap
indexName, strID, mth, dte, id := "", "", "", "", ""
if res["ctime"] != nil {
if ctime, ok := res["ctime"].(string); ok {
t, _ := time.Parse("2006-01-02 15:04:05", ctime)
mth = t.Format("2006-01")
dte = t.Format("2006-01-02")
indexName = "dm_date_" + strings.Replace(mth, "-", "_", -1)
}
}
if res["oid"] != nil {
if oid, ok := res["oid"].(int64); ok {
strOid := strconv.FormatInt(oid, 10)
strID = strOid + "_" + strings.Replace(dte, "-", "_", -1)
}
}
if res["id"] != nil {
if newID, ok := res["id"].(int64); ok {
id = strconv.Itoa(int(newID))
}
}
for k := range res {
if k == "id" || k == "oid" {
continue
}
delete(res, k)
}
res["index_name"] = indexName
res["index_id"] = strID
res["month"] = mth
res["date"] = dte
res["id"] = id
return
}
// b2s converts []uint8 to string.
func (dd *DmDate) b2s(bs []uint8) string {
return string(bs)
}

View File

@@ -0,0 +1,355 @@
package business
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"gopkg.in/olivere/elastic.v5"
)
const _sql = "SELECT id, index_format, index_version, index_cluster, additional_mapping, data_center FROM digger_"
// Log .
type Log struct {
d *dao.Dao
appid string
attrs *model.Attrs
databus *databus.Databus
infoC *infoc.Infoc
infoCField []string
mapData []model.MapData
commits map[int32]*databus.Message
business map[int]*info
week map[int]string
additionalMapping map[int]map[string]string
defaultMapping map[string]string
mapping map[int]map[string]string
}
type info struct {
Format string
Cluster string
Version string
DataCenter int8
}
// NewLog .
func NewLog(d *dao.Dao, appid string) (l *Log) {
l = &Log{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
databus: d.DatabusPool[appid],
infoC: d.InfoCPool[appid],
infoCField: []string{},
mapData: []model.MapData{},
commits: map[int32]*databus.Message{},
business: map[int]*info{},
additionalMapping: map[int]map[string]string{},
mapping: map[int]map[string]string{},
week: map[int]string{
0: "0107",
1: "0815",
2: "1623",
3: "2431",
},
}
switch appid {
case "log_audit":
l.defaultMapping = map[string]string{
"uname": "string",
"uid": "string",
"business": "string",
"type": "string",
"oid": "string",
"action": "string",
"ctime": "time",
"int_0": "int",
"int_1": "int",
"int_2": "int",
"str_0": "string",
"str_1": "string",
"str_2": "string",
"extra_data": "string",
}
l.infoCField = []string{"uname", "uid", "business", "type", "oid", "action", "ctime",
"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "str_3", "str_4", "extra_data"}
case "log_user_action":
l.defaultMapping = map[string]string{
"mid": "string",
"platform": "string",
"build": "string",
"buvid": "string",
"business": "string",
"type": "string",
"oid": "string",
"action": "string",
"ip": "string",
"ctime": "time",
"int_0": "int",
"int_1": "int",
"int_2": "int",
"str_0": "string",
"str_1": "string",
"str_2": "string",
"extra_data": "string",
}
l.infoCField = []string{"mid", "platform", "build", "buvid", "business", "type", "oid", "action", "ip", "ctime",
"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "extra_data"}
default:
log.Error("log appid error(%v)", appid)
return
}
rows, err := d.SearchDB.Query(context.TODO(), _sql+appid)
if err != nil {
log.Error("log Query error(%v)", appid)
return
}
defer rows.Close()
for rows.Next() {
var (
id int
additionalMapping string
)
info := &info{}
if err = rows.Scan(&id, &info.Format, &info.Version, &info.Cluster, &additionalMapping, &info.DataCenter); err != nil {
log.Error("Log New DB (%v)(%v)", id, err)
continue
}
l.business[id] = info
if additionalMapping != "" {
var additionalMappingDict map[string]string
if err = json.Unmarshal([]byte(additionalMapping), &additionalMappingDict); err != nil {
log.Error("Log New Json (%v)(%v)", id, err)
continue
}
l.additionalMapping[id] = additionalMappingDict
}
}
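// final per-business mapping = defaultMapping overlaid with any additional_mapping from the DB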
for b := range l.business {
l.mapping[b] = map[string]string{}
for k, v := range l.defaultMapping {
l.mapping[b][k] = v
}
if a, ok := l.additionalMapping[b]; ok {
for k, v := range a {
l.mapping[b][k] = v
}
}
}
return
}
// Business return business.
func (l *Log) Business() string {
return l.attrs.Business
}
// InitIndex .
func (l *Log) InitIndex(c context.Context) {
}
// InitOffset .
func (l *Log) InitOffset(c context.Context) {
}
// Offset .
func (l *Log) Offset(c context.Context) {
}
// MapData .
func (l *Log) MapData(c context.Context) (mapData []model.MapData) {
return l.mapData
}
// Attrs .
func (l *Log) Attrs(c context.Context) (attrs *model.Attrs) {
return l.attrs
}
// SetRecover .
func (l *Log) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}
// IncrMessages .
func (l *Log) IncrMessages(c context.Context) (length int, err error) {
var jErr error
ticker := time.NewTicker(time.Millisecond * time.Duration(l.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-l.databus.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", l.attrs.Databus)
break
}
l.commits[msg.Partition] = msg
var result map[string]interface{}
decoder := json.NewDecoder(bytes.NewReader(msg.Value))
decoder.UseNumber()
if jErr = decoder.Decode(&result); jErr != nil {
log.Error("appid(%v) json.Unmarshal(%s) error(%v)", l.appid, msg.Value, jErr)
continue
}
// convert json.Number to int64
for k, v := range result {
switch t := v.(type) {
case json.Number:
if result[k], jErr = t.Int64(); jErr != nil {
log.Error("appid(%v) log.bulkDatabusData.json.Number(%v)(%v)", l.appid, t, jErr)
}
}
}
l.mapData = append(l.mapData, result)
if len(l.mapData) < l.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
// TODO: handle extra parameters
length = len(l.mapData)
return
}
// AllMessages .
func (l *Log) AllMessages(c context.Context) (length int, err error) {
return
}
// BulkIndex .
func (l *Log) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
partData := l.mapData[start:end]
if err = l.bulkDatabusData(c, l.attrs, writeEntityIndex, partData...); err != nil {
log.Error("appid(%v) json.bulkDatabusData error(%v)", l.appid, err)
return
}
return
}
// Commit .
func (l *Log) Commit(c context.Context) (err error) {
for k, msg := range l.commits {
if err = msg.Commit(); err != nil {
log.Error("appid(%v) Commit error(%v)", l.appid, err)
continue
}
delete(l.commits, k)
}
l.mapData = []model.MapData{}
return
}
// Sleep .
func (l *Log) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(l.attrs.Other.Sleep))
}
// Size .
func (l *Log) Size(c context.Context) (size int) {
return l.attrs.Other.Size
}
func (l *Log) bulkDatabusData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
var (
request elastic.BulkableRequest
bulkRequest map[string]*elastic.BulkService
businessID int
)
bulkRequest = map[string]*elastic.BulkService{}
for _, b := range bulkData {
indexName := ""
if business, ok := b["business"].(int64); ok {
businessID = int(business)
if v, ok := b["ctime"].(string); ok {
if cTime, timeErr := time.Parse("2006-01-02 15:04:05", v); timeErr == nil {
if info, ok := l.business[businessID]; ok {
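// the literal "week" in index_format is replaced by a day-of-month bucket:
// days 1-7 -> "0107", 8-15 -> "0815", 16-23 -> "1623", 24-31 -> "2431"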
suffix := strings.Replace(cTime.Format(info.Format), "week", l.week[cTime.Day()/8], -1) + "_" + info.Version
if !writeEntityIndex {
indexName = attrs.Index.IndexAliasPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
} else {
indexName = attrs.Index.IndexEntityPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
}
}
}
}
}
if indexName == "" {
log.Error("appid(%v) ac.d.bulkDatabusData business business(%v) data(%+v)", l.appid, b["business"], b)
continue
}
esCluster := l.business[businessID].Cluster // existence of l.business[businessID] was verified above
if _, ok := bulkRequest[esCluster]; !ok {
if _, eok := l.d.ESPool[esCluster]; eok {
bulkRequest[esCluster] = l.d.ESPool[esCluster].Bulk()
} else {
log.Error("appid(%v) ac.d.bulkDatabusData cluster no find error(%v)", l.appid, esCluster)
continue //忽略这条数据
}
}
// forward to the data center via infoc
if l.business[businessID].DataCenter == 1 {
arr := make([]interface{}, len(l.infoCField))
for i, f := range l.infoCField {
if v, ok := b[f]; ok {
arr[i] = fmt.Sprintf("%v", v)
}
}
if er := l.infoC.Info(arr...); er != nil {
log.Error("appid(%v) ac.infoC.Info error(%v)", l.appid, er)
}
}
// per-field transformation by mapping type
for k, v := range b {
if t, ok := l.mapping[businessID][k]; ok {
switch t {
case "int_to_bin":
if item, ok := v.(int64); ok {
item := int(item)
arr := []string{}
for i := 0; item != 0; i++ {
if item&1 == 1 {
arr = append(arr, strconv.Itoa(item&1<<uint(i)))
}
item = item >> 1
}
b[k] = arr
} else {
delete(b, k)
}
case "array":
if arr, ok := v.([]interface{}); ok {
b[k] = arr
} else {
delete(b, k)
}
}
} else {
delete(b, k)
}
}
request = elastic.NewBulkIndexRequest().Index(indexName).Type(attrs.Index.IndexType).Doc(b)
bulkRequest[esCluster].Add(request)
}
for _, v := range bulkRequest {
if v.NumberOfActions() == 0 {
continue
}
if _, err = v.Do(c); err != nil {
log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
}
}
return
}

View File

@@ -0,0 +1,25 @@
package dao
// import (
// "context"
// "go-common/app/job/main/search/model"
// "go-common/database/sql"
// "go-common/xstr"
// )
// const (
// _getAssetSQL = "SELECT id, name, type, src FROM digger_asset where id in (?)"
// )
// func (d *Dao) getAsset(c context.Context, ids []int64) (res *model.SQLAsset, err error) {
// res = new(model.SQLAsset)
// row := d.SearchDB.QueryRow(c, _getAssetSQL, xstr.JoinInts(ids))
// if err = row.Scan(&res.ID, &res.Name, &res.Type, &res.Src); err != nil {
// if err == sql.ErrNoRows {
// err = nil
// res = nil
// }
// }
// return
// }

View File

@@ -0,0 +1,266 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
)
const (
_getAttrsSQL = "SELECT appid,db_name,es_name,table_prefix,table_format,index_prefix,index_version,index_format,index_type,index_id,index_mapping, " +
"data_index_suffix,review_num,review_time,sleep,size,business,data_fields,data_extra,sql_by_id,sql_by_mtime,sql_by_idmtime,databus_info,databus_index_id FROM digger_app WHERE appid=?"
)
type attr struct {
d *Dao
appID string
attrs *model.Attrs
}
func newAttr(d *Dao, appID string) (ar *attr) {
ar = &attr{
d: d,
appID: appID,
attrs: new(model.Attrs),
}
if err := ar.initAttrs(); err != nil {
//fmt.Println("strace:init>", err)
log.Error("d.initAttrs error (%v)", err)
}
return
}
func (ar *attr) initAttrs() (err error) {
var sqlAttrs *model.SQLAttrs
for {
if sqlAttrs, err = ar.getSQLAttrs(context.TODO()); err != nil || sqlAttrs == nil {
log.Error("d.Attrs error (%v)", err)
time.Sleep(time.Second * 3)
continue
}
break
}
// attr-src
ar.attrs.Business = sqlAttrs.Business
ar.attrs.AppID = sqlAttrs.AppID
ar.attrs.DBName = sqlAttrs.DBName
ar.attrs.ESName = sqlAttrs.ESName
ar.attrs.DtbName = sqlAttrs.DtbName
// attr-table
if err = ar.parseTable(sqlAttrs); err != nil {
err = fmt.Errorf("parseTable appid(%s) err(%v)", ar.appID, err)
return
}
// attr-index
if err = ar.parseIndex(sqlAttrs); err != nil {
err = fmt.Errorf("parseIndex appid(%s) err(%v)", ar.appID, err)
return
}
// attr-datasql
if err = ar.parseDataSQL(sqlAttrs); err != nil {
err = fmt.Errorf("parseDataSQL appid(%s) err(%v)", ar.appID, err)
return
}
// attr-sql
// attr-data_extra
if err = ar.parseExtraData(sqlAttrs); err != nil {
err = fmt.Errorf("parseExtraData appid(%s) err(%v)", ar.appID, err)
return
}
// attr-databus
if err = ar.parseDatabus(sqlAttrs); err != nil {
err = fmt.Errorf("parseDatabus appid(%s) err(%v)", ar.appID, err)
return
}
// attr-other
ar.attrs.Other = &model.AttrOther{
ReviewNum: sqlAttrs.ReviewNum,
ReviewTime: sqlAttrs.ReviewTime,
Sleep: sqlAttrs.Sleep,
Size: sqlAttrs.Size,
}
return
}
func (ar *attr) getSQLAttrs(c context.Context) (res *model.SQLAttrs, err error) {
res = new(model.SQLAttrs)
row := ar.d.SearchDB.QueryRow(c, _getAttrsSQL, ar.appID)
//fmt.Println("appID", ar.appID)
if err = row.Scan(&res.AppID, &res.DBName, &res.ESName, &res.TablePrefix, &res.TableFormat, &res.IndexAliasPrefix, &res.IndexVersion, &res.IndexFormat, &res.IndexType, &res.IndexID, &res.IndexMapping,
&res.DataIndexSuffix, &res.ReviewNum, &res.ReviewTime, &res.Sleep, &res.Size, &res.Business, &res.DataFields, &res.DataExtraInfo, &res.SQLByID, &res.SQLByMTime, &res.SQLByIDMTime, &res.DatabusInfo, &res.DatabusIndexID); err != nil {
if err == sql.ErrNoRows {
err = nil
res = nil
}
}
return
}
func (ar *attr) parseTable(sqlAttrs *model.SQLAttrs) (err error) {
table := new(model.AttrTable)
table.TablePrefix = sqlAttrs.TablePrefix
table.TableFormat = sqlAttrs.TableFormat
tableFormat := strings.Split(table.TableFormat, ",")
if len(tableFormat) != 5 {
err = fmt.Errorf("wrong tableForamt(%s)", tableFormat)
return
}
if table.TableSplit = tableFormat[0]; table.TableSplit != "single" {
if table.TableFrom, err = strconv.Atoi(tableFormat[1]); err != nil {
return
}
if table.TableTo, err = strconv.Atoi(tableFormat[2]); err != nil {
return
}
}
table.TableZero = tableFormat[3]
table.TableFixed = (tableFormat[4] == "fixed")
ar.attrs.Table = table
return
}
func (ar *attr) parseIndex(sqlAttrs *model.SQLAttrs) (err error) {
index := new(model.AttrIndex)
index.IndexAliasPrefix = sqlAttrs.IndexAliasPrefix
index.IndexEntityPrefix = sqlAttrs.IndexAliasPrefix + sqlAttrs.IndexVersion
index.IndexFormat = sqlAttrs.IndexFormat
index.IndexType = sqlAttrs.IndexType
index.IndexID = sqlAttrs.IndexID
index.IndexMapping = sqlAttrs.IndexMapping
indexFormat := strings.Split(index.IndexFormat, ",")
if len(indexFormat) != 5 {
err = fmt.Errorf("wrong indexFormat(%s)", indexFormat)
return
}
if index.IndexID == "base" {
err = fmt.Errorf("indexID Prohibition 'base' (%s)", indexFormat)
return
}
if index.IndexSplit = indexFormat[0]; index.IndexSplit != "single" {
if index.IndexFrom, err = strconv.Atoi(indexFormat[1]); err != nil {
return
}
if index.IndexTo, err = strconv.Atoi(indexFormat[2]); err != nil {
return
}
}
index.IndexZero = indexFormat[3]
index.IndexFixed = (indexFormat[4] == "fixed")
ar.attrs.Index = index
return
}
func (ar *attr) parseDataSQL(sqlAttrs *model.SQLAttrs) (err error) {
dataSQL := new(model.AttrDataSQL)
dataSQL.DataIndexFormatFields = make(map[string]string)
dataSQL.DataDtbFields = make(map[string][]string)
dataSQL.DataFieldsV2 = make(map[string]model.AttrDataFields)
dataSQL.DataIndexSuffix = sqlAttrs.DataIndexSuffix
dataSQL.DataFields = sqlAttrs.DataFields
dataSQL.DataExtraInfo = sqlAttrs.DataExtraInfo
if dataSQL.DataFields == "" {
return
}
p := []model.AttrDataFields{} //DataFieldsV2
sqlFields := []string{}
if e := json.Unmarshal([]byte(dataSQL.DataFields), &p); e != nil {
fields := strings.Split(dataSQL.DataFields, ",")
for _, v := range fields {
exp := strings.Split(v, ":")
indexFieldName := exp[0]
dataSQL.DataIndexFields = append(dataSQL.DataIndexFields, indexFieldName)
sqlFields = append(sqlFields, exp[1])
dataSQL.DataIndexFormatFields[indexFieldName] = exp[2]
if exp[3] == "n" {
dataSQL.DataIndexRemoveFields = append(dataSQL.DataIndexRemoveFields, indexFieldName)
}
}
} else {
// JSON-style field definition
for _, v := range p {
dataSQL.DataFieldsV2[v.ESField] = v
dataSQL.DataIndexFields = append(dataSQL.DataIndexFields, v.ESField)
sqlFields = append(sqlFields, v.SQL)
dataSQL.DataIndexFormatFields[v.ESField] = v.Expect
if v.Stored == "n" {
dataSQL.DataIndexRemoveFields = append(dataSQL.DataIndexRemoveFields, v.ESField)
}
if v.InDtb == "y" {
dataSQL.DataDtbFields[v.Field] = append(dataSQL.DataDtbFields[v.Field], v.ESField)
}
}
}
//fmt.Println(dataSQL.DataDtbFields)
// the order of sqlFields must match attr.DataIndexFields
if (len(sqlFields) != len(dataSQL.DataIndexFields)) && (len(sqlFields) == 0 || len(dataSQL.DataIndexFields) == 0) {
log.Error("sqlFields and attr.DataIndexFields are different")
return
}
dataSQL.SQLFields = strings.Join(sqlFields, ",")
if ar.attrs.Table.TableSplit == "single" {
dataSQL.SQLByID = fmt.Sprintf(sqlAttrs.SQLByID, dataSQL.SQLFields)
dataSQL.SQLByMTime = fmt.Sprintf(sqlAttrs.SQLByMTime, dataSQL.SQLFields)
dataSQL.SQLByIDMTime = fmt.Sprintf(sqlAttrs.SQLByIDMTime, dataSQL.SQLFields)
} else {
dataSQL.SQLByID = sqlAttrs.SQLByID
dataSQL.SQLByMTime = sqlAttrs.SQLByMTime
dataSQL.SQLByIDMTime = sqlAttrs.SQLByIDMTime
}
ar.attrs.DataSQL = dataSQL
return
}
func (ar *attr) parseExtraData(sqlAttrs *model.SQLAttrs) (err error) {
if sqlAttrs.DataExtraInfo != "" {
err = json.Unmarshal([]byte(sqlAttrs.DataExtraInfo), &ar.attrs.DataExtras)
}
// append all format field from extra data
for _, v := range ar.attrs.DataExtras {
if v.FieldsStr == "" {
continue
}
fields := strings.Split(v.FieldsStr, ",")
for _, v := range fields {
exp := strings.Split(v, ":")
ar.attrs.DataSQL.DataIndexFormatFields[exp[0]] = exp[2]
}
}
return
}
func (ar *attr) parseDatabus(sqlAttrs *model.SQLAttrs) (err error) {
dtb := new(model.AttrDatabus)
if sqlAttrs.DatabusInfo != "" {
databusInfo := strings.Split(sqlAttrs.DatabusInfo, ",")
if len(databusInfo) != 3 {
err = fmt.Errorf("wrong databusInfo(%s)", databusInfo)
return
}
dtb.Databus = databusInfo[0]
if dtb.AggCount, err = strconv.Atoi(databusInfo[1]); err != nil {
return
}
if dtb.Ticker, err = strconv.Atoi(databusInfo[2]); err != nil {
return
}
}
if sqlAttrs.DatabusIndexID != "" {
databusIndexID := strings.Split(sqlAttrs.DatabusIndexID, ":")
if len(databusIndexID) != 2 {
err = fmt.Errorf("wrong databusIndexID(%s)", databusIndexID)
return
}
dtb.PrimaryID = databusIndexID[0]
dtb.RelatedID = databusIndexID[1]
}
ar.attrs.Databus = dtb
return
}

View File

@@ -0,0 +1,77 @@
package dao
import (
"context"
"encoding/json"
"errors"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
)
const (
_getBusinessSQL = "SELECT business, app_ids, asset_db, asset_es, asset_dtb FROM digger_business WHERE business=?"
)
type bns struct {
d *Dao
business string
bInfo *model.Bsn
}
func newBusiness(d *Dao, business string) (bs *bns, err error) {
bs = &bns{
d: d,
business: business,
bInfo: new(model.Bsn),
}
if err = bs.initBusiness(); err != nil {
log.Error("d.initBusiness error (%v)", err)
}
return
}
func (bs *bns) initBusiness() (err error) {
var sqlBusiness *model.SQLBusiness
for {
if sqlBusiness, err = bs.getBusiness(context.TODO()); err != nil {
log.Error("initBusiness error (%v)", err)
time.Sleep(time.Second * 3)
continue
}
break
}
if sqlBusiness == nil {
err = errors.New("initBusiness: " + bs.business + " not found in `digger_business`")
return
}
bs.bInfo.Business = sqlBusiness.Business
bs.bInfo.AppInfo = make([]model.BsnAppInfo, 0)
// business-appinfo
if sqlBusiness.AppIds != "" {
err = json.Unmarshal([]byte(sqlBusiness.AppIds), &bs.bInfo.AppInfo)
}
// business-assetdb
// business-assetes
// business-assedtb
return
}
func (bs *bns) getBusiness(c context.Context) (res *model.SQLBusiness, err error) {
res = new(model.SQLBusiness)
row := bs.d.SearchDB.QueryRow(c, _getBusinessSQL, bs.business)
if err = row.Scan(&res.Business, &res.AppIds, &res.AssetDB, &res.AssetES, &res.AssetDtb); err != nil {
log.Error("business row.Scan error(%v)", err)
if err == sql.ErrNoRows {
err = nil
res = nil
return
}
}
return
}

View File

@@ -0,0 +1,77 @@
package dao
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
xtime "go-common/library/time"
)
const (
_getOffsetSQL = "SELECT offset_incr_id,offset_incr_time,review_incr_id,review_icnr_time FROM digger_offset WHERE project=? AND table_name=?"
// TODO: problematic, fields are missing from the update
_updateOffsetSQL = "UPDATE digger_offset SET offset_incr_id=?,offset_incr_time=?,mtime=? WHERE project=? AND table_name=?"
//_initOffsetSQL = "INSERT INTO digger_offset(project,table_name,offset_incr_time,offset_recover_id,offset_recover_time) VALUES(?,?,?,?,?,?) " +
// "ON DUPLICATE KEY UPDATE offset_recover_id=?, offset_recover_time=?"
)
// Offset get offset
func (d *Dao) Offset(c context.Context, appid, tableName string) (res *model.Offset, err error) {
res = new(model.Offset)
row := d.SearchDB.QueryRow(c, _getOffsetSQL, appid, tableName)
if err = row.Scan(&res.OffID, &res.OffTime, &res.ReviewID, &res.ReviewTime); err != nil {
log.Error("OffsetID row.Scan error(%v)", err)
if err == sql.ErrNoRows {
err = nil
res.OffID = 1
res.OffTime = xtime.Time(time.Now().Unix())
return
}
log.Error("offset row.Scan error(%v)", err)
}
return
}
// updateOffset update offset
func (d *Dao) updateOffset(c context.Context, offset *model.LoopOffset, appid, tableName string) (err error) {
nowFormat := time.Now().Format("2006-01-02 15:04:05")
if _, err = d.SearchDB.Exec(c, _updateOffsetSQL, offset.OffsetID, offset.OffsetTime, nowFormat, appid, tableName); err != nil {
log.Error("updateOffset Exec() error(%v)", err)
}
return
}
// bulkInitOffset .
func (d *Dao) bulkInitOffset(c context.Context, offset *model.LoopOffset, attrs *model.Attrs, arr []string) (err error) {
var (
values = []string{}
nowFormat = time.Now().Format("2006-01-02 15:04:05")
insertOffsetSQL = "INSERT INTO digger_offset(project,table_name,table_suffix,offset_incr_time,offset_recover_id,offset_recover_time) VALUES"
)
if len(arr) == 0 {
for i := attrs.Table.TableFrom; i <= attrs.Table.TableTo; i++ {
if attrs.Table.TableTo == 0 {
arr = append(arr, attrs.Table.TablePrefix)
} else {
arr = append(arr, fmt.Sprintf("%s%0"+attrs.Table.TableZero+"d", attrs.Table.TablePrefix, i))
}
}
}
for _, v := range arr {
// TODO why???
// table := attrs.Table.TablePrefix + v
// value := "('" + attrs.AppID + "','" + table + "','" + v + "','" + nowFormat + "'," + strconv.FormatInt(offset.RecoverID, 10) + ",'" + offset.RecoverTime + "')"
value := "('" + attrs.AppID + "','" + v + "','" + attrs.Table.TablePrefix + "','" + nowFormat + "'," + strconv.FormatInt(offset.RecoverID, 10) + ",'" + offset.RecoverTime + "')"
values = append(values, value)
}
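// upsert: one offset row per physical table; ON DUPLICATE KEY only refreshes
// the recover fields, leaving incremental offsets intact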
valueStr := strings.Join(values, ",") + " ON DUPLICATE KEY UPDATE offset_recover_id=VALUES(offset_recover_id),offset_recover_time=VALUES(offset_recover_time)"
bulkInsertSQL := insertOffsetSQL + valueStr
_, err = d.SearchDB.Exec(c, bulkInsertSQL)
return
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
. "github.com/smartystreets/goconvey/convey"
)
func WithCO(f func(d *Dao)) func() {
return func() {
dir, _ := filepath.Abs("../cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := New(conf.Conf)
f(d)
}
}
func Test_Offset(t *testing.T) {
Convey("Test_Offset", t, WithCO(func(d *Dao) {
var (
err error
c = context.TODO()
)
d.Offset(c, "", "")
So(err, ShouldBeNil)
}))
}
func Test_UpdateOffset(t *testing.T) {
Convey("Test_UpdateOffset", t, WithCO(func(d *Dao) {
var (
err error
c = context.TODO()
offset = &model.LoopOffset{}
)
d.updateOffset(c, offset, "", "")
So(err, ShouldBeNil)
}))
}
func Test_BulkInitOffset(t *testing.T) {
Convey("Test_BulkInitOffset", t, WithCO(func(d *Dao) {
var (
c = context.TODO()
err error
offset = &model.LoopOffset{}
attrs = &model.Attrs{
Table: &model.AttrTable{},
}
)
attrs.Table.TableFrom = 0
attrs.Table.TableTo = 0
err = d.bulkInitOffset(c, offset, attrs, []string{})
So(err, ShouldBeNil)
}))
}

View File

@@ -0,0 +1,665 @@
package dao
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
"go-common/library/xstr"
// "go-common/database/hbase"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"go-common/library/stat/prom"
"gopkg.in/olivere/elastic.v5"
)
var errorsCount = prom.BusinessErrCount
const (
// business
// search db name. for table attr,offset,manager.
_searchDB = "search"
)
// App .
type App interface {
Business() string
InitIndex(c context.Context)
InitOffset(c context.Context)
Offset(c context.Context)
SetRecover(c context.Context, recoverID int64, recoverTime string, i int)
IncrMessages(c context.Context) (length int, err error)
AllMessages(c context.Context) (length int, err error)
BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error)
Commit(c context.Context) (err error)
Sleep(c context.Context)
Size(c context.Context) (size int)
}
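// A rough sketch of how a job loop is expected to drive an App implementation
// (inferred from this interface; the actual driver lives in the service layer):
//
//	app.InitIndex(ctx)
//	app.InitOffset(ctx)
//	app.Offset(ctx)
//	for {
//		n, err := app.IncrMessages(ctx) // or AllMessages for a full rebuild
//		if err == nil && n > 0 {
//			_ = app.BulkIndex(ctx, 0, n, false)
//			_ = app.Commit(ctx)
//		}
//		app.Sleep(ctx)
//	}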
// Dao .
type Dao struct {
c *conf.Config
// smsClient
sms *sms
// search db
SearchDB *xsql.DB
// hbase *hbase.Client
BusinessPool map[string]model.BsnAppInfo
AttrPool map[string]*model.Attrs
AppPool map[string]App
DBPool map[string]*xsql.DB
ESPool map[string]*elastic.Client
DatabusPool map[string]*databus.Databus
InfoCPool map[string]*infoc.Infoc
}
// New .
func New(c *conf.Config) (d *Dao) {
d = &Dao{
c: c,
DBPool: newDbPool(c),
}
// check search db
if d.SearchDB = d.DBPool[_searchDB]; d.SearchDB == nil {
panic("SearchDB must config")
}
d.sms = newSMS(d)
d.BusinessPool = newBusinessPool(d)
d.AttrPool = newAttrPool(d)
d.ESPool = newEsPool(c, d)
// consumer
d.DatabusPool = newDatabusPool(c, d)
d.InfoCPool = newInfoCPool(c, d)
return
}
// newDatabusPool .
func newDatabusPool(c *conf.Config, d *Dao) (pool map[string]*databus.Databus) {
pool = make(map[string]*databus.Databus)
if c.Business.Index {
return
}
for name := range d.BusinessPool {
if config, ok := c.Databus[name]; ok {
pool[name] = databus.New(config)
}
}
return
}
// newInfoCPool .
func newInfoCPool(c *conf.Config, d *Dao) (pool map[string]*infoc.Infoc) {
pool = map[string]*infoc.Infoc{}
if c.Business.Index {
return
}
for k := range d.BusinessPool {
if n, ok := c.InfoC[k]; ok {
pool[k] = infoc.New(n)
}
}
return
}
// newBusinessPool all appid info from one business
func newBusinessPool(d *Dao) (pool map[string]model.BsnAppInfo) {
pool = map[string]model.BsnAppInfo{}
if bns, err := newBusiness(d, d.c.Business.Env); err == nil {
for _, v := range bns.bInfo.AppInfo {
if v.AppID != "" {
pool[v.AppID] = v
}
}
}
return
}
// newAttrPool .
func newAttrPool(d *Dao) (pool map[string]*model.Attrs) {
pool = make(map[string]*model.Attrs)
for k := range d.BusinessPool {
ar := newAttr(d, k)
pool[k] = ar.attrs
}
//fmt.Println("strace:attr-pool>", pool)
return
}
// SetRecover set recover.
func (d *Dao) SetRecover(c context.Context, appid string, recoverID int64, recoverTime string, i int) {
d.AppPool[appid].SetRecover(c, recoverID, recoverTime, i)
}
// newDbPool db combo
func newDbPool(c *conf.Config) (pool map[string]*xsql.DB) {
pool = make(map[string]*xsql.DB)
for dbName, config := range c.DB {
pool[dbName] = xsql.NewMySQL(config)
}
return
}
// newEsCluster cluster action
func newEsPool(c *conf.Config, d *Dao) (esCluster map[string]*elastic.Client) {
esCluster = make(map[string]*elastic.Client)
for esName, e := range c.Es {
if client, err := elastic.NewClient(elastic.SetURL(e.Addr...)); err == nil {
esCluster[esName] = client
} else {
d.PromError("es:集群连接失败", "cluster: %s, %v", esName, err)
if err := d.SendSMS(fmt.Sprintf("[search-job]%s集群连接失败", esName)); err != nil {
d.PromError("es:集群连接短信失败", "cluster: %s, %v", esName, err)
}
}
}
return
}
// PromError .
func (d *Dao) PromError(name string, format string, args ...interface{}) {
errorsCount.Incr(name)
log.Error(format, args...)
}
// Close close dao
func (d *Dao) Close() {
for _, db := range d.DBPool {
db.Close()
}
}
// Ping health of db.
func (d *Dao) Ping(c context.Context) (err error) {
// TODO: ping all pooled DBs in a loop
if err = d.SearchDB.Ping(c); err != nil {
d.PromError("db:ping", "")
return
}
if err = d.pingESCluster(c); err != nil {
d.PromError("es:ping", "d.pingESCluster error(%v)", err)
return
}
return
}
// GetAliases get all aliases by indexAliasPrefix
func (d *Dao) GetAliases(esName, indexAliasPrefix string) (aliases map[string]bool, err error) {
aliases = map[string]bool{}
if _, ok := d.ESPool[esName]; !ok {
log.Error("GetAliases 集群不存在 (%s)", esName)
return
}
if aliasesRes, err := d.ESPool[esName].Aliases().Index(indexAliasPrefix + "*").Do(context.TODO()); err != nil {
log.Error("GetAliases(%s*) failed", indexAliasPrefix)
} else {
for _, indexDetails := range aliasesRes.Indices {
for _, v := range indexDetails.Aliases {
if v.AliasName != "" {
aliases[v.AliasName] = true
}
}
}
}
return
}
// InitIndex creates entity indices & aliases if necessary
func (d *Dao) InitIndex(c context.Context, aliases map[string]bool, esName, indexAliasName, indexEntityName, indexMapping string) {
if indexMapping == "" {
log.Error("indexEntityName(%s) mapping is epmty", indexEntityName)
return
}
for {
exists, err := d.ESPool[esName].IndexExists(indexEntityName).Do(c)
if err != nil {
time.Sleep(time.Second * 3)
continue
}
if !exists {
if _, err := d.ESPool[esName].CreateIndex(indexEntityName).Body(indexMapping).Do(c); err != nil {
log.Error("indexEntityName(%s) create err(%v)", indexEntityName, err)
time.Sleep(time.Second * 3)
continue
}
}
break
}
// add aliases if necessary
if aliases != nil && indexAliasName != indexEntityName {
if _, ok := aliases[indexAliasName]; !ok {
if _, err := d.ESPool[esName].Alias().Add(indexEntityName, indexAliasName).Do(context.TODO()); err != nil {
log.Error("indexEntityName(%s) failed to add alias indexAliasName(%s) err(%v)", indexEntityName, indexAliasName, err)
}
}
}
}
// InitOffset init offset to offset table .
func (d *Dao) InitOffset(c context.Context, offset *model.LoopOffset, attrs *model.Attrs, arr []string) {
for {
if err := d.bulkInitOffset(c, offset, attrs, arr); err != nil {
log.Error("project(%s) initOffset(%v)", attrs.AppID, err)
time.Sleep(time.Second * 3)
continue
}
break
}
}
// InitMapData init each field struct
func InitMapData(fields []string) (item model.MapData, row []interface{}) {
item = make(map[string]interface{})
for _, v := range fields {
item[v] = new(interface{})
}
for _, v := range fields {
row = append(row, item[v])
}
return
}
// UpdateOffsetByMap .
func UpdateOffsetByMap(offsets *model.LoopOffset, mapData ...model.MapData) {
var (
id int64
mtime string
)
length := len(mapData)
if length == 0 {
return
}
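// loop detection: if the newest row's id moved but its mtime still equals the
// current offset time, many rows share one mtime; flag IsLoop so the next pass
// advances past that mtime instead of re-reading the same window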
offsetTime := offsets.OffsetTime
lastRes := mapData[length-1]
id = lastRes.PrimaryID()
lastMtime := lastRes.StrMTime()
//fmt.Println("real", lastMtime, id, offsets.OffsetID)
if (id != offsets.OffsetID) && (offsetTime == lastMtime) {
offsets.IsLoop = true
} else {
if offsets.IsLoop {
for _, p := range mapData {
tempMtime := p.StrMTime()
if tempMtime == offsetTime {
continue
}
id = p.PrimaryID()
mtime = tempMtime
break
}
} else {
mtime = lastMtime
}
offsets.IsLoop = false
}
offsets.SetTempOffset(id, mtime)
}
// CommitOffset .
func (d *Dao) CommitOffset(c context.Context, offset *model.LoopOffset, appid, tableName string) (err error) {
if offset.TempOffsetID != 0 {
offset.SetOffset(offset.TempOffsetID, "")
}
if offset.TempOffsetTime != "" {
offset.SetOffset(0, offset.TempOffsetTime)
}
if offset.TempRecoverID >= 0 {
offset.SetRecoverOffset(offset.TempRecoverID, "")
}
if offset.TempRecoverTime != "" {
offset.SetRecoverOffset(-1, offset.TempRecoverTime)
}
err = d.updateOffset(c, offset, appid, tableName)
return
}
// JSON2map json to map.
func (d *Dao) JSON2map(rowJSON json.RawMessage) (result map[string]interface{}, err error) {
decoder := json.NewDecoder(bytes.NewReader(rowJSON))
decoder.UseNumber()
if err = decoder.Decode(&result); err != nil {
log.Error("JSON2map.Unmarshal(%s) error(%v)", rowJSON, err)
return nil, err
}
// convert json.Number to int64
for k, v := range result {
switch t := v.(type) {
case json.Number:
if result[k], err = t.Int64(); err != nil {
log.Error("JSON2map.json.Number(%v)(%v)", t, err)
return nil, err
}
}
}
return
}
// ExtraData enriches mapData with fields pulled from other DB tables, after applying the db/dtb field transforms.
func (d *Dao) ExtraData(c context.Context, mapData []model.MapData, attrs *model.Attrs, way string, tags []string) (md []model.MapData, err error) {
md = mapData
switch way {
case "db":
for i, item := range mapData {
item.TransData(attrs)
for k, v := range item {
md[i][k] = v
}
}
case "dtb":
for i, item := range mapData {
item.TransDtb(attrs)
for k, v := range item {
md[i][k] = v
}
}
}
for _, ex := range attrs.DataExtras {
// db exists or not
if _, ok := d.DBPool[ex.DBName]; !ok {
log.Error("ExtraData d.DBPool excludes:%s", ex.DBName)
continue
}
if len(tags) != 0 {
for _, v := range tags {
if v != ex.Tag {
continue
}
switch ex.Type {
case "slice":
md, err = d.extraDataSlice(c, md, attrs, ex)
default:
md, err = d.extraDataDefault(c, md, attrs, ex)
}
}
} else {
switch ex.Type {
case "slice":
md, err = d.extraDataSlice(c, md, attrs, ex)
default:
md, err = d.extraDataDefault(c, md, attrs, ex)
}
}
}
return
}
// extraDataDefault merges one extra row of fields per document, looked up via in_field.
func (d *Dao) extraDataDefault(c context.Context, mapData []model.MapData, attrs *model.Attrs, ex model.AttrDataExtra) (md []model.MapData, err error) {
md = mapData
// filter ids from in_fields
var (
ids []int64
items map[int64]model.MapData
include []string
)
cdtInField := ex.Condition["in_field"]
items = make(map[int64]model.MapData)
if cld, ok := ex.Condition["include"]; ok {
include = strings.Split(cld, "=")
}
var rows *xsql.Rows
if cdtInFields := strings.Split(cdtInField, ","); len(cdtInFields) == 1 { // FIXME: support locating one row via multiple primary-key conditions
for _, m := range mapData {
if v, ok := m[cdtInField]; ok {
if len(include) >= 2 { // TODO: support more condition types
if cldVal, ok := m[include[0]]; ok && strconv.FormatInt(cldVal.(int64), 10) == include[1] {
ids = append(ids, v.(int64))
}
} else {
ids = append(ids, v.(int64)) // TODO: deduplicate ids
}
}
}
// query extra data
// TODO: for heavily sharded businesses an oversized loop size can push ~500k ids into one WHERE IN, which the DB may reject or time out
if len(ids) > 0 {
if tableFormat := strings.Split(ex.TableFormat, ","); ex.TableFormat == "" || tableFormat[0] == "single" {
i := 0
flag := false
// TODO: drawback: memory-hungry
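// chunk the WHERE IN query into batches of 200 ids to keep statements bounded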
for {
var id []int64
if (i+1)*200 < len(ids) {
id = ids[i*200 : (i+1)*200]
} else {
id = ids[i*200:]
flag = true
}
rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(id))+" and 1 = ? ", 1)
if err != nil {
log.Error("extraDataDefault db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataDefault rows.Scan() error(%v)", err)
continue
}
if v, ok := item[ex.InField]; ok {
if v2, ok := v.(*interface{}); ok {
item.TransData(attrs)
items[(*v2).(int64)] = item
}
}
// fmt.Println(item)
}
rows.Close()
i++
if flag {
break
}
}
} else if tableFormat[0] == "int" {
formatData := make(map[int64][]int64)
var dbid = []int64{}
if len(tableFormat) >= 6 { // danmaku reports are sharded by subject id while matching on dmid
for _, m := range mapData {
if v, ok := m[tableFormat[5]]; ok {
dbid = append(dbid, v.(int64)) // TODO: deduplicate
}
}
} else {
dbid = ids
}
if len(dbid) != len(ids) {
log.Error("tableFormat[5] len error(%v)(%v)", len(dbid), len(ids))
return
}
for i := 0; i < len(ids); i++ {
mod, e := strconv.ParseInt(tableFormat[2], 10, 64)
if e != nil {
log.Error("extraDataDefault strconv.ParseInt() error(%v)", e)
continue
}
mod = dbid[i] % (mod + 1) // renamed from d to avoid shadowing the receiver
if mod < 0 { // possible dirty data
continue
}
formatData[mod] = append(formatData[mod], ids[i])
}
for v, k := range formatData {
rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, v, xstr.JoinInts(k))+" and 1 = ? ", 1)
if err != nil {
log.Error("extraDataDefaultTableFormat db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataDefaultTableFormat rows.Scan() error(%v)", err)
continue
}
if v, ok := item[ex.InField]; ok {
if v2, ok := v.(*interface{}); ok {
item.TransData(attrs)
items[(*v2).(int64)] = item
}
}
}
rows.Close()
}
}
}
// fmt.Println("ids:", ids, "items:", items)
// merge data
for i, m := range mapData {
if len(include) >= 2 { // TODO: support more condition types
if cldVal, ok := m[include[0]]; !ok || strconv.FormatInt(cldVal.(int64), 10) != include[1] {
continue
}
}
if k, ok := m[cdtInField]; ok {
if item, ok := items[k.(int64)]; ok {
for _, v := range ex.RemoveFields {
delete(item, v)
}
item.TransData(attrs)
for k, v := range item {
md[i][k] = v
}
}
}
}
//fmt.Println(md)
} else {
for i, m := range mapData {
var value []interface{}
for _, v := range cdtInFields {
value = append(value, m[v])
}
rows, err = d.DBPool[ex.DBName].Query(c, ex.SQL, value...)
if err != nil {
log.Error("extraDataDefault db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataDefault rows.Scan() error(%v)", err)
continue
}
item.TransData(attrs)
for _, v := range ex.RemoveFields {
delete(item, v)
}
for k, v := range item {
md[i][k] = v
}
}
rows.Close()
}
}
return
}
// extraData-slice
func (d *Dao) extraDataSlice(c context.Context, mapData []model.MapData, attrs *model.Attrs, ex model.AttrDataExtra) (md []model.MapData, err error) {
md = mapData
// filter ids from in_fields
var (
ids []int64
items map[string]map[string][]interface{}
include []string
)
cdtInField := ex.Condition["in_field"]
items = make(map[string]map[string][]interface{})
sliceFields := strings.Split(ex.SliceField, ",")
if cld, ok := ex.Condition["include"]; ok {
include = strings.Split(cld, "=")
}
for _, m := range mapData {
if v, ok := m[cdtInField]; ok {
if len(include) >= 2 { // TODO: support more condition types
if cldVal, ok := m[include[0]]; ok && strconv.FormatInt(cldVal.(int64), 10) == include[1] {
ids = append(ids, v.(int64))
}
} else {
ids = append(ids, v.(int64)) // TODO: deduplicate ids
}
}
}
// query extra data
if len(ids) > 0 {
var rows *xsql.Rows
rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(ids))+" and 1 = ? ", 1)
if err != nil {
log.Error("extraDataSlice db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataSlice rows.Scan() error(%v)", err)
continue
}
if v, ok := item[ex.InField]; ok {
if v2, ok := v.(*interface{}); ok {
var key string
switch (*v2).(type) {
case int, int8, int16, int32, int64:
key = strconv.FormatInt((*v2).(int64), 10)
case []uint, []uint8, []uint16, []uint32, []uint64:
key = string((*v2).([]byte))
}
for _, sf := range sliceFields {
if _, ok := items[key]; !ok {
items[key] = make(map[string][]interface{})
}
var res interface{}
if v3, ok := item[sf].(*interface{}); ok {
switch (*v3).(type) {
case []uint, []uint8, []uint16, []uint32, []uint64:
res = string((*v3).([]byte))
default:
res = v3
}
}
items[key][sf] = append(items[key][sf], res)
}
}
}
}
rows.Close()
}
//log.Info("items:%v", items)
// merge data
for i, m := range mapData {
if len(include) >= 2 { // TODO: support more condition types
if cldVal, ok := m[include[0]]; !ok || strconv.FormatInt(cldVal.(int64), 10) != include[1] {
continue
}
}
if v, ok := m[cdtInField]; ok {
if item, ok := items[strconv.FormatInt(v.(int64), 10)]; ok {
for _, sf := range sliceFields {
if list, ok := item[sf]; ok {
md[i][sf] = list
}
}
} else {
for _, sf := range sliceFields {
md[i][sf] = []int64{}
}
}
}
}
// for _, v := range md {
// log.Info("md:%v", v)
// }
return
}
// GetConfig .
func (d *Dao) GetConfig(c context.Context) *conf.Config {
return d.c
}

View File

@@ -0,0 +1,59 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
. "github.com/smartystreets/goconvey/convey"
)
func WithDao(f func(d *Dao)) func() {
return func() {
dir, _ := filepath.Abs("../cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := New(conf.Conf)
f(d)
}
}
func Test_Ping(t *testing.T) {
Convey("test ping", t, WithDao(func(d *Dao) {
var (
err error
c = context.TODO()
)
err = d.Ping(c)
So(err, ShouldBeNil)
}))
}
func Test_SetRecover(t *testing.T) {
Convey("set recover", t, WithDao(func(d *Dao) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, "archive_video", 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_Close(t *testing.T) {
Convey("test close", t, WithDao(func(d *Dao) {
d.Close()
}))
}
func Test_SendSMS(t *testing.T) {
Convey("test send sms", t, WithDao(func(d *Dao) {
var err error
err = d.SendSMS("test sms")
So(err, ShouldBeNil)
}))
}

View File

@@ -0,0 +1,243 @@
package dao
import (
"context"
"fmt"
"reflect"
"strconv"
"time"
"go-common/app/job/main/search/model"
"go-common/library/log"
"go-common/library/stat/prom"
"gopkg.in/olivere/elastic.v5"
)
// BulkDatabusData writes databus-sourced data into ES.
func (d *Dao) BulkDatabusData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
// TODO: needs compatibility handling
var (
request elastic.BulkableRequest
bulkRequest = d.ESPool[attrs.ESName].Bulk()
//indexField = ""
)
//s := strings.Split(attrs.DataSQL.DataIndexSuffix, ";")
//if len(s) >= 2 {
// indexField = strings.Split(s[1], ":")[0]
//}
for _, b := range bulkData {
var (
indexName string
strID string
)
if name, ok := b["index_name"]; ok {
if indexName, ok = name.(string); ok {
delete(b, "index_name")
} else {
log.Error("dao.es.BulkDBData index_name err")
continue
}
} else {
if !writeEntityIndex {
indexName, _ = b.Index(attrs)
} else {
_, indexName = b.Index(attrs)
}
}
if id, ok := b["index_id"]; ok {
if strID, ok = id.(string); !ok {
log.Error("es.BulkDBData.strID(%v)", id)
continue
}
} else {
if strID, ok = b.StrID(attrs); !ok {
log.Error("es.BulkDBData.strID")
continue
}
}
if indexName == "" {
continue
}
for _, v := range attrs.DataSQL.DataIndexRemoveFields {
delete(b, v)
}
if _, ok := b["index_field"]; ok {
delete(b, "index_field")
//delete(b, indexField)
delete(b, "ctime")
delete(b, "mtime")
}
for k := range b {
if !d.Contain(k, attrs.DataSQL.DataIndexFormatFields) {
delete(b, k)
}
}
key := []string{}
for k := range b {
key = append(key, k)
}
for _, k := range key {
customType, ok := attrs.DataSQL.DataIndexFormatFields[k]
if ok {
switch customType {
case "ip":
switch b[k].(type) {
case float64:
ipFormat := b.InetNtoA(int64(b[k].(float64)))
b[k+"_format"] = ipFormat
case int64:
ipFormat := b.InetNtoA(b[k].(int64))
b[k+"_format"] = ipFormat
}
case "arr":
var arr []int
binaryAttributes := strconv.FormatInt(b[k].(int64), 2)
for i := len(binaryAttributes) - 1; i >= 0; i-- {
b := fmt.Sprintf("%c", binaryAttributes[i])
if b == "1" {
arr = append(arr, len(binaryAttributes)-i)
}
}
b[k+"_format"] = arr
case "bin":
var arr []int
binaryAttributes := strconv.FormatInt(b[k].(int64), 2)
for i := len(binaryAttributes) - 1; i >= 0; i-- {
b := fmt.Sprintf("%c", binaryAttributes[i])
if b == "1" {
arr = append(arr, len(binaryAttributes)-i)
}
}
b[k] = arr
case "workflow":
if state, ok := b[k].(int64); ok {
b["state"] = state & 15
b["business_state"] = state >> 4 & 15
delete(b, k)
}
case "time":
if v, ok := b[k].(string); ok {
if v == "0000-00-00 00:00:00" {
b[k] = "0001-01-01 00:00:00"
}
}
default:
// leave other custom types untouched
}
}
}
if strID == "" {
request = elastic.NewBulkIndexRequest().Index(indexName).Type(attrs.Index.IndexType).Doc(b)
} else {
request = elastic.NewBulkUpdateRequest().Index(indexName).Type(attrs.Index.IndexType).Id(strID).Doc(b).DocAsUpsert(true)
}
//fmt.Println(request)
bulkRequest.Add(request)
}
if bulkRequest.NumberOfActions() == 0 {
return
}
now := time.Now()
// prom.BusinessInfoCount.Add("redis:bulk:doc", int64(bulkRequest.NumberOfActions()))
for i := 0; i < bulkRequest.NumberOfActions(); i++ {
prom.BusinessInfoCount.Incr("redis:bulk:doc")
}
if _, err = bulkRequest.Do(c); err != nil {
log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
}
prom.LibClient.Timing("redis:bulk", int64(time.Since(now)/time.Millisecond))
return
}
// BulkDBData writes DB-sourced data into ES.
func (d *Dao) BulkDBData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
var (
indexName string
strID string
request elastic.BulkableRequest
bulkRequest = d.ESPool[attrs.ESName].Bulk()
)
for _, b := range bulkData {
if name, ok := b["index_name"]; ok {
if indexName, ok = name.(string); ok {
delete(b, "index_name")
} else {
log.Error("dao.es.BulkDBData index_name err")
continue
}
} else {
if !writeEntityIndex {
indexName, _ = b.Index(attrs)
} else {
_, indexName = b.Index(attrs)
}
}
if id, ok := b["index_id"]; ok {
if strID, ok = id.(string); !ok {
log.Error("es.BulkDBData.strID(%v)", id)
continue
}
} else {
if strID, ok = b.StrID(attrs); !ok {
log.Error("es.BulkDBData.strID")
continue
}
}
if indexName == "" || strID == "" {
continue
}
// fields flagged for removal in attr are not written to ES
for _, v := range attrs.DataSQL.DataIndexRemoveFields {
delete(b, v)
}
request = elastic.NewBulkUpdateRequest().Index(indexName).Type(attrs.Index.IndexType).Id(strID).Doc(b).DocAsUpsert(true).RetryOnConflict(3)
//fmt.Println(request)
bulkRequest.Add(request)
}
if bulkRequest.NumberOfActions() == 0 {
// note: malformed requests can leave the action count at 0
return
}
log.Info("insert number is %d", bulkRequest.NumberOfActions())
now := time.Now()
// prom.BusinessInfoCount.Add("redis:bulk:doc", int64(bulkRequest.NumberOfActions()))
for i := 0; i < bulkRequest.NumberOfActions(); i++ {
prom.BusinessInfoCount.Incr("redis:bulk:doc")
}
if _, err = bulkRequest.Do(c); err != nil {
log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
}
prom.LibClient.Timing("redis:bulk", int64(time.Since(now)/time.Millisecond))
return
}
// pingESCluster pings the es cluster
func (d *Dao) pingESCluster(ctx context.Context) (err error) {
//for name, client := range d.ESPool {
// if _, _, err = client.Ping(d.c.Es[name].Addr[0]).Do(ctx); err != nil {
// d.PromError("Es:Ping", "%s:Ping error(%v)", name, err)
// return
// }
//}
return
}
// Contain reports whether obj is an element of a slice/array or a key of a map,
// e.g. Contain("uid", map[string]string{"uid": "string"}) is true.
func (d *Dao) Contain(obj interface{}, target interface{}) bool {
targetValue := reflect.ValueOf(target)
switch reflect.TypeOf(target).Kind() {
case reflect.Slice, reflect.Array:
for i := 0; i < targetValue.Len(); i++ {
if targetValue.Index(i).Interface() == obj {
return true
}
}
case reflect.Map:
if targetValue.MapIndex(reflect.ValueOf(obj)).IsValid() {
return true
}
}
return false
}

View File

@@ -0,0 +1,78 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
. "github.com/smartystreets/goconvey/convey"
)
func WithES(f func(d *Dao)) func() {
return func() {
dir, _ := filepath.Abs("../cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := New(conf.Conf)
f(d)
}
}
func Test_WithES(t *testing.T) {
Convey("Test_WithES", t, WithES(func(d *Dao) {
var (
err error
c = context.TODO()
)
err = d.Ping(c)
So(err, ShouldBeNil)
}))
}
func Test_BulkDatabusData(t *testing.T) {
Convey("Test_BulkDatabusData", t, WithES(func(d *Dao) {
var (
err error
c = context.TODO()
attrs = &model.Attrs{
DataSQL: &model.AttrDataSQL{},
}
)
attrs.ESName = "archive"
attrs.DataSQL.DataIndexSuffix = ""
d.BulkDatabusData(c, attrs, false)
So(err, ShouldBeNil)
}))
}
func Test_BulkDBData(t *testing.T) {
Convey("Test_BulkDBData", t, WithES(func(d *Dao) {
var (
err error
c = context.TODO()
attrs = &model.Attrs{
DataSQL: &model.AttrDataSQL{},
}
)
attrs.ESName = "archive"
attrs.DataSQL.DataIndexSuffix = ""
d.BulkDBData(c, attrs, false)
So(err, ShouldBeNil)
}))
}
func Test_PingESCluster(t *testing.T) {
Convey("Test_PingESCluster", t, WithES(func(d *Dao) {
var (
c = context.TODO()
err error
)
err = d.pingESCluster(c)
So(err, ShouldBeNil)
}))
}

View File

@@ -0,0 +1,40 @@
package dao
// import (
// "context"
// "time"
// "go-common/app/job/main/search/model"
// "go-common/log"
// "golang/gohbase/hrpc"
// )
// // stat archive stat
// func (d *Dao) stat(c context.Context, tableName, startRow, endRow string, from, to uint64, limit int) (res []*model.HbaseArchiveStat, err error) {
// var (
// scan *hrpc.Scan
// results []*hrpc.Result
// ctx, cancel = context.WithTimeout(c, time.Duration(d.c.HBase.ReadsTimeout))
// )
// defer cancel()
// if scan, err = hrpc.NewScanRangeStr(ctx, tableName, startRow, endRow, from, to); err != nil {
// log.Error("d.hbase.stat hrpc.NewScanRangeStr table(%s) startRow(%s) endRow(%s) from(%d) to(%d) error(%v)", tableName, startRow, endRow, from, to, err)
// return
// }
// scan.SetLimit(limit)
// if results, err = d.hbase.Scan(ctx, scan); err != nil {
// log.Error("d.hbase.Scan error(%v)", err)
// return
// }
// for _, r := range results {
// for _, c := range r.Cells {
// oneRes := &model.HbaseArchiveStat{
// Row: string(c.Row),
// TimeStamp: uint64(*c.Timestamp),
// Value: string(c.Value),
// }
// res = append(res, oneRes)
// }
// }
// return
// }

View File

@@ -0,0 +1,72 @@
package dao
import (
"net/http"
"net/url"
"strconv"
"time"
"go-common/library/log"
)
const _smsURL = "http://ops-mng.bilibili.co/api/sendsms"
type sms struct {
d *Dao
client *http.Client
lastTime int64
interval int64
params *url.Values
}
func newSMS(d *Dao) (s *sms) {
s = &sms{
d: d,
client: &http.Client{},
lastTime: time.Now().Unix() - d.c.SMS.Interval, // drop the subtraction if alerts should be suppressed right after startup
interval: d.c.SMS.Interval,
params: &url.Values{
"phone": []string{d.c.SMS.Phone},
"token": []string{d.c.SMS.Token},
},
}
return
}
// SendSMS .
func (d *Dao) SendSMS(msg string) (err error) {
if !d.sms.IntervalCheck() {
log.Error("发短信太频繁啦, msg%s", msg)
return
}
if err = d.sms.Send(msg); err != nil {
log.Error("发短信失败, msg%s, error(%v)", msg, err)
}
return
}
func (sms *sms) Send(msg string) (err error) {
var req *http.Request
sms.params.Set("message", msg)
if req, err = http.NewRequest("GET", _smsURL+"?"+sms.params.Encode(), nil); err != nil {
return
}
req.Header.Set("x1-bilispy-timeout", strconv.FormatInt(int64(time.Duration(1)/time.Millisecond), 10))
if _, err = sms.client.Do(req); err != nil {
log.Error("ops-mng sendsms url(%s) error(%v)", _smsURL+"?"+sms.params.Encode(), err)
}
return
}
// IntervalCheck accessible or not to send msg at present time
func (sms *sms) IntervalCheck() (send bool) {
now := time.Now().Unix()
if (now - sms.lastTime) >= sms.interval {
send = true
sms.lastTime = now
} else {
send = false
}
return
}