Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"aegis_resource.go",
"archive_video_relation.go",
"avr_archive.go",
"dm_date.go",
"log.go",
],
importpath = "go-common/app/job/main/search/dao/business",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/dao:go_default_library",
"//app/job/main/search/model:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/log/infoc:go_default_library",
"//library/queue/databus:go_default_library",
"//library/xstr:go_default_library",
"//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["business_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/search/conf:go_default_library",
"//app/job/main/search/dao:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
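
Assuming a standard rules_go workspace at the repository root (an assumption; the WORKSPACE file is not shown here), the targets above are driven with the usual Bazel commands: bazel build //app/job/main/search/dao/business:go_default_library compiles the library, and bazel test //app/job/main/search/dao/business:go_default_test runs the goconvey tests.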

View File

@@ -0,0 +1,208 @@
package business
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AegisResource consumes a single table's binlog from databus.
type AegisResource struct {
d *dao.Dao
c *conf.Config
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offset *model.LoopOffset
mapData []model.MapData
commits map[int32]*databus.Message
}
// NewAegisResource .
func NewAegisResource(d *dao.Dao, appid string, c *conf.Config) (a *AegisResource) {
a = &AegisResource{
d: d,
c: c,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *AegisResource) Business() string {
return a.attrs.Business
}
// InitIndex init index.
func (a *AegisResource) InitIndex(c context.Context) {
if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
} else {
a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (a *AegisResource) InitOffset(c context.Context) {
a.d.InitOffset(c, a.offset, a.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
a.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (a *AegisResource) Offset(c context.Context) {
for {
offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
if err != nil {
log.Error("a.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (a *AegisResource) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (a *AegisResource) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-a.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus)
break
}
m := &model.Message{}
a.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if m.Table == "resource" || m.Table == "resource_result" || m.Table == "net_flow_resource" {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
if parseMap, err = a.d.JSON2map(m.New); err != nil {
log.Error("a.d.JSON2map error(%v)", err)
continue
}
if _, ok := parseMap["rid"]; ok {
parseMap["_id"] = parseMap["rid"]
parseMap["id"] = parseMap["rid"]
}
if _, sok := parseMap["state"]; m.Table != "resource_result" && sok {
delete(parseMap, "state")
}
log.Info("%v: %+v", a.attrs.AppID, parseMap)
a.mapData = append(a.mapData, parseMap)
}
}
if len(a.mapData) < a.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{})
}
length = len(a.mapData)
return
}
// AllMessages .
func (a *AegisResource) AllMessages(c context.Context) (length int, err error) {
rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.OffsetID, a.attrs.Other.Size)
log.Info("appid: %s allMessages Current offsetID: %d", a.appid, a.offset.OffsetID)
if err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
return
}
a.mapData = append(a.mapData, item)
}
if len(a.mapData) > 0 {
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{})
// offset
if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok && v != nil {
if v2, ok := v.(interface{}); ok {
a.offset.SetTempOffset((v2).(int64), "")
a.offset.SetRecoverTempOffset((v2).(int64), "")
} else {
log.Error("dtb.all._id interface error")
}
} else {
log.Error("dtb.all._id nil error")
}
}
length = len(a.mapData)
return
}
// BulkIndex .
func (a *AegisResource) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
partData := a.mapData[start:end]
if a.c.Business.Index {
err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
} else {
err = a.d.BulkDatabusData(c, a.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (a *AegisResource) Commit(c context.Context) (err error) {
if a.c.Business.Index {
err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
} else {
for k, cos := range a.commits {
if err = cos.Commit(); err != nil {
log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
continue
}
delete(a.commits, k)
}
}
a.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (a *AegisResource) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}
// Size return size.
func (a *AegisResource) Size(c context.Context) int {
return a.attrs.Other.Size
}
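
Every business type in this package (AegisResource, Avr, AvrArchive, DmDate, Log) exposes the same method set, which suggests the job's main loop drives them through a shared interface. A minimal sketch of that implied contract; the name Processor is an assumption, only the signatures are taken from the code:

type Processor interface {
	Business() string
	InitIndex(c context.Context)
	InitOffset(c context.Context)
	Offset(c context.Context)
	SetRecover(c context.Context, recoverID int64, recoverTime string, i int)
	IncrMessages(c context.Context) (length int, err error)
	AllMessages(c context.Context) (length int, err error)
	BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error)
	Commit(c context.Context) (err error)
	Sleep(c context.Context)
	Size(c context.Context) int
}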

View File

@@ -0,0 +1,197 @@
package business
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// Avr consumes a single table's binlog from databus.
type Avr struct {
c *conf.Config
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offset *model.LoopOffset
mapData []model.MapData
commits map[int32]*databus.Message
}
// NewAvr .
func NewAvr(d *dao.Dao, appid string, c *conf.Config) (a *Avr) {
a = &Avr{
c: c,
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *Avr) Business() string {
return a.attrs.Business
}
// InitIndex init index.
func (a *Avr) InitIndex(c context.Context) {
if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
} else {
a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (a *Avr) InitOffset(c context.Context) {
a.d.InitOffset(c, a.offset, a.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
a.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (a *Avr) Offset(c context.Context) {
for {
offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
if err != nil {
log.Error("a.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (a *Avr) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (a *Avr) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-a.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus)
break
}
m := &model.Message{}
a.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
//fmt.Println("origin msg", m)
//log.Info("origin msg: (%v)", m.Table, a.mapData)
if m.Table == a.attrs.Table.TablePrefix {
if m.Action == "insert" || m.Action == "update" {
var parseMap map[string]interface{}
parseMap, err = a.d.JSON2map(m.New)
if err != nil {
log.Error("a.JSON2map error(%v)", err)
continue
}
a.mapData = append(a.mapData, parseMap)
}
}
if len(a.mapData) < a.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
//log.Info("origin msg: (%v)", a.mapData)
if len(a.mapData) > 0 {
//fmt.Println("before", a.mapData)
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{"archive", "video", "audit", "ups"})
//fmt.Println("after", a.mapData)
log.Info("dtb msg: (%v)", a.mapData)
}
length = len(a.mapData)
return
}
// AllMessages .
func (a *Avr) AllMessages(c context.Context) (length int, err error) {
rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.RecoverID, a.attrs.Other.Size)
log.Info("appid: %s allMessages Current RecoverID: %d", a.appid, a.offset.RecoverID)
if err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
return
}
a.mapData = append(a.mapData, item)
}
if len(a.mapData) > 0 {
//fmt.Println("before", a.mapData)
a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{"audit", "ups"})
if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok {
if id, ok := v.(int64); ok {
a.offset.SetTempOffset(id, "")
a.offset.SetRecoverTempOffset(id, "")
}
}
}
length = len(a.mapData)
return
}
// BulkIndex .
func (a *Avr) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
if len(a.mapData) > 0 {
partData := a.mapData[start:end]
err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
}
return
}
// Commit commit offset.
func (a *Avr) Commit(c context.Context) (err error) {
if a.c.Business.Index {
err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
} else {
for k, cos := range a.commits {
if err = cos.Commit(); err != nil {
log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
continue
}
delete(a.commits, k)
}
}
a.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (a *Avr) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}
// Size return size.
func (a *Avr) Size(c context.Context) int {
return a.attrs.Other.Size
}
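
IncrMessages here and in its sibling types all implement the same aggregate-or-flush loop: buffer decoded messages until Databus.AggCount is reached or the ticker fires, whichever comes first. A self-contained sketch of the pattern, with illustrative names and int payloads standing in for databus messages:

package main

import (
	"fmt"
	"time"
)

// drainBatch mirrors the IncrMessages select loop: keep appending until
// aggCount messages are buffered or the ticker fires, then flush.
func drainBatch(msgs <-chan int, aggCount int, tick time.Duration) []int {
	ticker := time.NewTicker(tick)
	defer ticker.Stop()
	batch := []int{}
	for {
		select {
		case m, ok := <-msgs:
			if !ok {
				return batch // channel closed: flush what we have
			}
			batch = append(batch, m)
			if len(batch) < aggCount {
				continue // keep aggregating
			}
		case <-ticker.C:
			// timeout: flush a possibly short batch
		}
		return batch
	}
}

func main() {
	ch := make(chan int, 10)
	for i := 0; i < 5; i++ {
		ch <- i
	}
	fmt.Println(drainBatch(ch, 3, 100*time.Millisecond)) // [0 1 2]
}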

View File

@@ -0,0 +1,259 @@
package business
import (
"context"
"fmt"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/xstr"
)
// AvrArchive .
type AvrArchive struct {
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
offset *model.LoopOffset
mapData []model.MapData
}
// NewAvrArchive .
func NewAvrArchive(d *dao.Dao, appid string) (av *AvrArchive) {
av = &AvrArchive{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
}
return
}
// Business return business.
func (av *AvrArchive) Business() string {
return av.attrs.Business
}
// InitIndex init index.
func (av *AvrArchive) InitIndex(c context.Context) {
if aliases, err := av.d.GetAliases(av.attrs.ESName, av.attrs.Index.IndexAliasPrefix); err != nil {
av.d.InitIndex(c, nil, av.attrs.ESName, av.attrs.Index.IndexAliasPrefix, av.attrs.Index.IndexEntityPrefix, av.attrs.Index.IndexMapping)
} else {
av.d.InitIndex(c, aliases, av.attrs.ESName, av.attrs.Index.IndexAliasPrefix, av.attrs.Index.IndexEntityPrefix, av.attrs.Index.IndexMapping)
}
}
// InitOffset insert init value to offset.
func (av *AvrArchive) InitOffset(c context.Context) {
av.d.InitOffset(c, av.offset, av.attrs, []string{})
nowFormat := time.Now().Format("2006-01-02 15:04:05")
av.offset.SetOffset(0, nowFormat)
}
// Offset get offset.
func (av *AvrArchive) Offset(c context.Context) {
for {
offset, err := av.d.Offset(c, av.appid, av.attrs.Table.TablePrefix)
if err != nil {
log.Error("ac.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
av.offset.SetReview(offset.ReviewID, offset.ReviewTime)
av.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
// SetRecover set recover
func (av *AvrArchive) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
av.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages .
func (av *AvrArchive) IncrMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
log.Info("appid: %s IncrMessages Current OffsetTime: %s, OffsetID: %d", av.appid, av.offset.OffsetTime, av.offset.OffsetID)
if !av.offset.IsLoop {
rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByMTime, av.offset.OffsetTime, av.attrs.Other.Size)
} else {
rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByIDMTime, av.offset.OffsetID, av.offset.OffsetTime, av.attrs.Other.Size)
}
if err != nil {
log.Error("db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("IncrMessages rows.Scan() error(%v)", err)
return
}
av.mapData = append(av.mapData, item)
}
length = len(av.mapData)
if length > 0 {
// offset
dao.UpdateOffsetByMap(av.offset, av.mapData...)
// extra relevant data
length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
}
return
}
// AllMessages .
func (av *AvrArchive) AllMessages(c context.Context) (length int, err error) {
var rows *xsql.Rows
log.Info("appid: %s allMessages Current RecoverID: %d", av.appid, av.offset.RecoverID)
if rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByID, av.offset.RecoverID, av.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("AllMessages rows.Scan() error(%v)", err)
continue
}
av.mapData = append(av.mapData, item)
}
length = len(av.mapData)
if length > 0 {
// offset
if av.mapData[length-1]["_id"] != nil {
v := av.mapData[length-1]["_id"]
if v2, ok := v.(*interface{}); ok {
av.offset.SetTempOffset((*v2).(int64), "")
av.offset.SetRecoverTempOffset((*v2).(int64), "")
}
}
// extra relevant data
length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
}
return
}
// extraData extra data for appid
func (av *AvrArchive) extraData(c context.Context, way string, tags map[string]bool) (length int, err error) {
switch way {
case "db":
for i, item := range av.mapData {
item.TransData(av.attrs)
for k, v := range item {
av.mapData[i][k] = v
}
}
case "dtb":
for i, item := range av.mapData {
item.TransDtb(av.attrs)
av.mapData[i] = model.MapData{}
for k, v := range item {
av.mapData[i][k] = v
}
}
}
for _, ex := range av.attrs.DataExtras {
if _, ok := tags[ex.Tag]; !ok {
continue
}
switch ex.Type {
case "slice":
continue
//av.extraDataSlice(c, ex)
default:
length, err = av.extraDataDefault(c, ex)
}
}
return
}
// extraData-default
func (av *AvrArchive) extraDataDefault(c context.Context, ex model.AttrDataExtra) (length int, err error) {
// filter ids from in_fields
var (
ids []int64
items map[int64]model.MapData
temp map[int64]model.MapData
)
cdtInField := ex.Condition["in_field"]
items = make(map[int64]model.MapData)
temp = make(map[int64]model.MapData)
for _, md := range av.mapData {
if v, ok := md[cdtInField]; ok {
ids = append(ids, v.(int64)) // TODO: deduplicate ids
temp[v.(int64)] = md
}
}
// query extra data
if len(ids) > 0 {
var rows *xsql.Rows
rows, err = av.d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(ids))+" and 1 = ? ", 1)
if err != nil {
log.Error("extraDataDefault db.Query error(%v)", err)
return
}
for rows.Next() {
item, row := dao.InitMapData(ex.Fields)
if err = rows.Scan(row...); err != nil {
log.Error("extraDataDefault rows.Scan() error(%v)", err)
continue
}
if v, ok := item[ex.InField]; ok {
if v2, ok := v.(*interface{}); ok {
item.TransData(av.attrs)
items[(*v2).(int64)] = item
}
}
}
rows.Close()
}
//fmt.Println("a.mapData", av.mapData, "ids", ids, "items", items)
// merge data
fds := []string{"_id", "cid", "vid", "aid", "v_ctime"}
av.mapData = []model.MapData{}
for k, item := range items {
if v, ok := temp[k]; ok {
for _, fd := range fds {
if f, ok := item[fd]; ok {
v[fd] = f
}
}
av.mapData = append(av.mapData, v)
}
}
length = len(av.mapData)
//fmt.Println("a.mapData:after", av.mapData)
return
}
// BulkIndex .
func (av *AvrArchive) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := av.mapData[start:end]
err = av.d.BulkDBData(c, av.attrs, writeEntityIndex, partData...)
return
}
// Commit commit offset.
func (av *AvrArchive) Commit(c context.Context) (err error) {
err = av.d.CommitOffset(c, av.offset, av.attrs.AppID, av.attrs.Table.TablePrefix)
av.mapData = []model.MapData{}
return
}
// Sleep interval duration.
func (av *AvrArchive) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(av.attrs.Other.Sleep))
}
// Size return size.
func (av *AvrArchive) Size(c context.Context) int {
return av.attrs.Other.Size
}
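
extraDataDefault above is a fan-out join: it collects join keys from the in_field column, queries the extra table by those IDs, then copies a fixed whitelist of fields back onto the matching base records. A simplified sketch of the merge step; the types and names here are illustrative, not the repo's:

package main

import "fmt"

type mapData = map[string]interface{}

// mergeExtras copies whitelisted fields from extras (keyed by join ID)
// into each base record whose inField value matches.
func mergeExtras(base []mapData, extras map[int64]mapData, inField string, fields []string) []mapData {
	out := []mapData{}
	for _, rec := range base {
		id, ok := rec[inField].(int64)
		if !ok {
			continue
		}
		extra, ok := extras[id]
		if !ok {
			continue // no extra row fetched for this record
		}
		for _, fd := range fields {
			if v, ok := extra[fd]; ok {
				rec[fd] = v
			}
		}
		out = append(out, rec)
	}
	return out
}

func main() {
	base := []mapData{{"aid": int64(1), "title": "t"}}
	extras := map[int64]mapData{1: {"_id": int64(10), "cid": int64(7)}}
	fmt.Println(mergeExtras(base, extras, "aid", []string{"_id", "cid", "vid", "aid", "v_ctime"}))
}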

View File

@@ -0,0 +1,124 @@
package business
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
. "github.com/smartystreets/goconvey/convey"
)
func WithBusinessAvr(f func(d *Avr)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewAvr(d, "archive_video", conf.Conf)
f(bsn)
}
}
func Test_AvrRecover(t *testing.T) {
Convey("set recover", t, WithBusinessArv(func(d *Avr) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_AvrInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessArv(func(d *Avr) {
d.InitOffset(context.TODO())
}))
}
func WithBusinessDmDate(f func(d *DmDate)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewDmDate(d, "dm_search")
f(bsn)
}
}
func Test_DmDateRecover(t *testing.T) {
Convey("set recover", t, WithBusinessDmDate(func(d *DmDate) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_DmDateInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessDmDate(func(d *DmDate) {
d.InitOffset(context.TODO())
}))
}
func WithBusinessLog(f func(d *Log)) func() {
return func() {
dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
flag.Set("conf", dir)
flag.Parse()
conf.Init()
d := dao.New(conf.Conf)
bsn := NewLog(d, "log_audit")
f(bsn)
}
}
func Test_LogRecover(t *testing.T) {
Convey("set recover", t, WithBusinessLog(func(d *Log) {
var (
err error
c = context.TODO()
)
d.SetRecover(c, 1000, "", 0)
So(err, ShouldBeNil)
}))
}
func Test_LogInitOffset(t *testing.T) {
Convey("test close", t, WithBusinessLog(func(d *Log) {
d.InitOffset(context.TODO())
}))
}
func Test_LogInitIndex(t *testing.T) {
Convey("test init index", t, WithBusinessLog(func(d *Log) {
d.InitIndex(context.TODO())
}))
}
func Test_LogOffset(t *testing.T) {
Convey("test offset", t, WithBusinessLog(func(d *Log) {
d.Offset(context.TODO())
}))
}
func Test_LogSetRecover(t *testing.T) {
Convey("test set recover", t, WithBusinessLog(func(d *Log) {
d.SetRecover(context.TODO(), 0, "", 0)
}))
}
func Test_LogAllMessages(t *testing.T) {
Convey("test set recover", t, WithBusinessLog(func(d *Log) {
d.AllMessages(context.TODO())
}))
}

View File

@@ -0,0 +1,352 @@
package business
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
minIDSQL = "SELECT id FROM dm_index_%03d WHERE ctime > ? ORDER BY id ASC LIMIT 1"
)
// DmDate .
type DmDate struct {
d *dao.Dao
appid string
attrs *model.Attrs
db *xsql.DB
dtb *databus.Databus
offsets model.LoopOffsets
mapData []model.MapData
commits map[int32]*databus.Message
frontTwelveMonthDate string
tableName []string
oidDayMap map[string]string
}
// NewDmDate .
func NewDmDate(d *dao.Dao, appid string) (dd *DmDate) {
dd = &DmDate{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
offsets: make(map[int]*model.LoopOffset),
commits: make(map[int32]*databus.Message),
frontTwelveMonthDate: "2017-08-01",
oidDayMap: make(map[string]string),
}
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
dd.offsets[i] = &model.LoopOffset{}
}
dd.db = d.DBPool[dd.attrs.DBName]
dd.dtb = d.DatabusPool[dd.attrs.Databus.Databus]
return
}
// Business return business.
func (dd *DmDate) Business() string {
return dd.attrs.Business
}
// InitIndex init index.
func (dd *DmDate) InitIndex(c context.Context) {
var (
indexAliasName string
indexEntityName string
)
aliases, err := dd.d.GetAliases(dd.attrs.ESName, dd.attrs.Index.IndexAliasPrefix)
now := time.Now()
for i := -12; i < 18; i++ {
newDate := now.AddDate(0, i, 0).Format("2006-01")
indexAliasName = dd.attrs.Index.IndexAliasPrefix + strings.Replace(newDate, "-", "_", -1)
indexEntityName = dd.attrs.Index.IndexEntityPrefix + strings.Replace(newDate, "-", "_", -1)
if err != nil {
dd.d.InitIndex(c, nil, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
} else {
dd.d.InitIndex(c, aliases, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
}
}
}
// InitOffset .
func (dd *DmDate) InitOffset(c context.Context) {
dd.d.InitOffset(c, dd.offsets[0], dd.attrs, dd.tableName)
log.Info("in InitOffset")
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
var (
id int64
err error
row *xsql.Row
)
row = dd.db.QueryRow(c, fmt.Sprintf(minIDSQL, i), dd.frontTwelveMonthDate)
if err = row.Scan(&id); err != nil {
if err == xsql.ErrNoRows {
err = nil
} else {
log.Error("row.Scan error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
}
dd.offsets[i] = &model.LoopOffset{}
dd.offsets[i].OffsetID = id
}
log.Info("InitOffset over")
}
// Offset get offset.
func (dd *DmDate) Offset(c context.Context) {
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
for {
offset, err := dd.d.Offset(c, dd.attrs.AppID, tableName)
if err != nil {
log.Error("dd.d.Offset error(%v)", err)
time.Sleep(time.Second * 3)
continue
}
dd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
dd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
break
}
}
}
// SetRecover set recover
func (dd *DmDate) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}
// IncrMessages .
func (dd *DmDate) IncrMessages(c context.Context) (length int, err error) {
ticker := time.NewTicker(time.Millisecond * time.Duration(dd.attrs.Databus.Ticker))
defer ticker.Stop()
timeStr := time.Now().Format("2006-01-02")
t, _ := time.ParseInLocation("2006-01-02", timeStr, time.Local)
tomorrowZeroTimestamp := t.AddDate(0, 0, 1).Unix()
nowTimestamp := time.Now().Unix()
// within ~3 minutes of midnight, reset the per-day dedup map
if tomorrowZeroTimestamp-nowTimestamp < 180 {
dd.oidDayMap = make(map[string]string)
}
for {
select {
case msg, ok := <-dd.dtb.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", dd.attrs.Databus)
break
}
m := &model.Message{}
dd.commits[msg.Partition] = msg
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
if m.Action == "insert" && strings.HasPrefix(m.Table, "dm_index") {
var parseMap map[string]interface{}
parseMap, err = dd.d.JSON2map(m.New)
if err != nil {
log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
continue
}
newParseMap := dd.newDtbParseMap(c, parseMap)
indexID := newParseMap["index_id"].(string)
indexName := newParseMap["index_name"].(string)
if _, exists := dd.oidDayMap[indexID]; exists {
continue
}
dd.oidDayMap[indexID] = indexName
dd.mapData = append(dd.mapData, newParseMap)
}
if len(dd.mapData) < dd.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
if len(dd.mapData) > 0 {
dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "dtb", []string{})
}
length = len(dd.mapData)
return
}
// AllMessages .
func (dd *DmDate) AllMessages(c context.Context) (length int, err error) {
dd.mapData = []model.MapData{}
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
var rows *xsql.Rows
if dd.offsets[i].OffsetID == 0 {
continue
}
if rows, err = dd.db.Query(c, fmt.Sprintf(dd.attrs.DataSQL.SQLByID, dd.attrs.DataSQL.SQLFields, i), dd.offsets[i].OffsetID, dd.attrs.Other.Size); err != nil {
log.Error("AllMessages db.Query error(%v)", err)
return
}
tempList := []model.MapData{}
for rows.Next() {
item, row := dao.InitMapData(dd.attrs.DataSQL.DataIndexFields)
if err = rows.Scan(row...); err != nil {
log.Error("appMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
continue
}
newParseMap := dd.newParseMap(c, item)
ctime, ok := newParseMap["ctime"].(*interface{})
if ok {
dbTime := (*ctime).(time.Time)
dbTimeStr := dbTime.Format("2006-01-02")
t1, err1 := time.Parse("2006-01-02", dd.frontTwelveMonthDate)
t2, err2 := time.Parse("2006-01-02", dbTimeStr)
if err1 != nil || err2 != nil || t1.After(t2) {
continue
}
} else {
continue
}
tempList = append(tempList, newParseMap)
dd.mapData = append(dd.mapData, newParseMap)
}
rows.Close()
tmpLength := len(tempList)
if tmpLength > 0 {
dd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrCTime())
}
}
length = len(dd.mapData)
if length > 0 {
dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "db", []string{})
}
log.Info("length is %d", length)
return
}
// BulkIndex .
func (dd *DmDate) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
partData := dd.mapData[start:end]
err = dd.d.BulkDBData(c, dd.attrs, writeEntityIndex, partData...)
return
}
// Commit commit offset.
func (dd *DmDate) Commit(c context.Context) (err error) {
if dd.d.GetConfig(c).Business.Index {
for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
tOffset := dd.offsets[i]
if tOffset.TempOffsetID != 0 {
tOffset.OffsetID = tOffset.TempOffsetID
}
if tOffset.TempOffsetTime != "" {
tOffset.OffsetTime = tOffset.TempOffsetTime
}
tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
if err = dd.d.CommitOffset(c, tOffset, dd.attrs.AppID, tableName); err != nil {
log.Error("appMultipleDatabus.Commit error(%v)", err)
continue
}
}
} else {
for k, c := range dd.commits {
if err = c.Commit(); err != nil {
log.Error("appMultipleDatabus.Commit error(%v)", err)
continue
}
delete(dd.commits, k)
}
}
dd.mapData = []model.MapData{}
return
}
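
Commit above promotes each table's staged temp offset into the durable offset before persisting, so a failed batch never advances the saved position. A minimal sketch of the stage-then-promote idea, assuming a simplified offset type (the repo's model.LoopOffset carries more fields):

package main

import "fmt"

// loopOffset is a simplified stand-in for model.LoopOffset.
type loopOffset struct {
	OffsetID, TempOffsetID     int64
	OffsetTime, TempOffsetTime string
}

// promote mirrors the Commit logic: only non-zero staged values advance
// the durable offset.
func (o *loopOffset) promote() {
	if o.TempOffsetID != 0 {
		o.OffsetID = o.TempOffsetID
	}
	if o.TempOffsetTime != "" {
		o.OffsetTime = o.TempOffsetTime
	}
}

func main() {
	o := &loopOffset{OffsetID: 100}
	o.TempOffsetID = 250    // staged while indexing a batch
	o.promote()             // folded in at commit time
	fmt.Println(o.OffsetID) // 250
}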
// Sleep interval duration.
func (dd *DmDate) Sleep(c context.Context) {
}
// Size return size.
func (dd *DmDate) Size(c context.Context) int {
return 0
}
// newParseMap .
func (dd *DmDate) newParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
res = parseMap
indexName, strID := "", ""
if res["month"] != nil {
if month, ok := res["month"].(*interface{}); ok {
mth := strings.Replace(dd.b2s((*month).([]uint8)), "-", "_", -1)
indexName = "dm_date_" + mth
}
}
if res["date"] != nil {
if date, ok := res["date"].(*interface{}); ok {
dte := strings.Replace(dd.b2s((*date).([]uint8)), "-", "_", -1)
if oid, ok := res["oid"].(*interface{}); ok {
strID = strconv.FormatInt((*oid).(int64), 10) + "_" + dte
}
}
}
res["index_name"] = indexName
res["index_id"] = strID
return
}
// newDtbParseMap .
func (dd *DmDate) newDtbParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
res = parseMap
indexName, strID, mth, dte, id := "", "", "", "", ""
if res["ctime"] != nil {
if ctime, ok := res["ctime"].(string); ok {
t, _ := time.Parse("2006-01-02 15:04:05", ctime)
mth = t.Format("2006-01")
dte = t.Format("2006-01-02")
indexName = "dm_date_" + strings.Replace(mth, "-", "_", -1)
}
}
if res["oid"] != nil {
if oid, ok := res["oid"].(int64); ok {
strOid := strconv.FormatInt(oid, 10)
strID = strOid + "_" + strings.Replace(dte, "-", "_", -1)
}
}
if res["id"] != nil {
if newID, ok := res["id"].(int64); ok {
id = strconv.Itoa(int(newID))
}
}
for k := range res {
if k == "id" || k == "oid" {
continue
}
delete(res, k)
}
res["index_name"] = indexName
res["index_id"] = strID
res["month"] = mth
res["date"] = dte
res["id"] = id
return
}
// b2s converts []uint8 to string.
func (dd *DmDate) b2s(bs []uint8) string {
return string(bs)
}
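
newParseMap and newDtbParseMap both derive the same routing scheme: a document lands in the monthly index dm_date_YYYY_MM and gets the ID <oid>_YYYY_MM_DD. A pure-function sketch of that derivation, assuming a well-formed ctime string:

package main

import (
	"fmt"
	"strings"
	"time"
)

// dmDateKeys computes the monthly index name and per-day document ID
// the way newDtbParseMap does, from a binlog ctime and an oid.
func dmDateKeys(ctime string, oid int64) (indexName, docID string, err error) {
	t, err := time.Parse("2006-01-02 15:04:05", ctime)
	if err != nil {
		return "", "", err
	}
	month := strings.Replace(t.Format("2006-01"), "-", "_", -1)
	date := strings.Replace(t.Format("2006-01-02"), "-", "_", -1)
	return "dm_date_" + month, fmt.Sprintf("%d_%s", oid, date), nil
}

func main() {
	idx, id, _ := dmDateKeys("2019-04-22 18:49:16", 42)
	fmt.Println(idx, id) // dm_date_2019_04 42_2019_04_22
}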

View File

@@ -0,0 +1,355 @@
package business
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"gopkg.in/olivere/elastic.v5"
)
const _sql = "SELECT id, index_format, index_version, index_cluster, additional_mapping, data_center FROM digger_"
// Log .
type Log struct {
d *dao.Dao
appid string
attrs *model.Attrs
databus *databus.Databus
infoC *infoc.Infoc
infoCField []string
mapData []model.MapData
commits map[int32]*databus.Message
business map[int]*info
week map[int]string
additionalMapping map[int]map[string]string
defaultMapping map[string]string
mapping map[int]map[string]string
}
type info struct {
Format string
Cluster string
Version string
DataCenter int8
}
// NewLog .
func NewLog(d *dao.Dao, appid string) (l *Log) {
l = &Log{
d: d,
appid: appid,
attrs: d.AttrPool[appid],
databus: d.DatabusPool[appid],
infoC: d.InfoCPool[appid],
infoCField: []string{},
mapData: []model.MapData{},
commits: map[int32]*databus.Message{},
business: map[int]*info{},
additionalMapping: map[int]map[string]string{},
mapping: map[int]map[string]string{},
week: map[int]string{
0: "0107",
1: "0815",
2: "1623",
3: "2431",
},
}
switch appid {
case "log_audit":
l.defaultMapping = map[string]string{
"uname": "string",
"uid": "string",
"business": "string",
"type": "string",
"oid": "string",
"action": "string",
"ctime": "time",
"int_0": "int",
"int_1": "int",
"int_2": "int",
"str_0": "string",
"str_1": "string",
"str_2": "string",
"extra_data": "string",
}
l.infoCField = []string{"uname", "uid", "business", "type", "oid", "action", "ctime",
"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "str_3", "str_4", "extra_data"}
case "log_user_action":
l.defaultMapping = map[string]string{
"mid": "string",
"platform": "string",
"build": "string",
"buvid": "string",
"business": "string",
"type": "string",
"oid": "string",
"action": "string",
"ip": "string",
"ctime": "time",
"int_0": "int",
"int_1": "int",
"int_2": "int",
"str_0": "string",
"str_1": "string",
"str_2": "string",
"extra_data": "string",
}
l.infoCField = []string{"mid", "platform", "build", "buvid", "business", "type", "oid", "action", "ip", "ctime",
"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "extra_data"}
default:
log.Error("log appid error(%v)", appid)
return
}
rows, err := d.SearchDB.Query(context.TODO(), _sql+appid)
if err != nil {
log.Error("log Query error(%v)", appid)
return
}
defer rows.Close()
for rows.Next() {
var (
id int
additionalMapping string
)
info := &info{}
if err = rows.Scan(&id, &info.Format, &info.Version, &info.Cluster, &additionalMapping, &info.DataCenter); err != nil {
log.Error("Log New DB (%v)(%v)", id, err)
continue
}
l.business[id] = info
if additionalMapping != "" {
var additionalMappingDict map[string]string
if err = json.Unmarshal([]byte(additionalMapping), &additionalMappingDict); err != nil {
log.Error("Log New Json (%v)(%v)", id, err)
continue
}
l.additionalMapping[id] = additionalMappingDict
}
}
for b := range l.business {
l.mapping[b] = map[string]string{}
for k, v := range l.defaultMapping {
l.mapping[b][k] = v
}
if a, ok := l.additionalMapping[b]; ok {
for k, v := range a {
l.mapping[b][k] = v
}
}
}
return
}
// Business return business.
func (l *Log) Business() string {
return l.attrs.Business
}
// InitIndex .
func (l *Log) InitIndex(c context.Context) {
}
// InitOffset .
func (l *Log) InitOffset(c context.Context) {
}
// Offset .
func (l *Log) Offset(c context.Context) {
}
// MapData .
func (l *Log) MapData(c context.Context) (mapData []model.MapData) {
return l.mapData
}
// Attrs .
func (l *Log) Attrs(c context.Context) (attrs *model.Attrs) {
return l.attrs
}
// SetRecover .
func (l *Log) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}
// IncrMessages .
func (l *Log) IncrMessages(c context.Context) (length int, err error) {
var jErr error
ticker := time.NewTicker(time.Millisecond * time.Duration(l.attrs.Databus.Ticker))
defer ticker.Stop()
for {
select {
case msg, ok := <-l.databus.Messages():
if !ok {
log.Error("databus: %s binlog consumer exit!!!", l.attrs.Databus)
break
}
l.commits[msg.Partition] = msg
var result map[string]interface{}
decoder := json.NewDecoder(bytes.NewReader(msg.Value))
decoder.UseNumber()
if jErr = decoder.Decode(&result); jErr != nil {
log.Error("appid(%v) json.Unmarshal(%s) error(%v)", l.appid, msg.Value, jErr)
continue
}
// convert json.Number values to int64
for k, v := range result {
switch t := v.(type) {
case json.Number:
if result[k], jErr = t.Int64(); jErr != nil {
log.Error("appid(%v) log.bulkDatabusData.json.Number(%v)(%v)", l.appid, t, jErr)
}
}
}
l.mapData = append(l.mapData, result)
if len(l.mapData) < l.attrs.Databus.AggCount {
continue
}
case <-ticker.C:
}
break
}
// TODO: extra parameters
length = len(l.mapData)
return
}
// AllMessages .
func (l *Log) AllMessages(c context.Context) (length int, err error) {
return
}
// BulkIndex .
func (l *Log) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
partData := l.mapData[start:end]
if err = l.bulkDatabusData(c, l.attrs, writeEntityIndex, partData...); err != nil {
log.Error("appid(%v) json.bulkDatabusData error(%v)", l.appid, err)
return
}
return
}
// Commit .
func (l *Log) Commit(c context.Context) (err error) {
for k, msg := range l.commits {
if err = msg.Commit(); err != nil {
log.Error("appid(%v) Commit error(%v)", l.appid, err)
continue
}
delete(l.commits, k)
}
l.mapData = []model.MapData{}
return
}
// Sleep .
func (l *Log) Sleep(c context.Context) {
time.Sleep(time.Second * time.Duration(l.attrs.Other.Sleep))
}
// Size .
func (l *Log) Size(c context.Context) (size int) {
return l.attrs.Other.Size
}
func (l *Log) bulkDatabusData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
var (
request elastic.BulkableRequest
bulkRequest map[string]*elastic.BulkService
businessID int
)
bulkRequest = map[string]*elastic.BulkService{}
for _, b := range bulkData {
indexName := ""
if business, ok := b["business"].(int64); ok {
businessID = int(business)
if v, ok := b["ctime"].(string); ok {
if cTime, timeErr := time.Parse("2006-01-02 15:04:05", v); timeErr == nil {
if info, ok := l.business[businessID]; ok {
suffix := strings.Replace(cTime.Format(info.Format), "week", l.week[cTime.Day()/8], -1) + "_" + info.Version
if !writeEntityIndex {
indexName = attrs.Index.IndexAliasPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
} else {
indexName = attrs.Index.IndexEntityPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
}
}
}
}
}
if indexName == "" {
log.Error("appid(%v) ac.d.bulkDatabusData business business(%v) data(%+v)", l.appid, b["business"], b)
continue
}
esCluster := l.business[businessID].Cluster // existence of l.business[businessID] was verified above
if _, ok := bulkRequest[esCluster]; !ok {
if _, eok := l.d.ESPool[esCluster]; eok {
bulkRequest[esCluster] = l.d.ESPool[esCluster].Bulk()
} else {
log.Error("appid(%v) ac.d.bulkDatabusData cluster no find error(%v)", l.appid, esCluster)
continue // skip this record
}
}
// forward to the data center via infoc
if l.business[businessID].DataCenter == 1 {
arr := make([]interface{}, len(l.infoCField))
for i, f := range l.infoCField {
if v, ok := b[f]; ok {
arr[i] = fmt.Sprintf("%v", v)
}
}
if er := l.infoC.Info(arr...); er != nil {
log.Error("appid(%v) ac.infoC.Info error(%v)", l.appid, er)
}
}
// map fields and convert types per business mapping
for k, v := range b {
if t, ok := l.mapping[businessID][k]; ok {
switch t {
case "int_to_bin":
if item, ok := v.(int64); ok {
item := int(item)
arr := []string{}
for i := 0; item != 0; i++ {
if item&1 == 1 {
arr = append(arr, strconv.Itoa(1<<uint(i))) // item&1 == 1 here, so this is the set bit's value
}
item = item >> 1
}
b[k] = arr
} else {
delete(b, k)
}
case "array":
if arr, ok := v.([]interface{}); ok {
b[k] = arr
} else {
delete(b, k)
}
}
} else {
delete(b, k)
}
}
request = elastic.NewBulkIndexRequest().Index(indexName).Type(attrs.Index.IndexType).Doc(b)
bulkRequest[esCluster].Add(request)
}
for _, v := range bulkRequest {
if v.NumberOfActions() == 0 {
continue
}
if _, err = v.Do(c); err != nil {
log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
}
}
return
}
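
The int_to_bin branch above decomposes a bitmask field into the decimal strings of its set-bit values, apparently so each flag can be indexed as a separate term. Extracted as a standalone function for clarity:

package main

import (
	"fmt"
	"strconv"
)

// intToBin explodes a bitmask into the decimal strings of its set-bit
// values, e.g. 11 (binary 1011) -> ["1", "2", "8"].
func intToBin(item int) []string {
	arr := []string{}
	for i := 0; item != 0; i++ {
		if item&1 == 1 {
			arr = append(arr, strconv.Itoa(1<<uint(i)))
		}
		item >>= 1
	}
	return arr
}

func main() {
	fmt.Println(intToBin(11)) // [1 2 8]
}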