Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

21
app/job/main/search/BUILD Normal file
View File

@@ -0,0 +1,21 @@
# Automanaged Bazel file (tags = ["automanaged"]): regenerated by gazelle;
# manual edits may be overwritten.

# All sources in this package, private to the build graph.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Transitive source closure: this package plus every sub-package's all-srcs.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app/job/main/search/cmd:all-srcs",
        "//app/job/main/search/conf:all-srcs",
        "//app/job/main/search/dao:all-srcs",
        "//app/job/main/search/http:all-srcs",
        "//app/job/main/search/model:all-srcs",
        "//app/job/main/search/service:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,141 @@
运营后台搜索项目-Job
===============
v1.5.2
1. activity_all
v1.5.1
1. esports_fav_all
v1.5.0
1. TODO 去掉data_fields的_id和_mtime改为读新的配置字段
2. TODO bulk优化
v1.4.4
1. 支持workflow特殊字段
2. 行为日志支持配置
v1.4.3
1. fix slice bounds out of range
v1.4.2
1. 回滚数据字符处理
v1.4.1
1. extra支持多字段
v1.4.0
1. 支持多个sliceField
v1.3.9
1. 修复app_databus全量不加offset问题
v1.3.8
1. 废除databus_index_id
v1.3.7
1. 去除弹幕老逻辑
v1.3.6
1. dm_date bug修复
2. 打印es写入时间
v1.3.5
1. app_multiple_databus更加通用
v1.3.4
1. 日志只发送评论
v1.3.3
1. 行为日志infoc打印log
v1.3.2
1. dmreport预发上线
2. dataExtra新增条件过滤
v1.3.1
1. fix log bug
v1.3.0
1. update infoc sdk
v1.2.0
1.日志平台支持数组
2.workflow新索引修改
v1.1.9
1. 迁移bm框架
2. 日志平台支持多个集群
3. workflow新索引上线
v1.1.8
1. 修复amd的indexname的问题
v1.1.7
1. TODO: 合并BulkDatabusData和BulkDBData所有数据全部移到model层处理完不再对数据额外处理直接循环bulk
v1.1.6
1. TODO: 全量url新增参数index_version导数据到一个新版本索引当不一定有别名时也支持
v1.1.5
1. IndexAliasPrefix索引别名支持
2. business配置去除手动写businessPool
3. 修复indexName bug
v1.1.4
1. data_fields改成json格式兼容db和databus
2. extra_data兼容db和databus
3. 新增base.go兼容自定义包
4. dao方法和变量对外开放
v1.1.3
1. 弹幕监控和历史上线
v1.1.2
1. 释放dataMap
v1.1.1
1. 增量前移time
2. remove无用代码
v1.1.0
1. 支持recover
2. 支持数组型字段配置
3. 简化dtb
v1.0.9
1. single、multiple的配置化支持
2. 优化attrs
3. extra_data跨库支持
v1.0.8
1. bug修复
v1.0.7
1. workflow_group_common
2. workflow_chall_common
v1.0.6
1. blocked_case增加databus消息聚合量
v1.0.5
1. 增加workflow_feedback
v1.0.4
1. 修改Unmarshal date bug
v1.0.3
1. 风纪委重构
2. 预发和上线
v1.0.2
1. 修改blocked的commit逻辑
2. 预发和上线
v1.0.1
1. 增加配置index(bool), 判断全量 or 增量
2. 预发和上线
v1.0.0
1. 风纪委项目上线(blocked)
2. 预发和上线

View File

@@ -0,0 +1,13 @@
# Owner
liweijia
zhapuyu
renwei
guanhuaxin
# Author
guanhuaxin
zhoushuguang
# Reviewer
zhapuyu
renwei

View File

@@ -0,0 +1,19 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- guanhuaxin
- liweijia
- renwei
- zhapuyu
- zhoushuguang
labels:
- job
- job/main/search
- main
options:
no_parent_owners: true
reviewers:
- guanhuaxin
- renwei
- zhapuyu
- zhoushuguang

View File

@@ -0,0 +1,10 @@
#### search-job
##### 项目简介
> 1.es索引数据全量、增量同步
##### 编译环境
> 请使用 golang v1.7.x 以上版本编译执行。
##### 依赖包
> 1.公共包go-common

View File

@@ -0,0 +1,44 @@
# Automanaged Bazel file for the search-job binary; regenerated by gazelle.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

# The search-job executable, built from the library below.
go_binary(
    name = "cmd",
    embed = [":go_default_library"],
    tags = ["automanaged"],
)

# main package; the test TOML is shipped as runtime data.
go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    data = ["search-job-test.toml"],
    importpath = "go-common/app/job/main/search/cmd",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/http:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//app/job/main/search/service:go_default_library",
        "//library/log:go_default_library",
        "//library/net/trace:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,52 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/http"
_ "go-common/app/job/main/search/model"
"go-common/app/job/main/search/service"
"go-common/library/log"
"go-common/library/net/trace"
)
// main boots the search-job process: configuration, logging, tracing, the
// service layer and HTTP server, then blocks handling OS signals until an
// exit signal arrives.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		// NOTE(review): log.Error is called before log.Init below; whether
		// this line is actually emitted depends on the log library's
		// defaults — confirm.
		log.Error("conf.Init() error(%v)", err)
		panic(err)
	}
	// init log, then tracing; both are flushed/closed on shutdown via defer.
	log.Init(conf.Conf.XLog)
	trace.Init(conf.Conf.Tracer)
	defer trace.Close()
	log.Info("search-job start")
	defer log.Close()
	// service init: construct the sync service and expose it over HTTP.
	srv := service.New(conf.Conf)
	http.Init(conf.Conf, srv)
	// init signal: wait for termination (graceful Close) or SIGHUP (reload,
	// currently a no-op). Any other delivered signal exits immediately.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		s := <-c
		log.Info("search-job get a signal %s", s.String())
		switch s {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			log.Info("search-job exit")
			srv.Close()
			return
		case syscall.SIGHUP:
			// TODO reload
		default:
			return
		}
	}
}

View File

@@ -0,0 +1,896 @@
version = "1.1.1"
user = "nobody"
pid = "/tmp/search-job.pid"
dir = "./"
debug = false
[xlog]
dir = "/data/log/search-job/"
[tracer]
family = "search-job"
proto = "unixgram"
addr = "/var/run/dapper-collect/dapper-collect.sock"
[hbase]
master = ""
meta = ""
dialTimeout = "10s"
readTimeout = "10s"
readsTimeout = "10s"
writeTimeout = "10s"
writesTimeout = "10s"
[hbase.zookeeper]
root = ""
addrs = ["10.23.58.154:2181","10.23.58.141:2181","10.23.58.20:2181"]
timeout = "30s"
[business]
env = "fav_playlist"
index = true
[db]
[db.bilibili_lottery]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_lottery?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.bilibili_lottery.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.feedback]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_feedback?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.feedback.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.search]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_search?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.search.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.archive]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_archive?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.archive.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.manager]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_manager?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.manager.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.dmmeta]
addr = "172.16.33.205:3310"
dsn = "test_3308:test_3308@tcp(172.16.33.205:3310)/bilibili_dm_meta?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.dmmeta.breaker]
window = "3s"
sleep = "3s"
bucket = 10
ratio = 0.5
request = 100
[db.dm]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_dm?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "200ms"
execTimeout = "200ms"
tranTimeout = "200ms"
[db.dm.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_esports]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_esports?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "200ms"
execTimeout = "200ms"
tranTimeout = "200ms"
[db.bilibili_esports.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.archive_stat]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/archive_stat?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "200ms"
execTimeout = "200ms"
tranTimeout = "200ms"
[db.archive_stat.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_archive]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_archive?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "200ms"
execTimeout = "200ms"
tranTimeout = "200ms"
[db.bilibili_archive.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.archivestat]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/archive_stat?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.archivestat.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.blocked]
addr = "172.16.33.205:3306"
dsn = "blocked:mUdFycLKSbvPgdE28lCveqN77SqsGOmy@tcp(172.16.33.205:3306)/blocked?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "5s"
execTimeout = "5s"
tranTimeout = "5s"
[db.blocked.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_reply]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_reply?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.bilibili_reply.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.workflow]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_workflow?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.workflow.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.music]
addr = "172.16.33.227:3306"
dsn = "root:123456@tcp(172.16.33.227:3306)/music?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.music.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.pgc_review]
addr = "172.16.33.205:3307"
dsn = "pgc_review:pgc_test@tcp(172.16.33.205:3307)/pgc_review?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "5s"
execTimeout = "5s"
tranTimeout = "5s"
[db.pgc_review.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.aso]
addr = "172.16.33.205:3306"
dsn = "aso:hA0DAnENNFz78kYB@tcp(172.16.33.205:3306)/aso?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.aso.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.pgc_media]
addr = "172.16.33.205:3307"
dsn = "pgc_test:pgc_test@tcp(172.16.33.205:3307)/pgc_media?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.pgc_media.breaker]
window = "3s"
sleep = "500ms"
bucket = 10
ratio = 0.5
request = 100
[db.vip]
addr = "172.16.0.28:3306"
dsn = "bilibili:12345@tcp(172.16.0.28:3306)/vip?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.vip.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_vip]
addr = "172.16.0.28:3306"
dsn = "bilibili:12345@tcp(172.16.0.28:3306)/bilibili_vip?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_vip.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_article]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_article?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_article.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.user]
addr = "172.16.33.205:3306"
dsn = "account:wx2U1MwXRyWEuURw@tcp(172.16.33.205:3306)/relation?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.user.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.lottery]
addr = "172.16.33.205:3306"
dsn = "account:wx2U1MwXRyWEuURw@tcp(172.16.33.205:3306)/bilibili_lottery?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.lottery.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.aso_account]
addr = "172.16.33.205:3306"
dsn = "aso:hA0DAnENNFz78kYB@tcp(172.16.33.205:3306)/aso?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.aso_account.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.pgc]
addr = "172.16.33.205:3306"
dsn = "bangumi:SbBR5yRCYuMdYyor@tcp(172.16.33.205:3306)/bili2_statement?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.pgc.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_member]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_member?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_member.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.platform_tag]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/platform_tag?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.platform_tag.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.tickets]
addr = "172.16.33.203:3306"
dsn = "root:123456@tcp(172.16.33.203:3306)/tickets?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.tickets.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_upcrm]
addr = "172.16.33.205:3306"
dsn = "upcrm:DdL6c5JaWCYKMAQ10PURbfeImow9HXlx@tcp(172.16.33.205:3306)/bilibili_upcrm?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_upcrm.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_fav]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_fav?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_fav.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_creative]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_creative?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_creative.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.bilibili_tv]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_tv?timeout=10s&readTimeout=10s&writeTimeout=10s&parseTime=true&loc=Local&charset=utf8"
active = 10
idle = 5
idleTimeout = "4h"
queryTimeout = "10s"
execTimeout = "10s"
tranTimeout = "10s"
[db.bilibili_tv.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[databus]
[databus.dm]
key = "615f0ad9d4ba4c01"
secret = "8dd405b9124f1ff1da67d026cedc471f"
group = "DMMeta-MainManager-S"
topic = "DMMeta-T"
action = "sub"
name = "search-job/dm-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.dm_monitor]
key = "615f0ad9d4ba4c01"
secret = "8dd405b9124f1ff1da67d026cedc471f"
group = "DMSubject-MainManager-S"
topic = "DMSubject-T"
action = "sub"
name = "search-job/dm_monitor-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.dm_search]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "DMMeta-MainSearch-S"
topic = "DMMeta-T"
action = "sub"
name = "search-job/dm_search-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.dm_date]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "DMMeta-MainSearchDMHistory-S"
topic = "DMMeta-T"
action = "sub"
name = "search-job/dm_date-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.dmreport]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "BiliDMBinlog-MainSearch-S"
topic = "BiliDMBinlog-T"
action = "sub"
name = "search-job/dm_report-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.blocked_info]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-SearchInfo-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-info-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.blocked_case]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-SearchCase-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-case-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.blocked_jury]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-SearchJury-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-jury-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.blocked_opinion]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-SearchOpinion-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-opinion-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.blocked_publish]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-SearchPublish-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-publish-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.blocked_kpi_point]
key = "0QEO9F8JuuIxZzNDvklH"
secret = "0QEO9F8JuuIxZzNDvklI"
group = "Blocked-Search-S"
topic = "Blocked-T"
action = "sub"
name = "search-job/blocked-kpi-point-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "10s"
readTimeout = "60s"
writeTimeout = "10s"
idleTimeout = "10s"
[databus.archive_video_relation]
key = "615f0ad9d4ba4c01"
secret = "8dd405b9124f1ff1da67d026cedc471f"
group = "Archive-MainManager-2-S"
topic = "Archive-T"
action = "sub"
name = "search-job/video_relation-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.pgc_order]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "Pgc-MainSearch-S"
topic = "Pgc-T"
action = "sub"
name = "search-job/pgc-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.log_audit]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "LogAudit-MainSearch-S"
topic = "LogAudit-T"
action = "sub"
name = "search-job/log-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.log_user_action]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "LogUserAction-MainSearch-S"
topic = "LogUserAction-T"
action = "sub"
name = "search-job/log-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.favorite]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "Favorite-MainSearch-S"
topic = "Favorite-T"
action = "sub"
name = "search-job/favorite-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.esports_fav]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "Favorite-MainSearch-Esports-S"
topic = "Favorite-T"
action = "sub"
name = "search-job/favorite-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.up_crm_info]
key = "2511663d546f1413"
secret = "cde3b480836cc76df3d635470f991caa"
group = "UpCRMBinLog-MainSearch-S"
topic = "UpCRMBinLog-T"
action = "sub"
name = "search-job/upcrm-sub"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.dm_subtitle]
key = "2511663d546f1413"
secret = "cde3b480836cc76df3d635470f991caa"
group = "BiliDMBinlog-MainSearch-Subtitle-S"
topic = "BiliDMBinlog-T"
action = "sub"
name = "search-job/dm-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.creative_archive]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "Archive-MainSearch-CreativeArchive-S"
topic = "Archive-T"
action = "sub"
name = "search-job/archive-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.workflow_group_common]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "WorkflowBinlog-MainSearch-Group-S"
topic = "WorkflowBinlog-T"
action = "sub"
name = "search-job/workflow-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.workflow_chall_common]
key = "2511663d546f1413"
secret = "971d048a2818e37ae124a0293c300e89"
group = "WorkflowBinlog-MainSearch-Chall-S"
topic = "WorkflowBinlog-T"
action = "sub"
name = "search-job/workflow-sub"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[HTTPServer]
addr = "0.0.0.0:6217"
maxListen = 1000
timeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
[es]
[es.pcie_dm_in]
addr = ["http://172.18.33.71:9200"]
[es.ssd_pub_in01]
addr = ["http://172.22.33.120:9201"]
[es.pcie_fav]
addr = ["http://172.18.33.71:9200"]
[es.replyInternal]
addr = ["http://172.18.33.71:9200"]
[es.replyExternal]
addr = ["http://172.18.33.71:9200"]
[es.archive]
addr = ["http://172.18.33.71:9200"]
[es.externalPublic]
addr = ["http://172.18.33.71:9200"]
[es.vip]
addr = ["http://172.18.33.71:9200"]
[es.log]
addr = ["http://172.18.33.71:9200"]
[es.article]
addr = ["http://172.18.33.71:9200"]
[es.internalPublic]
addr = ["http://172.18.33.71:9200"]
[es.open]
addr = ["http://172.18.33.71:9200"]
[es.fav]
addr = ["http://172.18.33.71:9200"]
[redis]
name = "search-job"
proto = "tcp"
addr = "172.18.33.71:6379"
active = 10
idle = 5
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
expire = "24h"
[sms]
phone = "17621966518"
token = "f5a658b2-5926-4b71-96c3-7d3777b7d256"
interval = 300

View File

@@ -0,0 +1,41 @@
# Automanaged Bazel file for the conf package; regenerated by gazelle.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Configuration-loading library (local TOML file or remote config center).
go_library(
    name = "go_default_library",
    srcs = ["conf.go"],
    importpath = "go-common/app/job/main/search/conf",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/cache/redis:go_default_library",
        "//library/conf:go_default_library",
        "//library/database/hbase.v2:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/log/infoc:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/net/trace:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/time:go_default_library",
        "//vendor/github.com/BurntSushi/toml:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,150 @@
package conf
import (
"errors"
"flag"
"time"
"go-common/library/cache/redis"
"go-common/library/conf"
"go-common/library/database/sql"
"go-common/library/log"
"go-common/library/log/infoc"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/trace"
"go-common/library/queue/databus"
xtime "go-common/library/time"
hbase "go-common/library/database/hbase.v2"
"github.com/BurntSushi/toml"
)
var (
	// ConfPath is the local config file path, set via the -conf flag.
	ConfPath string
	// client is the remote config-center client (nil when running from a
	// local file).
	client *conf.Client
	// Conf is the global, process-wide configuration instance.
	Conf = &Config{}
)

// Config is the top-level configuration for search-job, decoded from TOML
// (see the accompanying search-job-test.toml for section layout).
type Config struct {
	// log
	XLog *log.Config
	// tracer
	Tracer *trace.Config
	// hbase
	HBase *HBaseConfig
	// business environment selection
	Business *Business
	// xhttp server
	HTTPServer *bm.ServerConfig
	// http client
	HTTPClient *bm.ClientConfig
	// databases, keyed by logical name ([db.*] sections)
	DB map[string]*sql.Config
	// es clusters, keyed by cluster name ([es.*] sections)
	Es map[string]EsInfo
	// databus consumers, keyed by stream name ([databus.*] sections)
	Databus map[string]*databus.Config
	// infoc loggers
	InfoC map[string]*infoc.Config
	// sms alerting
	SMS *SMS
}

// HBaseConfig combines hbase.Config with extra read/write timeouts layered
// on top of the embedded config.
type HBaseConfig struct {
	*hbase.Config
	// extra config
	ReadTimeout   xtime.Duration
	ReadsTimeout  xtime.Duration
	WriteTimeout  xtime.Duration
	WritesTimeout xtime.Duration
}

// Consumer describes a consumer-group subscription.
// NOTE(review): not referenced by Config in this file — possibly unused;
// confirm before relying on it.
type Consumer struct {
	GroupID string
	Topic   []string
	Offset  string
	Addrs   []string
}

// Business selects the business environment (Env) and whether a full index
// pass should run (Index).
type Business struct {
	Env   string
	Index bool
}

// Redis wraps redis.Config with a cache expiry.
// NOTE(review): not referenced by Config in this file — possibly unused;
// confirm.
type Redis struct {
	*redis.Config
	Expire time.Duration
}

// EsInfo (deprecated) holds elasticsearch node addresses for one cluster.
type EsInfo struct {
	Addr []string
}

// SMS configures SMS alerting. Interval is the minimum gap between alerts
// (unit not shown here — presumably seconds; confirm against the sender).
type SMS struct {
	Phone    string
	Token    string
	Interval int64
}
// init .
func init() {
flag.StringVar(&ConfPath, "conf", "", "config path")
}
// Init .
func Init() (err error) {
if ConfPath != "" {
return local()
}
return remote()
}
// local .
func local() (err error) {
_, err = toml.DecodeFile(ConfPath, &Conf)
return
}
// remote connects to the config center, performs an initial load, and then
// watches for change events in a background goroutine, reloading the global
// Conf on each event.
func remote() (err error) {
	if client, err = conf.New(); err != nil {
		return
	}
	if err = load(); err != nil {
		return
	}
	go func() {
		for range client.Event() {
			log.Info("config reload")
			// Bug fix: capture the reload error locally. The original code
			// tested load() != nil but logged the outer (stale, always nil
			// by this point) err, so reload failures were reported as <nil>.
			if e := load(); e != nil {
				log.Error("config reload error (%v)", e)
			}
		}
	}()
	return
}
// load fetches the TOML payload from the config center, decodes it into a
// fresh Config, and swaps it into the global Conf.
func load() (err error) {
	var (
		s       string
		ok      bool
		tmpConf *Config
	)
	if s, ok = client.Toml2(); !ok {
		return errors.New("load config center error")
	}
	if _, err = toml.Decode(s, &tmpConf); err != nil {
		// Return the decode error itself rather than masking it with a
		// generic "could not decode config", so the cause reaches the logs.
		return
	}
	// NOTE(review): plain struct copy with no synchronization — concurrent
	// readers of Conf during a reload may observe a torn value; confirm
	// this is acceptable for this service.
	*Conf = *tmpConf
	return
}

View File

@@ -0,0 +1,77 @@
# Automanaged Bazel file for the dao package; regenerated by gazelle.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests for the dao package, run from the package directory.
go_test(
    name = "go_default_test",
    srcs = [
        "app_multiple_databus_test.go",
        "app_single_test.go",
        "config_offset_test.go",
        "dao_test.go",
        "es_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# Data-access layer: databus consumers, DB readers, ES writers, sms alerts.
go_library(
    name = "go_default_library",
    srcs = [
        "app_databus.go",
        "app_multiple.go",
        "app_multiple_databus.go",
        "app_single.go",
        "config_asset.go",
        "config_attr.go",
        "config_business.go",
        "config_offset.go",
        "dao.go",
        "es.go",
        "hbase.go",
        "sms.go",
    ],
    importpath = "go-common/app/job/main/search/dao",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/log/infoc:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/stat/prom:go_default_library",
        "//library/time:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app/job/main/search/dao/base:all-srcs",
        "//app/job/main/search/dao/business:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,197 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppDatabus consumes a databus stream for a single table and syncs the
// rows into elasticsearch.
type AppDatabus struct {
	d     *Dao    // parent dao holding the shared pools
	appid string  // business app id; key into the dao pools
	attrs *model.Attrs // per-app attributes (table, databus, index config)
	db    *xsql.DB     // database handle for this app's table
	dtb   *databus.Databus // databus subscription for incremental messages
	offset  *model.LoopOffset // incremental sync offset state
	mapData []model.MapData   // buffered rows pending bulk write
	commits map[int32]*databus.Message // latest message per partition, for commit
}
// NewAppDatabus constructs an AppDatabus consumer for the given appid,
// wiring up its attributes, database handle and databus subscription from
// the dao's pools.
func NewAppDatabus(d *Dao, appid string) (a *AppDatabus) {
	attrs := d.AttrPool[appid]
	a = &AppDatabus{
		d:       d,
		appid:   appid,
		attrs:   attrs,
		offset:  &model.LoopOffset{},
		mapData: []model.MapData{},
		db:      d.DBPool[attrs.DBName],
		dtb:     d.DatabusPool[attrs.Databus.Databus],
		commits: make(map[int32]*databus.Message),
	}
	return
}
// Business returns the business name from this app's attributes.
func (a *AppDatabus) Business() string {
	return a.attrs.Business
}
// InitIndex creates or updates the ES index for this app. When existing
// aliases can be fetched they are handed to InitIndex; otherwise the index
// is initialized from scratch.
func (a *AppDatabus) InitIndex(c context.Context) {
	if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
		a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
	} else {
		// NOTE(review): this branch passes IndexAliasPrefix twice, while the
		// branch above passes IndexEntityPrefix as the fifth argument —
		// looks like a copy/paste slip; confirm which entity prefix is
		// intended here.
		a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexMapping)
	}
}
// InitOffset seeds the offset row for this business and starts the loop
// offset at id 0 / the current time.
func (a *AppDatabus) InitOffset(c context.Context) {
	a.d.InitOffset(c, a.offset, a.attrs, []string{})
	now := time.Now().Format("2006-01-02 15:04:05")
	a.offset.SetOffset(0, now)
}
// Offset loads the persisted offset, retrying every 3s until it succeeds.
func (a *AppDatabus) Offset(c context.Context) {
	for {
		o, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
		if err == nil {
			a.offset.SetReview(o.ReviewID, o.ReviewTime)
			a.offset.SetOffset(o.OffsetID(), o.OffsetTime())
			return
		}
		log.Error("a.d.Offset error(%v)", err)
		time.Sleep(time.Second * 3)
	}
}
// SetRecover records the recover point (id/time); i is unused for the
// single-table case.
func (a *AppDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages drains the databus stream until either AggCount relevant
// messages are buffered or the ticker fires, attaches extra data and
// reports how many rows are ready for BulkIndex.
func (a *AppDatabus) IncrMessages(c context.Context) (length int, err error) {
	ticker := time.NewTicker(time.Millisecond * time.Duration(a.attrs.Databus.Ticker))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-a.dtb.Messages():
			if !ok {
				// fix: log the databus name; the old code passed the whole
				// config struct to %s.
				log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus.Databus)
				break // leaves the select; the break below exits the loop
			}
			m := &model.Message{}
			// remember the newest message per partition for later Commit.
			a.commits[msg.Partition] = msg
			if err = json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				continue
			}
			if m.Table == a.attrs.Table.TablePrefix {
				if m.Action == "insert" || m.Action == "update" {
					var parseMap map[string]interface{}
					parseMap, err = a.d.JSON2map(m.New)
					log.Info(fmt.Sprintf("%v: %+v", a.attrs.AppID, parseMap))
					if err != nil {
						log.Error("a.JSON2map error(%v)", err)
						continue
					}
					a.mapData = append(a.mapData, parseMap)
				}
			}
			// keep reading until a full aggregation batch is buffered.
			if len(a.mapData) < a.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
		}
		break
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{})
	}
	length = len(a.mapData)
	return
}
// AllMessages pulls the next batch of rows for a full (re)index run,
// starting after the current offset id, attaches extra data and advances
// the temp offsets to the last row's _id.
func (a *AppDatabus) AllMessages(c context.Context) (length int, err error) {
	rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.OffsetID, a.attrs.Other.Size)
	log.Info("appid: %s allMessages Current offsetID: %d", a.appid, a.offset.OffsetID)
	if err != nil {
		log.Error("AllMessages db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := InitMapData(a.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("AllMessages rows.Scan() error(%v)", err)
			return
		}
		a.mapData = append(a.mapData, item)
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{})
		// advance the offset to the last row's primary id.
		if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok && v != nil {
			// fix: assert the concrete type directly; the old assertion to
			// interface{} always succeeded and the subsequent unchecked
			// .(int64) could panic on unexpected data.
			if id, ok := v.(int64); ok {
				a.offset.SetTempOffset(id, "")
				a.offset.SetRecoverTempOffset(id, "")
			} else {
				log.Error("dtb.all._id interface error")
			}
		} else {
			log.Error("dtb.all._id nil error")
		}
	}
	length = len(a.mapData)
	return
}
// BulkIndex writes mapData[start:end] to ES, choosing the DB or databus
// bulk path according to the global index switch.
func (a *AppDatabus) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
	part := a.mapData[start:end]
	if !a.d.c.Business.Index {
		return a.d.BulkDatabusData(c, a.attrs, writeEntityIndex, part...)
	}
	return a.d.BulkDBData(c, a.attrs, writeEntityIndex, part...)
}
// Commit persists progress: the DB offset in full-index mode, otherwise the
// buffered databus messages; the row buffer is reset either way.
func (a *AppDatabus) Commit(c context.Context) (err error) {
	if a.d.c.Business.Index {
		err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
	} else {
		for partition, msg := range a.commits {
			if err = msg.Commit(); err != nil {
				log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
				continue
			}
			delete(a.commits, partition)
		}
	}
	a.mapData = []model.MapData{}
	return
}
// Sleep pauses for the configured interval between loop iterations.
func (a *AppDatabus) Sleep(c context.Context) {
	d := time.Duration(a.attrs.Other.Sleep) * time.Second
	time.Sleep(d)
}
// Size returns the configured batch size.
func (a *AppDatabus) Size(c context.Context) int {
	size := a.attrs.Other.Size
	return size
}

View File

@@ -0,0 +1,185 @@
package dao
import (
"context"
"fmt"
"strconv"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// AppMultiple indexes a business whose source data is sharded across
// multiple DB tables; both incremental and full runs read from the DB.
type AppMultiple struct {
	d       *Dao              // shared dao (ES clients, DB pools)
	appid   string            // business app id, key into the pools
	attrs   *model.Attrs      // per-business configuration
	db      *xsql.DB          // sharded source database
	offsets model.LoopOffsets // one loop offset per table shard
	mapData []model.MapData   // buffered rows awaiting bulk index
}
// NewAppMultiple builds an AppMultiple for appid and allocates one loop
// offset per table shard in [TableFrom, TableTo].
func NewAppMultiple(d *Dao, appid string) (am *AppMultiple) {
	attrs := d.AttrPool[appid]
	am = &AppMultiple{
		d:       d,
		appid:   appid,
		db:      d.DBPool[attrs.DBName],
		attrs:   attrs,
		offsets: map[int]*model.LoopOffset{},
	}
	for shard := attrs.Table.TableFrom; shard <= attrs.Table.TableTo; shard++ {
		am.offsets[shard] = &model.LoopOffset{}
	}
	return
}
// Business reports the business name this instance serves.
func (am *AppMultiple) Business() string {
	name := am.attrs.Business
	return name
}
// InitIndex creates every sharded index/alias pair in [IndexFrom, IndexTo].
func (am *AppMultiple) InitIndex(c context.Context) {
	aliases, err := am.d.GetAliases(am.attrs.ESName, am.attrs.Index.IndexAliasPrefix)
	if err != nil {
		// fall back to creating without alias knowledge, as before.
		aliases = nil
	}
	for i := am.attrs.Index.IndexFrom; i <= am.attrs.Index.IndexTo; i++ {
		aliasName := fmt.Sprintf("%s%0"+am.attrs.Index.IndexZero+"d", am.attrs.Index.IndexAliasPrefix, i)
		entityName := fmt.Sprintf("%s%0"+am.attrs.Index.IndexZero+"d", am.attrs.Index.IndexEntityPrefix, i)
		am.d.InitIndex(c, aliases, am.attrs.ESName, aliasName, entityName, am.attrs.Index.IndexMapping)
	}
}
// InitOffset inserts the initial offset rows for this business.
// NOTE(review): only offsets[0] is passed here, but NewAppMultiple only
// allocates keys in [TableFrom, TableTo] — when TableFrom > 0 this passes a
// nil *LoopOffset. Confirm whether that is intended.
func (am *AppMultiple) InitOffset(c context.Context) {
	am.d.InitOffset(c, am.offsets[0], am.attrs, []string{})
}
// Offset loads the persisted offset for every table shard, retrying a
// failed read until it succeeds.
func (am *AppMultiple) Offset(c context.Context) {
	// fix: iterate through TableTo inclusively (every other shard loop in
	// this type uses <=, so the last shard was silently skipped), and retry
	// on error instead of dereferencing the nil offset of a failed read.
	for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
		for {
			offset, err := am.d.Offset(c, am.attrs.AppID, am.attrs.Table.TablePrefix+strconv.Itoa(i))
			if err != nil {
				log.Error("as.d.Offset error(%v)", err)
				time.Sleep(time.Second * 3)
				continue
			}
			am.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
			am.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
			break
		}
	}
}
// SetRecover records the recover point (id/time) for table shard i.
func (am *AppMultiple) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	am.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages reads the next incremental batch from every table shard
// (by mtime, or by id+mtime while a shard is looping), merges the rows,
// attaches extra data and reports how many are ready for BulkIndex.
func (am *AppMultiple) IncrMessages(c context.Context) (length int, err error) {
	var rows *xsql.Rows
	am.mapData = []model.MapData{}
	for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
		if !am.offsets[i].IsLoop {
			rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByMTime, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetTime, am.attrs.Other.Size)
		} else {
			rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByIDMTime, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetID, am.offsets[i].OffsetTime, am.attrs.Other.Size)
		}
		if err != nil {
			log.Error("db.Query error(%v)", err)
			continue
		}
		tempList := []model.MapData{}
		for rows.Next() {
			item, row := InitMapData(am.attrs.DataSQL.DataIndexFields)
			if err = rows.Scan(row...); err != nil {
				log.Error("rows.Scan() error(%v)", err)
				continue
			}
			tempList = append(tempList, item)
			am.mapData = append(am.mapData, item)
		}
		rows.Close()
		// advance this shard's offset to the last scanned row.
		if len(tempList) > 0 {
			UpdateOffsetByMap(am.offsets[i], tempList...)
		}
	}
	if len(am.mapData) > 0 {
		am.mapData, err = am.d.ExtraData(c, am.mapData, am.attrs, "db", []string{})
	}
	length = len(am.mapData)
	return
}
// AllMessages pulls the next full-run batch from every table shard by id,
// attaches extra data and advances each shard's temp offset to its last
// scanned row.
func (am *AppMultiple) AllMessages(c context.Context) (length int, err error) {
	am.mapData = []model.MapData{}
	for i := am.attrs.Table.TableFrom; i <= am.attrs.Table.TableTo; i++ {
		var rows *xsql.Rows
		if rows, err = am.db.Query(c, fmt.Sprintf(am.attrs.DataSQL.SQLByID, am.attrs.DataSQL.SQLFields, i), am.offsets[i].OffsetID, am.attrs.Other.Size); err != nil {
			log.Error("AllMessages db.Query error(%v)", err)
			return
		}
		tempList := []model.MapData{}
		for rows.Next() {
			item, row := InitMapData(am.attrs.DataSQL.DataIndexFields)
			if err = rows.Scan(row...); err != nil {
				log.Error("IncrMessages rows.Scan() error(%v)", err)
				continue
			}
			tempList = append(tempList, item)
			am.mapData = append(am.mapData, item)
		}
		rows.Close()
		tmpLength := len(tempList)
		if tmpLength > 0 {
			// remember the last row's primary id/mtime for this shard.
			am.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
		}
	}
	if len(am.mapData) > 0 {
		am.mapData, err = am.d.ExtraData(c, am.mapData, am.attrs, "db", []string{})
	}
	length = len(am.mapData)
	return
}
// BulkIndex writes mapData[start:end] to ES when the bounds are valid.
func (am *AppMultiple) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
	n := len(am.mapData)
	if n >= start+1 && n >= end {
		err = am.d.BulkDBData(c, am.attrs, writeEntityIndex, am.mapData[start:end]...)
	}
	return
}
// Commit persists every shard's offset and resets the row buffer.
func (am *AppMultiple) Commit(c context.Context) (err error) {
	for shard := am.attrs.Table.TableFrom; shard <= am.attrs.Table.TableTo; shard++ {
		table := am.attrs.Table.TablePrefix + strconv.Itoa(shard)
		if err = am.d.CommitOffset(c, am.offsets[shard], am.attrs.AppID, table); err != nil {
			log.Error("Commit error(%v)", err)
			continue
		}
	}
	am.mapData = []model.MapData{}
	return
}
// Sleep pauses for the configured interval between loop iterations.
func (am *AppMultiple) Sleep(c context.Context) {
	d := time.Duration(am.attrs.Other.Sleep) * time.Second
	time.Sleep(d)
}
// Size returns the configured batch size.
func (am *AppMultiple) Size(c context.Context) (size int) {
	return am.attrs.Other.Size
}

View File

@@ -0,0 +1,451 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"regexp"
"strings"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AppMultipleDatabus indexes a business sharded over multiple tables and/or
// date-based indices: incremental updates come from databus, full runs
// from the sharded DB tables.
type AppMultipleDatabus struct {
	d               *Dao                       // shared dao
	appid           string                     // business app id
	attrs           *model.Attrs               // per-business configuration
	db              *xsql.DB                   // source database for full runs
	dtb             *databus.Databus           // incremental binlog stream
	offsets         model.LoopOffsets          // one loop offset per source table
	mapData         []model.MapData            // buffered rows awaiting bulk index
	tableName       []string                   // expanded source table names
	indexNameSuffix []string                   // expanded date index suffixes
	commits         map[int32]*databus.Message // newest message per partition
}
// IndexNameSuffix enumerates every index-name suffix between startDate and
// now, stepping by day/week/month/year depending on which reference-time
// component the format string contains.
func (amd *AppMultipleDatabus) IndexNameSuffix(format string, startDate string) (res []string, err error) {
	var (
		sTime time.Time
		eTime = time.Now()
	)
	sTime, err = time.Parse(format, startDate)
	if err != nil {
		log.Error("d.LogAuditIndexName(%v)", startDate)
		return
	}
	// resDict de-duplicates suffixes that render identically.
	resDict := map[string]bool{}
	if strings.Contains(format, "02") { // daily ("02" = day-of-month)
		for {
			resDict[amd.getIndexName(format, eTime)] = true
			eTime = eTime.AddDate(0, 0, -1)
			if sTime.After(eTime) {
				break
			}
		}
	} else if strings.Contains(format, "week") { // week buckets, see getIndexName
		for {
			resDict[amd.getIndexName(format, eTime)] = true
			eTime = eTime.AddDate(0, 0, -7)
			if sTime.After(eTime) {
				break
			}
		}
	} else if strings.Contains(format, "01") { // monthly ("01" = month)
		// AddDate(0, -1, 0) misbehaves on Jan 31st, so pin to the 1st first.
		year, month, _ := eTime.Date()
		hour, min, sec := eTime.Clock()
		eTime = time.Date(year, month, 1, hour, min, sec, 0, eTime.Location())
		for {
			resDict[amd.getIndexName(format, eTime)] = true
			eTime = eTime.AddDate(0, -1, 0)
			if sTime.After(eTime) {
				break
			}
		}
	} else if strings.Contains(format, "2006") { // yearly
		// AddDate(-1, 0, 0) misbehaves on Feb 29th, so pin to Jan 1st first.
		year, _, _ := eTime.Date()
		hour, min, sec := eTime.Clock()
		eTime = time.Date(year, 1, 1, hour, min, sec, 0, eTime.Location())
		for {
			resDict[amd.getIndexName(format, eTime)] = true
			eTime = eTime.AddDate(-1, 0, 0)
			if sTime.After(eTime) {
				break
			}
		}
	}
	for k := range resDict {
		res = append(res, k)
	}
	return
}
// getIndexName renders an index-name suffix for the given time, replacing
// the literal "week" in the format with a fixed day-range bucket.
func (amd *AppMultipleDatabus) getIndexName(format string, time time.Time) (index string) {
	var (
		// day-of-month bucket labels, selected by time.Day()/9 below.
		week = map[int]string{
			0: "0108",
			1: "0916",
			2: "1724",
			3: "2531",
		}
	)
	// NOTE(review): Day()/9 does not line up exactly with the labels — e.g.
	// day 17 maps to bucket 1 ("0916") and days 25/26 to bucket 2 ("1724").
	// Existing indices may already be named this way; confirm before changing.
	return strings.Replace(time.Format(format), "week", week[time.Day()/9], -1)
}
// NewAppMultipleDatabus builds an AppMultipleDatabus for appid, expanding
// the configured table shards (numeric range or date-formatted names) and
// allocating one loop offset per source table.
func NewAppMultipleDatabus(d *Dao, appid string) (amd *AppMultipleDatabus) {
	var err error
	amd = &AppMultipleDatabus{
		d:               d,
		appid:           appid,
		attrs:           d.AttrPool[appid],
		offsets:         make(map[int]*model.LoopOffset),
		tableName:       []string{},
		indexNameSuffix: []string{},
		commits:         make(map[int32]*databus.Message),
	}
	amd.db = d.DBPool[amd.attrs.DBName]
	amd.dtb = d.DatabusPool[amd.attrs.Databus.Databus]
	if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" {
		// numeric shards: prefix + zero-padded shard number.
		for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
			tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
			amd.tableName = append(amd.tableName, tableName)
			amd.offsets[i] = &model.LoopOffset{}
		}
	} else {
		// date shards: TableFormat is "<layout>,<startDate>".
		var tableNameSuffix []string
		tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
		if tableNameSuffix, err = amd.IndexNameSuffix(tableFormat[0], tableFormat[1]); err != nil {
			log.Error("amd.IndexNameSuffix(%v)", err)
			return
		}
		for _, v := range tableNameSuffix {
			amd.tableName = append(amd.tableName, amd.attrs.Table.TablePrefix+v)
		}
		for i := range amd.tableName {
			amd.offsets[i] = &model.LoopOffset{}
		}
	}
	return
}
// Business reports the business name this instance serves.
func (amd *AppMultipleDatabus) Business() string {
	name := amd.attrs.Business
	return name
}
// InitIndex creates every index/alias pair for this business: either a
// numeric range of shards or date-suffixed indices, per IndexFormat.
func (amd *AppMultipleDatabus) InitIndex(c context.Context) {
	var (
		err             error
		indexAliasName  string
		indexEntityName string
	)
	indexFormat := strings.Split(amd.attrs.Index.IndexFormat, ",")
	aliases, aliasErr := amd.d.GetAliases(amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix)
	if indexFormat[0] == "int" || indexFormat[0] == "single" {
		for i := amd.attrs.Index.IndexFrom; i <= amd.attrs.Index.IndexTo; i++ {
			// NOTE: special-casing IndexZero == "0" is not generic
			// (translated from the original comment).
			if amd.attrs.Index.IndexZero == "0" {
				indexAliasName = amd.attrs.Index.IndexAliasPrefix
				indexEntityName = amd.attrs.Index.IndexEntityPrefix
			} else {
				indexAliasName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexAliasPrefix, i)
				indexEntityName = fmt.Sprintf("%s%0"+amd.attrs.Index.IndexZero+"d", amd.attrs.Index.IndexEntityPrefix, i)
			}
			if aliasErr != nil {
				amd.d.InitIndex(c, nil, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
			} else {
				amd.d.InitIndex(c, aliases, amd.attrs.ESName, indexAliasName, indexEntityName, amd.attrs.Index.IndexMapping)
			}
		}
	} else {
		// date-suffixed indices derived from the index format/start date.
		if amd.indexNameSuffix, err = amd.IndexNameSuffix(indexFormat[0], indexFormat[1]); err != nil {
			log.Error("amd.IndexNameSuffix(%v)", err)
			return
		}
		for _, v := range amd.indexNameSuffix {
			if aliasErr != nil {
				amd.d.InitIndex(c, nil, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
			} else {
				amd.d.InitIndex(c, aliases, amd.attrs.ESName, amd.attrs.Index.IndexAliasPrefix+v, amd.attrs.Index.IndexEntityPrefix+v, amd.attrs.Index.IndexMapping)
			}
		}
	}
}
// InitOffset inserts the initial offset rows for every expanded table.
// NOTE(review): offsets[0] is passed as the template offset; confirm key 0
// always exists for every table-split mode.
func (amd *AppMultipleDatabus) InitOffset(c context.Context) {
	amd.d.InitOffset(c, amd.offsets[0], amd.attrs, amd.tableName)
}
// Offset loads the persisted offset for every source table, retrying a
// failed read until it succeeds.
func (amd *AppMultipleDatabus) Offset(c context.Context) {
	for i, v := range amd.tableName {
		// fix: retry on error instead of falling through — the old code slept
		// but still dereferenced the nil offset of a failed read.
		for {
			offset, err := amd.d.Offset(c, amd.attrs.AppID, v)
			if err != nil {
				log.Error("amd.d.offset error(%v)", err)
				time.Sleep(time.Second * 3)
				continue
			}
			amd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
			amd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
			break
		}
	}
}
// SetRecover records the recover point (id/time) for table index i.
func (amd *AppMultipleDatabus) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	amd.offsets.SetRecoverOffsets(i, recoverID, recoverTime)
}
// IncrMessages drains the databus until AggCount relevant messages are
// buffered or the ticker fires, applies per-business filters, then attaches
// extra data and reports how many rows are ready for BulkIndex.
func (amd *AppMultipleDatabus) IncrMessages(c context.Context) (length int, err error) {
	ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(amd.attrs.Databus.Ticker)))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-amd.dtb.Messages():
			if !ok {
				log.Error("databus: %s binlog consumer exit!!!", amd.attrs.Databus)
				break // leaves the select; the break below exits the loop
			}
			m := &model.Message{}
			// remember the newest message per partition for later Commit.
			amd.commits[msg.Partition] = msg
			if err = json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				continue
			}
			// creative_reply only consumes reply_<n> shard tables.
			if amd.attrs.Business == "creative_reply" {
				r, _ := regexp.Compile("reply_\\d+")
				if !r.MatchString(m.Table) {
					continue
				}
			}
			if (amd.attrs.Table.TableSplit == "string" && m.Table == amd.attrs.Table.TablePrefix) ||
				(amd.attrs.Table.TableSplit != "string" && strings.HasPrefix(m.Table, amd.attrs.Table.TablePrefix)) {
				if m.Action == "insert" || m.Action == "update" {
					var parseMap map[string]interface{}
					parseMap, err = amd.d.JSON2map(m.New)
					if err != nil {
						log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
						continue
					}
					// esports fav type filter
					// NOTE(review): the unchecked t.(int64) assertions below
					// panic if JSON2map yields a different numeric type — confirm.
					if amd.attrs.AppID == "esports_fav" {
						if t, ok := parseMap["type"]; ok && t.(int64) != 10 {
							continue
						}
					}
					// playlist fav type and attr filter
					if amd.attrs.AppID == "fav_playlist" {
						if t, ok := parseMap["type"]; ok && t.(int64) != 2 {
							continue
						}
						if t, ok := parseMap["attr"]; ok {
							if t.(int64)>>0&1 == 0 || (m.Action == "insert" && t.(int64)>>1&1 == 1) {
								continue
							}
						}
					}
					var newParseMap map[string]interface{}
					newParseMap, err = amd.newParseMap(c, m.Table, parseMap)
					if err != nil {
						// creative_reply uses errors to signal rows to skip.
						if amd.attrs.AppID == "creative_reply" {
							continue
						}
						log.Error("amd.newParseMap error(%v)", err)
						continue
					}
					amd.mapData = append(amd.mapData, newParseMap)
				}
			}
			// keep reading until a full aggregation batch is buffered.
			if len(amd.mapData) < amd.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
		}
		break
	}
	if len(amd.mapData) > 0 {
		amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "dtb", []string{})
	}
	length = len(amd.mapData)
	return
}
// AllMessages pulls the next full-run batch from every source table,
// normalizes each row via newParseMap, attaches extra data and advances
// each table's temp offset to its last scanned row.
func (amd *AppMultipleDatabus) AllMessages(c context.Context) (length int, err error) {
	amd.mapData = []model.MapData{}
	for i, v := range amd.tableName {
		var (
			rows *xsql.Rows
			sql  string
		)
		tableFormat := strings.Split(amd.attrs.Table.TableFormat, ",")
		if amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm" {
			// the dm SQL template embeds the shard number twice.
			sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i, i)
		} else if tableFormat[0] == "int" || tableFormat[0] == "single" { // compatible with passing only the suffix, not the table name
			sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, i)
			log.Info(sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size)
		} else {
			sql = fmt.Sprintf(amd.attrs.DataSQL.SQLByID, amd.attrs.DataSQL.SQLFields, v)
		}
		if rows, err = amd.db.Query(c, sql, amd.offsets[i].OffsetID, amd.attrs.Other.Size); err != nil {
			log.Error("AllMessages db.Query error(%v)", err)
			return
		}
		tempList := []model.MapData{}
		for rows.Next() {
			item, row := InitMapData(amd.attrs.DataSQL.DataIndexFields)
			if err = rows.Scan(row...); err != nil {
				log.Error("AppMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
				continue
			}
			var newParseMap map[string]interface{}
			newParseMap, err = amd.newParseMap(c, v, item)
			if err != nil {
				log.Error("amd.newParseMap error(%v)", err)
				continue
			}
			tempList = append(tempList, newParseMap)
			amd.mapData = append(amd.mapData, newParseMap)
		}
		rows.Close()
		tmpLength := len(tempList)
		if tmpLength > 0 {
			amd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrMTime())
		}
	}
	if len(amd.mapData) > 0 {
		amd.mapData, err = amd.d.ExtraData(c, amd.mapData, amd.attrs, "db", []string{})
	}
	length = len(amd.mapData)
	return
}
// BulkIndex writes mapData[start:end] to ES, choosing the DB or databus
// bulk path according to the global index switch.
func (amd *AppMultipleDatabus) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
	part := amd.mapData[start:end]
	if !amd.d.c.Business.Index {
		return amd.d.BulkDatabusData(c, amd.attrs, writeEntityIndex, part...)
	}
	return amd.d.BulkDBData(c, amd.attrs, writeEntityIndex, part...)
}
// Commit persists progress: per-table DB offsets in full-index mode, or the
// buffered databus messages otherwise; the row buffer is reset either way.
func (amd *AppMultipleDatabus) Commit(c context.Context) (err error) {
	if amd.d.c.Business.Index {
		if amd.attrs.Table.TableSplit == "int" || amd.attrs.Table.TableSplit == "single" { // compatible with passing only the suffix, not the table name
			for i := amd.attrs.Table.TableFrom; i <= amd.attrs.Table.TableTo; i++ {
				tableName := fmt.Sprintf("%s%0"+amd.attrs.Table.TableZero+"d", amd.attrs.Table.TablePrefix, i)
				if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, tableName); err != nil {
					log.Error("AppMultipleDatabus.Commit error(%v)", err)
					continue
				}
			}
		} else {
			for i, v := range amd.indexNameSuffix {
				if err = amd.d.CommitOffset(c, amd.offsets[i], amd.attrs.AppID, v); err != nil {
					log.Error("Commit error(%v)", err)
					continue
				}
			}
		}
	} else {
		// fix: the loop variable used to be named c, shadowing the
		// context.Context parameter.
		for partition, msg := range amd.commits {
			if err = msg.Commit(); err != nil {
				log.Error("AppMultipleDatabus.Commit error(%v)", err)
				continue
			}
			delete(amd.commits, partition)
		}
	}
	amd.mapData = []model.MapData{}
	return
}
// Sleep pauses for the configured interval between loop iterations.
func (amd *AppMultipleDatabus) Sleep(c context.Context) {
	d := time.Duration(amd.attrs.Other.Sleep) * time.Second
	time.Sleep(d)
}
// Size returns the configured batch size.
func (amd *AppMultipleDatabus) Size(c context.Context) (size int) {
	size = amd.attrs.Other.Size
	return
}
// indexField .
// func (amd *AppMultipleDatabus) indexField(c context.Context, tableName string) (fieldName string, fieldValue int) {
// suffix, _ := strconv.Atoi(strings.Split(tableName, "_")[2])
// s := strings.Split(amd.attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// fieldName = v[0]
// indexNum, _ := strconv.Atoi(v[2])
// fieldValue = suffix + indexNum
// return
// }
// newParseMap post-processes one row/message: it decides the target index
// name (and, for some businesses, a custom index id) based on the app id
// and the source table, returning the enriched map.
func (amd *AppMultipleDatabus) newParseMap(c context.Context, table string, parseMap map[string]interface{}) (res map[string]interface{}, err error) {
	res = parseMap
	// TODO: entity index cannot be written (translated from the original).
	if (amd.attrs.AppID == "dm_search" || amd.attrs.AppID == "dm") && !amd.d.c.Business.Index {
		indexSuffix := strings.Split(table, "_")[2]
		res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
		if _, ok := res["msg"]; ok {
			// dm_content_: keyed by dmid.
			res["index_field"] = true // drop ctime (translated)
			res["index_id"] = fmt.Sprintf("%v", res["dmid"])
		} else {
			// dm_index_: keyed by id.
			res["index_id"] = fmt.Sprintf("%v", res["id"])
		}
	} else if amd.attrs.AppID == "dmreport" {
		// yearly index derived from ctime, which arrives either as a scanned
		// DB value or as a databus string.
		if ztime, ok := res["ctime"].(*interface{}); ok { // from DB
			if ctime, cok := (*ztime).(time.Time); cok {
				res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
			}
		} else if ztime, ok := res["ctime"].(string); ok { // from databus
			var ctime time.Time
			if ctime, err = time.Parse("2006-01-02 15:04:05", ztime); err == nil {
				res["index_name"] = amd.attrs.Index.IndexAliasPrefix + ctime.Format("2006")
			}
		}
	} else if amd.attrs.AppID == "creative_reply" && !amd.d.c.Business.Index {
		// only reply types 1/12/14 are indexed; other rows are reported as
		// errors so the caller can skip them.
		if replyType, ok := res["type"].(int64); ok {
			if replyType == 1 || replyType == 12 || replyType == 14 {
			} else {
				err = fmt.Errorf("多余数据")
			}
		} else {
			err = fmt.Errorf("错误数据")
		}
	} else if amd.attrs.Index.IndexSplit == "single" {
		res["index_name"] = amd.attrs.Index.IndexAliasPrefix
	} else {
		// strings.Count(s, "") == rune count + 1, so this strips the table
		// prefix (by runes) and keeps the shard suffix.
		indexSuffix := string([]rune(table)[strings.Count(amd.attrs.Table.TablePrefix, "")-1:])
		res["index_name"] = amd.attrs.Index.IndexAliasPrefix + indexSuffix
	}
	//dtb index_id
	if amd.attrs.AppID == "favorite" && !amd.d.c.Business.Index {
		if fid, ok := res["fid"].(int64); ok {
			if oid, ok := res["oid"].(int64); ok {
				res["index_id"] = fmt.Sprintf("%d_%d", fid, oid)
				return
			}
		}
		res["index_id"] = "err"
		// NOTE(review): "indexName" differs from the "index_name" key used
		// everywhere else — confirm whether this should be "index_name".
		res["indexName"] = ""
	}
	return
}

View File

@@ -0,0 +1 @@
package dao

View File

@@ -0,0 +1,169 @@
package dao
import (
"context"
"time"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// AppSingle indexes a business backed by one unsharded DB table; both
// incremental and full runs read directly from the DB.
type AppSingle struct {
	d       *Dao              // shared dao (ES clients, DB pools)
	appid   string            // business app id, key into the pools
	attrs   *model.Attrs      // per-business configuration
	db      *xsql.DB          // source database
	offset  *model.LoopOffset // loop offset bookkeeping
	mapData []model.MapData   // buffered rows awaiting bulk index
}
// NewAppSingle builds an AppSingle for appid with an empty offset/buffer.
func NewAppSingle(d *Dao, appid string) (as *AppSingle) {
	attrs := d.AttrPool[appid]
	as = &AppSingle{
		d:       d,
		appid:   appid,
		attrs:   attrs,
		offset:  &model.LoopOffset{},
		mapData: []model.MapData{},
		db:      d.DBPool[attrs.DBName],
	}
	return
}
// Business reports the business name this instance serves.
func (as *AppSingle) Business() string {
	name := as.attrs.Business
	return name
}
// InitIndex creates the ES index and alias for this business if missing.
func (as *AppSingle) InitIndex(c context.Context) {
	aliases, err := as.d.GetAliases(as.attrs.ESName, as.attrs.Index.IndexAliasPrefix)
	if err != nil {
		// same call as before with no alias knowledge.
		aliases = nil
	}
	as.d.InitIndex(c, aliases, as.attrs.ESName, as.attrs.Index.IndexAliasPrefix, as.attrs.Index.IndexEntityPrefix, as.attrs.Index.IndexMapping)
}
// InitOffset seeds the offset row for this business and starts the loop
// offset at id 0 / the current time.
func (as *AppSingle) InitOffset(c context.Context) {
	as.d.InitOffset(c, as.offset, as.attrs, []string{})
	now := time.Now().Format("2006-01-02 15:04:05")
	as.offset.SetOffset(0, now)
}
// Offset loads the persisted offset, retrying every 3s until it succeeds.
func (as *AppSingle) Offset(c context.Context) {
	for {
		o, err := as.d.Offset(c, as.appid, as.attrs.Table.TablePrefix)
		if err == nil {
			as.offset.SetReview(o.ReviewID, o.ReviewTime)
			as.offset.SetOffset(o.OffsetID(), o.OffsetTime())
			return
		}
		log.Error("ac.d.Offset error(%v)", err)
		time.Sleep(time.Second * 3)
	}
}
// SetRecover records the recover point (id/time); i is unused for the
// single-table case.
func (as *AppSingle) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	as.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages reads the next incremental batch by mtime (or id+mtime while
// looping), attaches extra data and advances the loop offset.
func (as *AppSingle) IncrMessages(c context.Context) (length int, err error) {
	var rows *xsql.Rows
	if !as.offset.IsLoop {
		rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByMTime, as.offset.OffsetTime, as.attrs.Other.Size)
	} else {
		rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByIDMTime, as.offset.OffsetID, as.offset.OffsetTime, as.attrs.Other.Size)
	}
	if err != nil {
		log.Error("db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	tempPartList := []model.MapData{}
	for rows.Next() {
		item, row := InitMapData(as.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("IncrMessages rows.Scan() error(%v)", err)
			return
		}
		as.mapData = append(as.mapData, item)
		tempPartList = append(tempPartList, item)
	}
	if len(as.mapData) > 0 {
		// extra relevant data
		as.mapData, err = as.d.ExtraData(c, as.mapData, as.attrs, "db", []string{})
		// advance the loop offset from the rows read in this batch.
		UpdateOffsetByMap(as.offset, tempPartList...)
	}
	length = len(as.mapData)
	return
}
// AllMessages pulls the next batch of rows for a full (re)index run,
// starting after the recover id, attaches extra data and advances the
// temp offsets to the last row's _id.
func (as *AppSingle) AllMessages(c context.Context) (length int, err error) {
	var rows *xsql.Rows
	if rows, err = as.db.Query(c, as.attrs.DataSQL.SQLByID, as.offset.RecoverID, as.attrs.Other.Size); err != nil {
		log.Error("AllMessages db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := InitMapData(as.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("AllMessages rows.Scan() error(%v)", err)
			continue
		}
		as.mapData = append(as.mapData, item)
	}
	if len(as.mapData) > 0 {
		// extra relevant data
		as.mapData, err = as.d.ExtraData(c, as.mapData, as.attrs, "db", []string{})
		// fix: assert _id to int64 directly; the old assertion to interface{}
		// always succeeded and the subsequent unchecked .(int64) could panic.
		if v, ok := as.mapData[len(as.mapData)-1]["_id"]; ok && v != nil {
			if id, ok := v.(int64); ok {
				as.offset.SetTempOffset(id, "")
				as.offset.SetRecoverTempOffset(id, "")
			} else {
				log.Error("single.all._id interface error")
			}
		} else {
			log.Error("single.all._id nil error")
		}
	}
	length = len(as.mapData)
	return
}
// BulkIndex writes mapData[start:end] to ES when the bounds are valid.
func (as *AppSingle) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
	n := len(as.mapData)
	if n >= start+1 && n >= end {
		err = as.d.BulkDBData(c, as.attrs, writeEntityIndex, as.mapData[start:end]...)
	}
	return
}
// Commit persists the loop offset and resets the row buffer.
func (as *AppSingle) Commit(c context.Context) (err error) {
	err = as.d.CommitOffset(c, as.offset, as.attrs.AppID, as.attrs.Table.TablePrefix)
	as.mapData = nil
	as.mapData = []model.MapData{}
	return
}
// Sleep pauses for the configured interval between loop iterations.
func (as *AppSingle) Sleep(c context.Context) {
	d := time.Duration(as.attrs.Other.Sleep) * time.Second
	time.Sleep(d)
}
// Size returns the configured batch size.
func (as *AppSingle) Size(c context.Context) int {
	size := as.attrs.Other.Size
	return size
}

View File

@@ -0,0 +1 @@
package dao

View File

@@ -0,0 +1,46 @@
# Bazel build for go-common/app/job/main/search/dao/base.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests for the base dao wiring.
go_test(
    name = "go_default_test",
    srcs = ["base_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],  # kept in sync by the BUILD generator
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# Library target for the dao/base package.
go_library(
    name = "go_default_library",
    srcs = ["base.go"],
    importpath = "go-common/app/job/main/search/dao/base",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/dao:go_default_library",
        "//app/job/main/search/dao/business:go_default_library",
    ],
)

# Source-file groups consumed by parent packages.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,57 @@
package base
import (
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
bsn "go-common/app/job/main/search/dao/business"
)
// Base bundles the root dao and configuration for the search job service.
type Base struct {
	D *dao.Dao     // root dao holding the per-business app pool
	C *conf.Config // global configuration
}
// NewBase constructs the root dao from config and populates its app pool.
func NewBase(c *conf.Config) (b *Base) {
	b = &Base{C: c, D: dao.New(c)}
	b.D.AppPool = b.newAppPool(b.D)
	return
}
// newAppPool constructs one dao.App per configured business, keyed by
// appid, dispatching on the configured incremental strategy.
func (b *Base) newAppPool(d *dao.Dao) (pool map[string]dao.App) {
	pool = map[string]dao.App{}
	for appid, biz := range d.BusinessPool {
		switch biz.IncrWay {
		case "single":
			pool[appid] = dao.NewAppSingle(d, appid)
		case "multiple":
			pool[appid] = dao.NewAppMultiple(d, appid)
		case "dtb":
			pool[appid] = dao.NewAppDatabus(d, appid)
		case "multipleDtb":
			pool[appid] = dao.NewAppMultipleDatabus(d, appid)
		case "business":
			// businesses with bespoke implementations.
			switch appid {
			case "archive_video":
				pool[appid] = bsn.NewAvr(d, appid, b.C)
			case "avr_archive", "avr_video":
				pool[appid] = bsn.NewAvrArchive(d, appid)
			case "log_audit", "log_user_action":
				pool[appid] = bsn.NewLog(d, appid)
			case "dm_date":
				pool[appid] = bsn.NewDmDate(d, appid)
			case "aegis_resource":
				pool[appid] = bsn.NewAegisResource(d, appid, b.C)
			}
		default:
			// unknown strategies are skipped.
		}
	}
	return
}

View File

@@ -0,0 +1,30 @@
package base
import (
"flag"
"fmt"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
. "github.com/smartystreets/goconvey/convey"
)
// WithBase wraps f in a closure that builds a Base from the goconvey test
// config file before invoking it.
func WithBase(f func(b *Base)) func() {
	return func() {
		dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
		flag.Set("conf", dir)
		flag.Parse()
		conf.Init()
		d := NewBase(conf.Conf)
		f(d)
	}
}
// Test_NewAppPool smoke-tests construction of the per-business app pool.
func Test_NewAppPool(t *testing.T) {
	Convey("newAppPool", t, WithBase(func(b *Base) {
		pool := b.newAppPool(b.D)
		fmt.Println(pool)
	}))
}

View File

@@ -0,0 +1,59 @@
# Bazel build for go-common/app/job/main/search/dao/business.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Library target bundling the bespoke per-business implementations.
go_library(
    name = "go_default_library",
    srcs = [
        "aegis_resource.go",
        "archive_video_relation.go",
        "avr_archive.go",
        "dm_date.go",
        "log.go",
    ],
    importpath = "go-common/app/job/main/search/dao/business",
    tags = ["automanaged"],  # kept in sync by the BUILD generator
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/dao:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/log/infoc:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/gopkg.in/olivere/elastic.v5:go_default_library",
    ],
)

# Source-file groups consumed by parent packages.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# Unit tests for the business implementations.
go_test(
    name = "go_default_test",
    srcs = ["business_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/dao:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

View File

@@ -0,0 +1,208 @@
package business
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// AegisResource drives the aegis_resource business: incremental updates are
// consumed from a databus binlog stream, full runs read from DB.
type AegisResource struct {
	d       *dao.Dao                   // shared dao (ES clients, DB/databus pools)
	c       *conf.Config               // global configuration
	appid   string                     // business app id, key into the pools
	attrs   *model.Attrs               // per-business configuration
	db      *xsql.DB                   // source database for full runs
	dtb     *databus.Databus           // incremental binlog stream
	offset  *model.LoopOffset          // loop offset bookkeeping
	mapData []model.MapData            // buffered rows awaiting bulk index
	commits map[int32]*databus.Message // newest message per partition
}
// NewAppDatabus .
func NewAegisResource(d *dao.Dao, appid string, c *conf.Config) (a *AegisResource) {
a = &AegisResource{
d: d,
c: c,
appid: appid,
attrs: d.AttrPool[appid],
offset: &model.LoopOffset{},
mapData: []model.MapData{},
db: d.DBPool[d.AttrPool[appid].DBName],
dtb: d.DatabusPool[d.AttrPool[appid].Databus.Databus],
commits: make(map[int32]*databus.Message),
}
return
}
// Business return business.
func (a *AegisResource) Business() string {
return a.attrs.Business
}
// InitIndex creates the ES index for this app, attaching to existing aliases
// when they can be fetched (nil aliases means "create from scratch").
func (a *AegisResource) InitIndex(c context.Context) {
	if aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix); err != nil {
		a.d.InitIndex(c, nil, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
	} else {
		// bugfix: the alias prefix was previously passed as the entity prefix in
		// this branch, unlike every sibling (e.g. Avr.InitIndex); use the entity
		// prefix so the backing index gets its proper name.
		a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
	}
}
// InitOffset inserts an initial offset row and resets the in-memory offset to
// id 0 at "now".
func (a *AegisResource) InitOffset(c context.Context) {
	a.d.InitOffset(c, a.offset, a.attrs, []string{})
	nowFormat := time.Now().Format("2006-01-02 15:04:05")
	a.offset.SetOffset(0, nowFormat)
}

// Offset loads the persisted offset, retrying every 3s until it succeeds.
func (a *AegisResource) Offset(c context.Context) {
	for {
		offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
		if err != nil {
			log.Error("a.d.Offset error(%v)", err)
			time.Sleep(time.Second * 3)
			continue
		}
		a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
		a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
		break
	}
}

// SetRecover records a recovery offset (id + time); the i parameter is unused
// here (kept for interface compatibility with multi-table consumers).
func (a *AegisResource) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages drains the databus until either AggCount rows are buffered or
// the ticker fires, then runs ExtraData enrichment and reports the batch size.
func (a *AegisResource) IncrMessages(c context.Context) (length int, err error) {
	ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(a.attrs.Databus.Ticker)))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-a.dtb.Messages():
			if !ok {
				// NOTE(review): this break only exits the select, matching the
				// sibling consumers; the loop then breaks unconditionally below.
				log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus.Databus)
				break
			}
			m := &model.Message{}
			a.commits[msg.Partition] = msg
			if err = json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				continue
			}
			if m.Table == "resource" || m.Table == "resource_result" || m.Table == "net_flow_resource" {
				if m.Action == "insert" || m.Action == "update" {
					var parseMap map[string]interface{}
					parseMap, err = a.d.JSON2map(m.New)
					// bugfix: check the error BEFORE reading parseMap (the
					// original indexed into it first and only then checked err).
					if err != nil {
						log.Error("a.JSON2map error(%v)", err)
						continue
					}
					// the document id is the resource id.
					if _, ok := parseMap["rid"]; ok {
						parseMap["_id"] = parseMap["rid"]
						parseMap["id"] = parseMap["rid"]
					}
					// "state" is only meaningful on resource_result rows.
					if _, sok := parseMap["state"]; m.Table != "resource_result" && sok {
						delete(parseMap, "state")
					}
					log.Info(fmt.Sprintf("%v: %+v", a.attrs.AppID, parseMap))
					a.mapData = append(a.mapData, parseMap)
				}
			}
			if len(a.mapData) < a.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
		}
		break
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{})
	}
	length = len(a.mapData)
	return
}
// AllMessages loads the next page of rows from the DB for a full re-index,
// enriches them, advances the temp offset to the last row's _id, and reports
// the batch size.
func (a *AegisResource) AllMessages(c context.Context) (length int, err error) {
	rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.OffsetID, a.attrs.Other.Size)
	log.Info("appid: %s allMessages Current offsetID: %d", a.appid, a.offset.OffsetID)
	if err != nil {
		log.Error("AllMessages db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("AllMessages rows.Scan() error(%v)", err)
			return
		}
		a.mapData = append(a.mapData, item)
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{})
		// offset: after ExtraData the values are plain int64 (cf. Avr.AllMessages).
		// bugfix: the original asserted v.(interface{}) — which always succeeds —
		// and then did an unchecked .(int64) that could panic; use a checked
		// assertion so a bad _id is logged instead of crashing the job.
		if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok && v != nil {
			if id, ok := v.(int64); ok {
				a.offset.SetTempOffset(id, "")
				a.offset.SetRecoverTempOffset(id, "")
			} else {
				log.Error("dtb.all._id interface error")
			}
		} else {
			log.Error("dtb.all._id nil error")
		}
	}
	length = len(a.mapData)
	return
}
// BulkIndex writes mapData[start:end] to ES; the source path (DB row shape vs
// databus JSON shape) follows the Business.Index mode flag.
func (a *AegisResource) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
	partData := a.mapData[start:end]
	if a.c.Business.Index {
		err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
	} else {
		err = a.d.BulkDatabusData(c, a.attrs, writeEntityIndex, partData...)
	}
	return
}

// Commit persists progress: the DB offset row in full-index mode, otherwise
// the buffered databus messages (one per partition). Always clears the batch.
func (a *AegisResource) Commit(c context.Context) (err error) {
	if a.c.Business.Index {
		err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
	} else {
		for k, cos := range a.commits {
			if err = cos.Commit(); err != nil {
				log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
				continue
			}
			delete(a.commits, k)
		}
	}
	a.mapData = []model.MapData{}
	return
}

// Sleep pauses between polling rounds per the configured interval.
func (a *AegisResource) Sleep(c context.Context) {
	time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}

// Size returns the configured page size for DB reads / bulk writes.
func (a *AegisResource) Size(c context.Context) int {
	return a.attrs.Other.Size
}

View File

@@ -0,0 +1,197 @@
package business
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
// Avr consumes the archive-video-relation single-table binlog stream and
// batches rows for ES indexing; also supports DB-driven full rebuilds.
type Avr struct {
	c       *conf.Config
	d       *dao.Dao
	appid   string
	attrs   *model.Attrs
	db      *xsql.DB
	dtb     *databus.Databus
	offset  *model.LoopOffset
	mapData []model.MapData // accumulated rows awaiting bulk index
	commits map[int32]*databus.Message // last message per partition, for commit
}

// NewAvr builds an Avr consumer for appid from the dao pools.
func NewAvr(d *dao.Dao, appid string, c *conf.Config) (a *Avr) {
	a = &Avr{
		c:       c,
		d:       d,
		appid:   appid,
		attrs:   d.AttrPool[appid],
		offset:  &model.LoopOffset{},
		mapData: []model.MapData{},
		db:      d.DBPool[d.AttrPool[appid].DBName],
		dtb:     d.DatabusPool[d.AttrPool[appid].Databus.Databus],
		commits: make(map[int32]*databus.Message),
	}
	return
}

// Business returns the business name from the app attributes.
func (a *Avr) Business() string {
	return a.attrs.Business
}
// InitIndex creates the ES index for this app, reusing existing aliases when
// they can be fetched; a nil alias set means "create from scratch".
func (a *Avr) InitIndex(c context.Context) {
	aliases, err := a.d.GetAliases(a.attrs.ESName, a.attrs.Index.IndexAliasPrefix)
	if err != nil {
		// no alias info available — fall back to a fresh index.
		aliases = nil
	}
	a.d.InitIndex(c, aliases, a.attrs.ESName, a.attrs.Index.IndexAliasPrefix, a.attrs.Index.IndexEntityPrefix, a.attrs.Index.IndexMapping)
}
// InitOffset inserts an initial offset row and resets the in-memory offset to
// id 0 at "now".
func (a *Avr) InitOffset(c context.Context) {
	a.d.InitOffset(c, a.offset, a.attrs, []string{})
	nowFormat := time.Now().Format("2006-01-02 15:04:05")
	a.offset.SetOffset(0, nowFormat)
}

// Offset loads the persisted offset, retrying every 3s until it succeeds.
func (a *Avr) Offset(c context.Context) {
	for {
		offset, err := a.d.Offset(c, a.appid, a.attrs.Table.TablePrefix)
		if err != nil {
			log.Error("a.d.Offset error(%v)", err)
			time.Sleep(time.Second * 3)
			continue
		}
		a.offset.SetReview(offset.ReviewID, offset.ReviewTime)
		a.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
		break
	}
}

// SetRecover records a recovery offset; i is unused (interface compatibility).
func (a *Avr) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	a.offset.SetRecoverOffset(recoverID, recoverTime)
}
// IncrMessages drains the databus until AggCount rows are buffered or the
// ticker fires, enriches them with archive/video/audit/ups extra data, and
// reports the batch size.
func (a *Avr) IncrMessages(c context.Context) (length int, err error) {
	ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(a.attrs.Databus.Ticker)))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-a.dtb.Messages():
			if !ok {
				// fix: a.attrs.Databus is a struct; format its name, not the
				// struct, with %s.
				log.Error("databus: %s binlog consumer exit!!!", a.attrs.Databus.Databus)
				break
			}
			m := &model.Message{}
			a.commits[msg.Partition] = msg
			if err = json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				continue
			}
			// only rows from the configured table are indexed.
			if m.Table == a.attrs.Table.TablePrefix {
				if m.Action == "insert" || m.Action == "update" {
					var parseMap map[string]interface{}
					parseMap, err = a.d.JSON2map(m.New)
					if err != nil {
						log.Error("a.JSON2map error(%v)", err)
						continue
					}
					a.mapData = append(a.mapData, parseMap)
				}
			}
			if len(a.mapData) < a.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
		}
		break
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "dtb", []string{"archive", "video", "audit", "ups"})
		log.Info("dtb msg: (%v)", a.mapData)
	}
	length = len(a.mapData)
	return
}
// AllMessages loads the next page of rows for a full re-index (paged by
// RecoverID), enriches them, and advances the temp offsets to the last _id.
func (a *Avr) AllMessages(c context.Context) (length int, err error) {
	rows, err := a.db.Query(c, a.attrs.DataSQL.SQLByID, a.offset.RecoverID, a.attrs.Other.Size)
	log.Info("appid: %s allMessages Current RecoverID: %d", a.appid, a.offset.RecoverID)
	if err != nil {
		log.Error("AllMessages db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := dao.InitMapData(a.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("AllMessages rows.Scan() error(%v)", err)
			return
		}
		a.mapData = append(a.mapData, item)
	}
	if len(a.mapData) > 0 {
		a.mapData, err = a.d.ExtraData(c, a.mapData, a.attrs, "db", []string{"audit", "ups"})
		if v, ok := a.mapData[len(a.mapData)-1]["_id"]; ok {
			// bugfix: the original v.(int64) assertion was unchecked and would
			// panic on an unexpected type; log instead.
			if id, ok := v.(int64); ok {
				a.offset.SetTempOffset(id, "")
				a.offset.SetRecoverTempOffset(id, "")
			} else {
				log.Error("avr.all._id is not int64(%+v)", v)
			}
		}
	}
	length = len(a.mapData)
	return
}
// BulkIndex writes mapData[start:end] into ES via the DB-shaped bulk path.
func (a *Avr) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
	if len(a.mapData) > 0 {
		partData := a.mapData[start:end]
		err = a.d.BulkDBData(c, a.attrs, writeEntityIndex, partData...)
	}
	return
}

// Commit persists progress: the DB offset row in full-index mode, otherwise
// the buffered databus messages (one per partition). Always clears the batch.
func (a *Avr) Commit(c context.Context) (err error) {
	if a.c.Business.Index {
		err = a.d.CommitOffset(c, a.offset, a.attrs.AppID, a.attrs.Table.TablePrefix)
	} else {
		for k, cos := range a.commits {
			if err = cos.Commit(); err != nil {
				log.Error("appid(%s) commit error(%v)", a.attrs.AppID, err)
				continue
			}
			delete(a.commits, k)
		}
	}
	a.mapData = []model.MapData{}
	return
}

// Sleep pauses between polling rounds per the configured interval.
func (a *Avr) Sleep(c context.Context) {
	time.Sleep(time.Second * time.Duration(a.attrs.Other.Sleep))
}

// Size returns the configured page size for DB reads / bulk writes.
func (a *Avr) Size(c context.Context) int {
	return a.attrs.Other.Size
}

View File

@@ -0,0 +1,259 @@
package business
import (
"context"
"fmt"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/xstr"
)
// AvrArchive indexes archive rows (DB-driven only: no databus field) and
// merges extra per-archive data before bulk writing to ES.
type AvrArchive struct {
	d       *dao.Dao
	appid   string
	attrs   *model.Attrs
	db      *xsql.DB
	offset  *model.LoopOffset
	mapData []model.MapData // accumulated rows awaiting bulk index
}

// NewAvrArchive builds an AvrArchive for appid from the dao pools.
func NewAvrArchive(d *dao.Dao, appid string) (av *AvrArchive) {
	av = &AvrArchive{
		d:       d,
		appid:   appid,
		attrs:   d.AttrPool[appid],
		offset:  &model.LoopOffset{},
		mapData: []model.MapData{},
		db:      d.DBPool[d.AttrPool[appid].DBName],
	}
	return
}

// Business returns the business name from the app attributes.
func (av *AvrArchive) Business() string {
	return av.attrs.Business
}
// InitIndex creates the ES index for this app, reusing existing aliases when
// they can be fetched; a nil alias set means "create from scratch".
func (av *AvrArchive) InitIndex(c context.Context) {
	aliases, err := av.d.GetAliases(av.attrs.ESName, av.attrs.Index.IndexAliasPrefix)
	if err != nil {
		// no alias info available — fall back to a fresh index.
		aliases = nil
	}
	av.d.InitIndex(c, aliases, av.attrs.ESName, av.attrs.Index.IndexAliasPrefix, av.attrs.Index.IndexEntityPrefix, av.attrs.Index.IndexMapping)
}
// InitOffset inserts an initial offset row and resets the in-memory offset to
// id 0 at "now".
func (av *AvrArchive) InitOffset(c context.Context) {
	av.d.InitOffset(c, av.offset, av.attrs, []string{})
	nowFormat := time.Now().Format("2006-01-02 15:04:05")
	av.offset.SetOffset(0, nowFormat)
}
// Offset loads the persisted offset, retrying every 3s until it succeeds.
func (av *AvrArchive) Offset(c context.Context) {
	for {
		offset, err := av.d.Offset(c, av.appid, av.attrs.Table.TablePrefix)
		if err != nil {
			// fix: the log previously named a nonexistent "ac" receiver.
			log.Error("av.d.Offset error(%v)", err)
			time.Sleep(time.Second * 3)
			continue
		}
		av.offset.SetReview(offset.ReviewID, offset.ReviewTime)
		av.offset.SetOffset(offset.OffsetID(), offset.OffsetTime())
		break
	}
}
// SetRecover records a recovery offset; i is unused (interface compatibility).
func (av *AvrArchive) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
	av.offset.SetRecoverOffset(recoverID, recoverTime)
}

// IncrMessages pulls a page of recently-modified rows from the DB (by mtime,
// or id+mtime once looping), updates the offset from the rows themselves, and
// merges extra data tagged "Avr".
func (av *AvrArchive) IncrMessages(c context.Context) (length int, err error) {
	var rows *xsql.Rows
	log.Info("appid: %s IncrMessages Current OffsetTime: %s, OffsetID: %d", av.appid, av.offset.OffsetTime, av.offset.OffsetID)
	if !av.offset.IsLoop {
		rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByMTime, av.offset.OffsetTime, av.attrs.Other.Size)
	} else {
		rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByIDMTime, av.offset.OffsetID, av.offset.OffsetTime, av.attrs.Other.Size)
	}
	if err != nil {
		log.Error("db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			log.Error("IncrMessages rows.Scan() error(%v)", err)
			return
		}
		av.mapData = append(av.mapData, item)
	}
	length = len(av.mapData)
	if length > 0 {
		// offset: derive the new offset from the scanned rows.
		dao.UpdateOffsetByMap(av.offset, av.mapData...)
		// extra relevant data; note extraData may shrink mapData, so length is
		// re-reported from it.
		length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
	}
	return
}
// AllMessages pulls the next full-index page (paged by RecoverID), advances
// temp offsets from the raw scanned *interface{} _id, then merges extra data.
func (av *AvrArchive) AllMessages(c context.Context) (length int, err error) {
	var rows *xsql.Rows
	log.Info("appid: %s allMessages Current RecoverID: %d", av.appid, av.offset.RecoverID)
	if rows, err = av.db.Query(c, av.attrs.DataSQL.SQLByID, av.offset.RecoverID, av.attrs.Other.Size); err != nil {
		log.Error("AllMessages db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		item, row := dao.InitMapData(av.attrs.DataSQL.DataIndexFields)
		if err = rows.Scan(row...); err != nil {
			// a bad row is skipped rather than aborting the page.
			log.Error("AllMessages rows.Scan() error(%v)", err)
			continue
		}
		av.mapData = append(av.mapData, item)
	}
	length = len(av.mapData)
	if length > 0 {
		// offset: before extraData/TransData runs, scanned values are still
		// *interface{} cells from InitMapData, hence the double unwrap here.
		if av.mapData[length-1]["_id"] != nil {
			v := av.mapData[length-1]["_id"]
			if v2, ok := v.(*interface{}); ok {
				av.offset.SetTempOffset((*v2).(int64), "")
				av.offset.SetRecoverTempOffset((*v2).(int64), "")
			}
		}
		// extra relevant data (may shrink mapData; length is re-reported).
		length, err = av.extraData(c, "db", map[string]bool{"Avr": true})
	}
	return
}
// extraData normalizes the buffered rows for the given source ("db" rows get
// TransData, "dtb" rows TransDtb) and then merges every configured DataExtra
// whose tag is enabled. Returns the (possibly reduced) batch length.
func (av *AvrArchive) extraData(c context.Context, way string, tags map[string]bool) (length int, err error) {
	switch way {
	case "db":
		for i, item := range av.mapData {
			item.TransData(av.attrs)
			// copy the transformed entries back into the shared slice element.
			for k, v := range item {
				av.mapData[i][k] = v
			}
		}
	case "dtb":
		for i, item := range av.mapData {
			item.TransDtb(av.attrs)
			// NOTE(review): the element is replaced with a fresh map before the
			// transformed entries are copied in — presumably to drop keys that
			// TransDtb removed; confirm against model.MapData.TransDtb.
			av.mapData[i] = model.MapData{}
			for k, v := range item {
				av.mapData[i][k] = v
			}
		}
	}
	for _, ex := range av.attrs.DataExtras {
		if _, ok := tags[ex.Tag]; !ok {
			continue
		}
		switch ex.Type {
		case "slice":
			// slice-type extras are not supported here yet.
			continue
			//av.extraDataSlice(c, ex)
		default:
			length, _ = av.extraDataDefault(c, ex)
		}
	}
	return
}
// extraDataDefault merges one "default"-type DataExtra: it collects the join
// ids from the configured in_field, queries the extra table for those ids,
// copies a fixed set of fields onto the matching buffered rows, and keeps only
// rows that found a match. Returns the resulting batch length.
func (av *AvrArchive) extraDataDefault(c context.Context, ex model.AttrDataExtra) (length int, err error) {
	// filter ids from in_fields
	var (
		ids   []int64
		items map[int64]model.MapData
		temp  map[int64]model.MapData
	)
	cdtInField := ex.Condition["in_field"]
	items = make(map[int64]model.MapData)
	temp = make(map[int64]model.MapData)
	for _, md := range av.mapData {
		if v, ok := md[cdtInField]; ok {
			id, ok2 := v.(int64)
			if !ok2 {
				// guard the assertion instead of panicking on a bad field type.
				log.Error("extraDataDefault %s is not int64(%+v)", cdtInField, v)
				continue
			}
			// deduplicate ids (resolves the inline TODO): duplicates only bloat
			// the SQL IN(...) list; temp keeps the last row per id as before.
			if _, seen := temp[id]; !seen {
				ids = append(ids, id)
			}
			temp[id] = md
		}
	}
	// query extra data
	if len(ids) > 0 {
		var rows *xsql.Rows
		rows, err = av.d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(ids))+" and 1 = ? ", 1)
		if err != nil {
			log.Error("extraDataDefault db.Query error(%v)", err)
			return
		}
		for rows.Next() {
			item, row := dao.InitMapData(ex.Fields)
			if err = rows.Scan(row...); err != nil {
				log.Error("extraDataDefault rows.Scan() error(%v)", err)
				continue
			}
			if v, ok := item[ex.InField]; ok {
				if v2, ok := v.(*interface{}); ok {
					item.TransData(av.attrs)
					items[(*v2).(int64)] = item
				}
			}
		}
		rows.Close()
	}
	// merge data: copy the joined fields onto the buffered row and keep only
	// rows that matched.
	fds := []string{"_id", "cid", "vid", "aid", "v_ctime"}
	av.mapData = []model.MapData{}
	for k, item := range items {
		if v, ok := temp[k]; ok {
			for _, fd := range fds {
				if f, ok := item[fd]; ok {
					v[fd] = f
				}
			}
			av.mapData = append(av.mapData, v)
		}
	}
	length = len(av.mapData)
	return
}
// BulkIndex writes mapData[start:end] into ES via the DB-shaped bulk path.
func (av *AvrArchive) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
	partData := av.mapData[start:end]
	err = av.d.BulkDBData(c, av.attrs, writeEntityIndex, partData...)
	return
}

// Commit persists the offset row and clears the buffered batch.
func (av *AvrArchive) Commit(c context.Context) (err error) {
	err = av.d.CommitOffset(c, av.offset, av.attrs.AppID, av.attrs.Table.TablePrefix)
	av.mapData = []model.MapData{}
	return
}

// Sleep pauses between polling rounds per the configured interval.
func (av *AvrArchive) Sleep(c context.Context) {
	time.Sleep(time.Second * time.Duration(av.attrs.Other.Sleep))
}

// Size returns the configured page size for DB reads / bulk writes.
func (av *AvrArchive) Size(c context.Context) int {
	return av.attrs.Other.Size
}

View File

@@ -0,0 +1,124 @@
package business
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao"
. "github.com/smartystreets/goconvey/convey"
)
// WithBusinessArv wires a real dao from goconvey.toml and hands an Avr
// consumer (appid "archive_video") to the test body. Requires live config/DB.
func WithBusinessArv(f func(d *Avr)) func() {
	return func() {
		dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
		flag.Set("conf", dir)
		flag.Parse()
		conf.Init()
		d := dao.New(conf.Conf)
		bsn := NewAvr(d, "archive_video", conf.Conf)
		f(bsn)
	}
}

// Test_AvrRecover smoke-tests SetRecover.
// NOTE(review): err is never assigned (SetRecover returns nothing), so the
// ShouldBeNil assertion is vacuous — it only proves SetRecover didn't panic.
func Test_AvrRecover(t *testing.T) {
	Convey("set recover", t, WithBusinessArv(func(d *Avr) {
		var (
			err error
			c   = context.TODO()
		)
		d.SetRecover(c, 1000, "", 0)
		So(err, ShouldBeNil)
	}))
}

// Test_AvrInitOffset smoke-tests InitOffset against the configured DB.
func Test_AvrInitOffset(t *testing.T) {
	Convey("test close", t, WithBusinessArv(func(d *Avr) {
		d.InitOffset(context.TODO())
	}))
}
// WithBusinessDmDate wires a real dao from goconvey.toml and hands a DmDate
// consumer (appid "dm_search") to the test body. Requires live config/DB.
func WithBusinessDmDate(f func(d *DmDate)) func() {
	return func() {
		dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
		flag.Set("conf", dir)
		flag.Parse()
		conf.Init()
		d := dao.New(conf.Conf)
		bsn := NewDmDate(d, "dm_search")
		f(bsn)
	}
}

// Test_DmDateRecover smoke-tests SetRecover.
// NOTE(review): err is never assigned, so ShouldBeNil is vacuous — it only
// proves SetRecover didn't panic.
func Test_DmDateRecover(t *testing.T) {
	Convey("set recover", t, WithBusinessDmDate(func(d *DmDate) {
		var (
			err error
			c   = context.TODO()
		)
		d.SetRecover(c, 1000, "", 0)
		So(err, ShouldBeNil)
	}))
}

// Test_DmDateInitOffset smoke-tests InitOffset against the configured DB.
func Test_DmDateInitOffset(t *testing.T) {
	Convey("test close", t, WithBusinessDmDate(func(d *DmDate) {
		d.InitOffset(context.TODO())
	}))
}
// WithBusinessLog wires a real dao from goconvey.toml and hands a Log
// consumer (appid "log_audit") to the test body. Requires live config/DB.
func WithBusinessLog(f func(d *Log)) func() {
	return func() {
		dir, _ := filepath.Abs("../dao/cmd/goconvey.toml")
		flag.Set("conf", dir)
		flag.Parse()
		conf.Init()
		d := dao.New(conf.Conf)
		bsn := NewLog(d, "log_audit")
		f(bsn)
	}
}

// Test_LogRecover smoke-tests SetRecover.
// NOTE(review): err is never assigned, so ShouldBeNil is vacuous — it only
// proves SetRecover didn't panic.
func Test_LogRecover(t *testing.T) {
	Convey("set recover", t, WithBusinessLog(func(d *Log) {
		var (
			err error
			c   = context.TODO()
		)
		d.SetRecover(c, 1000, "", 0)
		So(err, ShouldBeNil)
	}))
}

// Test_LogInitOffset smoke-tests the (no-op) InitOffset.
func Test_LogInitOffset(t *testing.T) {
	Convey("test close", t, WithBusinessLog(func(d *Log) {
		d.InitOffset(context.TODO())
	}))
}

// Test_LogInitIndex smoke-tests the (no-op) InitIndex.
func Test_LogInitIndex(t *testing.T) {
	Convey("test init index", t, WithBusinessLog(func(d *Log) {
		d.InitIndex(context.TODO())
	}))
}

// Test_LogOffset smoke-tests the (no-op) Offset.
func Test_LogOffset(t *testing.T) {
	Convey("test offset", t, WithBusinessLog(func(d *Log) {
		d.Offset(context.TODO())
	}))
}

// Test_LogSetRecover smoke-tests SetRecover with zero values.
func Test_LogSetRecover(t *testing.T) {
	Convey("test set recover", t, WithBusinessLog(func(d *Log) {
		d.SetRecover(context.TODO(), 0, "", 0)
	}))
}

// Test_LogAllMessages smoke-tests the (no-op) AllMessages.
func Test_LogAllMessages(t *testing.T) {
	Convey("test set recover", t, WithBusinessLog(func(d *Log) {
		d.AllMessages(context.TODO())
	}))
}

View File

@@ -0,0 +1,352 @@
package business
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
	// minIDSQL finds the smallest id per sharded dm_index table newer than a
	// given ctime; used to seed per-table offsets.
	minIDSQL = "SELECT id FROM dm_index_%03d WHERE ctime > ? ORDER BY id ASC LIMIT 1"
)

// DmDate consumes danmaku (dm) rows from sharded dm_index_* tables and a
// binlog databus, and routes each document to a monthly dm_date_* index.
type DmDate struct {
	d                    *dao.Dao
	appid                string
	attrs                *model.Attrs
	db                   *xsql.DB
	dtb                  *databus.Databus
	offsets              model.LoopOffsets // one loop offset per shard table
	mapData              []model.MapData
	commits              map[int32]*databus.Message
	frontTwelveMonthDate string            // lower ctime bound for full indexing
	tableName            []string
	oidDayMap            map[string]string // dedupe: index_id -> index_name, reset near midnight
}

// NewDmDate builds a DmDate consumer for appid, allocating one offset per
// shard table in [TableFrom, TableTo].
func NewDmDate(d *dao.Dao, appid string) (dd *DmDate) {
	dd = &DmDate{
		d:                    d,
		appid:                appid,
		attrs:                d.AttrPool[appid],
		offsets:              make(map[int]*model.LoopOffset),
		commits:              make(map[int32]*databus.Message),
		frontTwelveMonthDate: "2017-08-01",
		oidDayMap:            make(map[string]string),
	}
	for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
		dd.offsets[i] = &model.LoopOffset{}
	}
	dd.db = d.DBPool[dd.attrs.DBName]
	dd.dtb = d.DatabusPool[dd.attrs.Databus.Databus]
	return
}
// Business returns the business name from the app attributes.
func (dd *DmDate) Business() string {
	return dd.attrs.Business
}

// InitIndex pre-creates one monthly index (alias + entity) for each month in
// the window [now-12, now+18), so writes never hit a missing index.
func (dd *DmDate) InitIndex(c context.Context) {
	var (
		indexAliasName  string
		indexEntityName string
	)
	aliases, err := dd.d.GetAliases(dd.attrs.ESName, dd.attrs.Index.IndexAliasPrefix)
	now := time.Now()
	for i := -12; i < 18; i++ {
		newDate := now.AddDate(0, i, 0).Format("2006-01")
		indexAliasName = dd.attrs.Index.IndexAliasPrefix + strings.Replace(newDate, "-", "_", -1)
		indexEntityName = dd.attrs.Index.IndexEntityPrefix + strings.Replace(newDate, "-", "_", -1)
		if err != nil {
			dd.d.InitIndex(c, nil, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
		} else {
			dd.d.InitIndex(c, aliases, dd.attrs.ESName, indexAliasName, indexEntityName, dd.attrs.Index.IndexMapping)
		}
	}
}
// InitOffset seeds one offset per shard table with the smallest row id newer
// than frontTwelveMonthDate (0 when the shard has no such row).
func (dd *DmDate) InitOffset(c context.Context) {
	dd.d.InitOffset(c, dd.offsets[0], dd.attrs, dd.tableName)
	log.Info("in InitOffset")
	for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
		var (
			id  int64
			err error
			row *xsql.Row
		)
		row = dd.db.QueryRow(c, fmt.Sprintf(minIDSQL, i), dd.frontTwelveMonthDate)
		if err = row.Scan(&id); err != nil {
			if err == xsql.ErrNoRows {
				// shard has no rows in range; id stays 0.
				log.Info("in ErrNoRows")
				err = nil
			} else {
				// NOTE(review): this continue moves to the NEXT shard (no retry),
				// leaving offsets[i] at its constructor value — confirm intended.
				log.Info("row.Scan error(%v)", err)
				log.Error("row.Scan error(%v)", err)
				time.Sleep(time.Second * 3)
				continue
			}
		}
		log.Info("here i am %d", i)
		dd.offsets[i] = &model.LoopOffset{}
		dd.offsets[i].OffsetID = id
	}
	log.Info("InitOffset over")
}
// Offset loads the persisted offset for every shard table, retrying a table
// on failure (mirrors the retry loop used by the sibling consumers).
func (dd *DmDate) Offset(c context.Context) {
	for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
		tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
		offset, err := dd.d.Offset(c, dd.attrs.AppID, tableName)
		if err != nil {
			log.Error("dd.d.Offset error(%v)", err)
			time.Sleep(time.Second * 3)
			// bugfix: the original fell through after an error and dereferenced
			// the nil offset below; retry this table instead.
			i--
			continue
		}
		dd.offsets[i].SetReview(offset.ReviewID, offset.ReviewTime)
		dd.offsets[i].SetOffset(offset.OffsetID(), offset.OffsetTime())
	}
}
// SetRecover is a no-op for DmDate (per-table recovery is not supported).
func (dd *DmDate) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}

// IncrMessages drains the binlog databus, reshaping each dm_index insert into
// a per-(oid, day) document and deduplicating via oidDayMap, until AggCount
// rows are buffered or the ticker fires.
func (dd *DmDate) IncrMessages(c context.Context) (length int, err error) {
	ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(dd.attrs.Databus.Ticker)))
	defer ticker.Stop()
	// reset the per-day dedupe map within 3 minutes of midnight so the next
	// day's documents are not suppressed by yesterday's entries.
	timeStr := time.Now().Format("2006-01-02")
	t, _ := time.ParseInLocation("2006-01-02", timeStr, time.Local)
	tomorrowZeroTimestamp := t.AddDate(0, 0, 1).Unix()
	nowTimestamp := time.Now().Unix()
	if tomorrowZeroTimestamp-nowTimestamp < 180 {
		dd.oidDayMap = nil
		dd.oidDayMap = make(map[string]string)
	}
	for {
		select {
		case msg, ok := <-dd.dtb.Messages():
			if !ok {
				log.Error("databus: %s binlog consumer exit!!!", dd.attrs.Databus)
				break
			}
			m := &model.Message{}
			dd.commits[msg.Partition] = msg
			if err = json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				continue
			}
			if m.Action == "insert" && strings.HasPrefix(m.Table, "dm_index") {
				var parseMap map[string]interface{}
				parseMap, err = dd.d.JSON2map(m.New)
				if err != nil {
					log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
					continue
				}
				newParseMap := dd.newDtbParseMap(c, parseMap)
				indexID := newParseMap["index_id"].(string)
				indexName := newParseMap["index_name"].(string)
				// one document per (oid, day): skip already-seen keys.
				if _, exists := dd.oidDayMap[indexID]; exists {
					continue
				}
				dd.oidDayMap[indexID] = indexName
				dd.mapData = append(dd.mapData, newParseMap)
			}
			if len(dd.mapData) < dd.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
		}
		break
	}
	if len(dd.mapData) > 0 {
		dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "dtb", []string{})
	}
	length = len(dd.mapData)
	//amd.d.extraData(c, amd, "dtb")
	return
}
// AllMessages pages each shard table past its offset, keeps only rows with
// ctime >= frontTwelveMonthDate, advances the shard's temp offset to the last
// kept row, and enriches the combined batch.
func (dd *DmDate) AllMessages(c context.Context) (length int, err error) {
	dd.mapData = []model.MapData{}
	for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
		var rows *xsql.Rows
		// OffsetID == 0 marks a shard with no rows in range (see InitOffset).
		if dd.offsets[i].OffsetID == 0 {
			continue
		}
		if rows, err = dd.db.Query(c, fmt.Sprintf(dd.attrs.DataSQL.SQLByID, dd.attrs.DataSQL.SQLFields, i), dd.offsets[i].OffsetID, dd.attrs.Other.Size); err != nil {
			log.Error("AllMessages db.Query error(%v)", err)
			return
		}
		tempList := []model.MapData{}
		for rows.Next() {
			item, row := dao.InitMapData(dd.attrs.DataSQL.DataIndexFields)
			if err = rows.Scan(row...); err != nil {
				log.Error("appMultipleDatabus.AllMessages rows.Scan() error(%v)", err)
				continue
			}
			newParseMap := dd.newParseMap(c, item)
			// drop rows whose ctime is missing or older than the window start.
			ctime, ok := newParseMap["ctime"].(*interface{})
			if ok {
				dbTime := (*ctime).(time.Time)
				dbTimeStr := dbTime.Format("2006-01-02")
				t1, err1 := time.Parse("2006-01-02", dd.frontTwelveMonthDate)
				t2, err2 := time.Parse("2006-01-02", dbTimeStr)
				if err1 != nil || err2 != nil || t1.After(t2) {
					continue
				}
			} else {
				continue
			}
			tempList = append(tempList, newParseMap)
			dd.mapData = append(dd.mapData, newParseMap)
		}
		rows.Close()
		tmpLength := len(tempList)
		if tmpLength > 0 {
			dd.offsets[i].SetTempOffset(tempList[tmpLength-1].PrimaryID(), tempList[tmpLength-1].StrCTime())
		}
	}
	length = len(dd.mapData)
	if length > 0 {
		dd.mapData, err = dd.d.ExtraData(c, dd.mapData, dd.attrs, "db", []string{})
	}
	log.Info("length is %d", length)
	return
}
// BulkIndex writes mapData[start:end] into ES via the DB-shaped bulk path.
func (dd *DmDate) BulkIndex(c context.Context, start int, end int, writeEntityIndex bool) (err error) {
	err = dd.d.BulkDBData(c, dd.attrs, writeEntityIndex, dd.mapData[start:end]...)
	return
}
// Commit persists progress: per-shard offset rows in full-index mode,
// otherwise the buffered databus messages. Always clears the batch.
func (dd *DmDate) Commit(c context.Context) (err error) {
	if dd.d.GetConfig(c).Business.Index {
		for i := dd.attrs.Table.TableFrom; i <= dd.attrs.Table.TableTo; i++ {
			tOffset := dd.offsets[i]
			// promote the temp offset captured by AllMessages, if any.
			if tOffset.TempOffsetID != 0 {
				tOffset.OffsetID = tOffset.TempOffsetID
			}
			if tOffset.TempOffsetTime != "" {
				tOffset.OffsetTime = tOffset.TempOffsetTime
			}
			tableName := fmt.Sprintf("%s%0"+dd.attrs.Table.TableZero+"d", dd.attrs.Table.TablePrefix, i)
			if err = dd.d.CommitOffset(c, tOffset, dd.attrs.AppID, tableName); err != nil {
				log.Error("appMultipleDatabus.Commit error(%v)", err)
				continue
			}
		}
	} else {
		// fix: the loop variable was named "c", shadowing the context parameter.
		for k, msg := range dd.commits {
			if err = msg.Commit(); err != nil {
				log.Error("appMultipleDatabus.Commit error(%v)", err)
				continue
			}
			delete(dd.commits, k)
		}
	}
	dd.mapData = []model.MapData{}
	return
}
// Sleep is a no-op: the DmDate loop needs no inter-round pause.
func (dd *DmDate) Sleep(c context.Context) {
}

// Size returns 0 (page size is driven per shard by attrs.Other.Size instead).
func (dd *DmDate) Size(c context.Context) int {
	return 0
}

// newParseMap derives routing fields from a DB-scanned row: index_name is the
// monthly index ("dm_date_YYYY_MM") and index_id is "<oid>_<YYYY_MM_DD>".
// DB values are still *interface{} cells ([]uint8 for dates), hence the
// unwrapping.
func (dd *DmDate) newParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
	res = parseMap
	indexName, strID := "", ""
	if res["month"] != nil {
		if month, ok := res["month"].(*interface{}); ok {
			mth := strings.Replace(dd.b2s((*month).([]uint8)), "-", "_", -1)
			indexName = "dm_date_" + mth
		}
	}
	if res["date"] != nil {
		if date, ok := res["date"].(*interface{}); ok {
			dte := strings.Replace(dd.b2s((*date).([]uint8)), "-", "_", -1)
			if oid, ok := res["oid"].(*interface{}); ok {
				strID = strconv.FormatInt((*oid).(int64), 10) + "_" + dte
			}
		}
	}
	res["index_name"] = indexName
	res["index_id"] = strID
	return
}
// newDtbParseMap derives routing fields from a databus (JSON) row: month/date
// come from ctime, index_name is the monthly index and index_id is
// "<oid>_<YYYY_MM_DD>". All keys except id/oid are stripped before the derived
// fields are written back.
func (dd *DmDate) newDtbParseMap(c context.Context, parseMap map[string]interface{}) (res map[string]interface{}) {
	res = parseMap
	indexName, strID, mth, dte, id := "", "", "", "", ""
	if res["ctime"] != nil {
		if ctime, ok := res["ctime"].(string); ok {
			t, _ := time.Parse("2006-01-02 15:04:05", ctime)
			mth = t.Format("2006-01")
			dte = t.Format("2006-01-02")
			indexName = "dm_date_" + strings.Replace(mth, "-", "_", -1)
		}
	}
	if res["oid"] != nil {
		if oid, ok := res["oid"].(int64); ok {
			strOid := strconv.FormatInt(oid, 10)
			strID = strOid + "_" + strings.Replace(dte, "-", "_", -1)
		}
	}
	if res["id"] != nil {
		if newID, ok := res["id"].(int64); ok {
			id = strconv.Itoa(int(newID))
		}
	}
	// keep only id/oid; everything else is replaced by the derived fields.
	for k := range res {
		if k == "id" || k == "oid" {
			continue
		}
		delete(res, k)
	}
	res["index_name"] = indexName
	res["index_id"] = strID
	res["month"] = mth
	res["date"] = dte
	res["id"] = id
	return
}
// b2s converts a []uint8 to string. []uint8 is []byte, so a single direct
// conversion replaces the original element-by-element copy loop.
func (dd *DmDate) b2s(bs []uint8) string {
	return string(bs)
}

View File

@@ -0,0 +1,355 @@
package business
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/dao"
"go-common/app/job/main/search/model"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"gopkg.in/olivere/elastic.v5"
)
// _sql loads per-business index routing config; the business id is appended
// to form the table name (digger_log_audit / digger_log_user_action).
const _sql = "SELECT id, index_format, index_version, index_cluster, additional_mapping, data_center FROM digger_"

// Log consumes behaviour/audit log events from databus and routes them to
// per-business ES indexes; events may also be mirrored to infoc.
type Log struct {
	d                 *dao.Dao
	appid             string
	attrs             *model.Attrs
	databus           *databus.Databus
	infoC             *infoc.Infoc
	infoCField        []string // ordered field list for infoc output
	mapData           []model.MapData
	commits           map[int32]*databus.Message
	business          map[int]*info // business id -> index routing info
	week              map[int]string // week-of-month bucket -> day-range suffix
	additionalMapping map[int]map[string]string // per-business extra field types
	defaultMapping    map[string]string         // appid-wide field type mapping
	mapping           map[int]map[string]string // default + additional, merged
}

// info is one business's index routing row from the digger_* table.
type info struct {
	Format     string
	Cluster    string
	Version    string
	DataCenter int8
}
// NewLog builds a Log consumer for appid ("log_audit" or "log_user_action"),
// loads per-business routing rows from the DB, and merges each business's
// additional field mapping over the appid's default mapping.
func NewLog(d *dao.Dao, appid string) (l *Log) {
	l = &Log{
		d:                 d,
		appid:             appid,
		attrs:             d.AttrPool[appid],
		databus:           d.DatabusPool[appid],
		infoC:             d.InfoCPool[appid],
		infoCField:        []string{},
		mapData:           []model.MapData{},
		commits:           map[int32]*databus.Message{},
		business:          map[int]*info{},
		additionalMapping: map[int]map[string]string{},
		mapping:           map[int]map[string]string{},
		week: map[int]string{
			0: "0107",
			1: "0815",
			2: "1623",
			3: "2431",
		},
	}
	// default field-type mapping and infoc column order differ per appid.
	switch appid {
	case "log_audit":
		l.defaultMapping = map[string]string{
			"uname":      "string",
			"uid":        "string",
			"business":   "string",
			"type":       "string",
			"oid":        "string",
			"action":     "string",
			"ctime":      "time",
			"int_0":      "int",
			"int_1":      "int",
			"int_2":      "int",
			"str_0":      "string",
			"str_1":      "string",
			"str_2":      "string",
			"extra_data": "string",
		}
		l.infoCField = []string{"uname", "uid", "business", "type", "oid", "action", "ctime",
			"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "str_3", "str_4", "extra_data"}
	case "log_user_action":
		l.defaultMapping = map[string]string{
			"mid":        "string",
			"platform":   "string",
			"build":      "string",
			"buvid":      "string",
			"business":   "string",
			"type":       "string",
			"oid":        "string",
			"action":     "string",
			"ip":         "string",
			"ctime":      "time",
			"int_0":      "int",
			"int_1":      "int",
			"int_2":      "int",
			"str_0":      "string",
			"str_1":      "string",
			"str_2":      "string",
			"extra_data": "string",
		}
		l.infoCField = []string{"mid", "platform", "build", "buvid", "business", "type", "oid", "action", "ip", "ctime",
			"int_0", "int_1", "int_2", "str_0", "str_1", "str_2", "extra_data"}
	default:
		// unknown appid: return the partially-built consumer without routing.
		log.Error("log appid error(%v)", appid)
		return
	}
	// load per-business routing rows (bad rows are skipped, not fatal).
	rows, err := d.SearchDB.Query(context.TODO(), _sql+appid)
	if err != nil {
		log.Error("log Query error(%v)", appid)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var (
			id                int
			additionalMapping string
		)
		info := &info{}
		if err = rows.Scan(&id, &info.Format, &info.Version, &info.Cluster, &additionalMapping, &info.DataCenter); err != nil {
			log.Error("Log New DB (%v)(%v)", id, err)
			continue
		}
		l.business[id] = info
		if additionalMapping != "" {
			var additionalMappingDict map[string]string
			if err = json.Unmarshal([]byte(additionalMapping), &additionalMappingDict); err != nil {
				log.Error("Log New Json (%v)(%v)", id, err)
				continue
			}
			l.additionalMapping[id] = additionalMappingDict
		}
	}
	// merged mapping: defaults first, then business-specific overrides.
	for b := range l.business {
		l.mapping[b] = map[string]string{}
		for k, v := range l.defaultMapping {
			l.mapping[b][k] = v
		}
		if a, ok := l.additionalMapping[b]; ok {
			for k, v := range a {
				l.mapping[b][k] = v
			}
		}
	}
	return
}
// Business reports the business name this Log app was configured with.
func (l *Log) Business() string {
	name := l.attrs.Business
	return name
}
// InitIndex implements the App interface. It is a no-op for the log app:
// index names are derived per record (business + ctime) at write time, so
// there is nothing to set up in advance here.
func (l *Log) InitIndex(c context.Context) {
}
// InitOffset implements the App interface. The log app is databus-driven and
// commits offsets through the consumer, so no offset rows are initialized.
func (l *Log) InitOffset(c context.Context) {
}
// Offset implements the App interface. No-op: the log app tracks progress via
// databus message commits instead of the digger_offset table.
func (l *Log) Offset(c context.Context) {
}
// MapData exposes the batch of decoded databus messages accumulated so far.
func (l *Log) MapData(c context.Context) (mapData []model.MapData) {
	mapData = l.mapData
	return
}
// Attrs returns the configuration attributes loaded for this app.
func (l *Log) Attrs(c context.Context) (attrs *model.Attrs) {
	attrs = l.attrs
	return
}
// SetRecover implements the App interface. No-op: recovery checkpoints do not
// apply to the databus-only log app.
func (l *Log) SetRecover(c context.Context, recoverID int64, recoverTime string, i int) {
}
// IncrMessages drains the databus consumer into l.mapData until either
// AggCount messages have been decoded or the ticker fires, then returns the
// accumulated batch length. Consumed messages are remembered per partition in
// l.commits so Commit can ack them after a successful bulk index.
func (l *Log) IncrMessages(c context.Context) (length int, err error) {
	var jErr error
	// Ticker bounds how long we wait to fill a batch (configured in ms).
	ticker := time.NewTicker(time.Duration(time.Millisecond * time.Duration(l.attrs.Databus.Ticker)))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-l.databus.Messages():
			if !ok {
				// Channel closed: this break only exits the select; the
				// unconditional break below then ends the loop and the
				// partial batch is returned.
				log.Error("databus: %s binlog consumer exit!!!", l.attrs.Databus)
				break
			}
			// Keep the newest message per partition for a later Commit.
			l.commits[msg.Partition] = msg
			var result map[string]interface{}
			decoder := json.NewDecoder(bytes.NewReader(msg.Value))
			decoder.UseNumber()
			if jErr = decoder.Decode(&result); jErr != nil {
				log.Error("appid(%v) json.Unmarshal(%s) error(%v)", l.appid, msg.Value, jErr)
				continue
			}
			// Convert json.Number values to int64 (UseNumber avoids float64
			// precision loss on large ids).
			for k, v := range result {
				switch t := v.(type) {
				case json.Number:
					if result[k], jErr = t.Int64(); jErr != nil {
						log.Error("appid(%v) log.bulkDatabusData.json.Number(%v)(%v)", l.appid, t, jErr)
					}
				}
			}
			l.mapData = append(l.mapData, result)
			// Keep reading until the batch is full; "continue" restarts the
			// outer for, skipping the terminating break below.
			if len(l.mapData) < l.attrs.Databus.AggCount {
				continue
			}
		case <-ticker.C:
			// Timeout: flush whatever has accumulated so far.
		}
		break
	}
	// TODO: extra parameters
	length = len(l.mapData)
	return
}
// AllMessages implements the App interface. The log app has no full-import
// path, so this always reports an empty batch.
func (l *Log) AllMessages(c context.Context) (length int, err error) {
	return
}
// BulkIndex writes the decoded messages in [start, end) to elasticsearch via
// bulkDatabusData. writeEntityIndex selects entity index names over aliases.
func (l *Log) BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error) {
	batch := l.mapData[start:end]
	if err = l.bulkDatabusData(c, l.attrs, writeEntityIndex, batch...); err != nil {
		log.Error("appid(%v) json.bulkDatabusData error(%v)", l.appid, err)
	}
	return
}
// Commit acks every pending databus message and clears the decoded batch.
// Messages whose ack fails stay in l.commits for the next attempt.
func (l *Log) Commit(c context.Context) (err error) {
	for partition, msg := range l.commits {
		if err = msg.Commit(); err != nil {
			log.Error("appid(%v) Commit error(%v)", l.appid, err)
			continue
		}
		delete(l.commits, partition)
	}
	l.mapData = []model.MapData{}
	return
}
// Sleep pauses the worker for the configured number of seconds.
func (l *Log) Sleep(c context.Context) {
	pause := time.Duration(l.attrs.Other.Sleep) * time.Second
	time.Sleep(pause)
}
// Size returns the configured batch size used for bulk indexing.
func (l *Log) Size(c context.Context) (size int) {
	size = l.attrs.Other.Size
	return
}
// bulkDatabusData routes each decoded message to a per-business, per-ctime
// index on that business's ES cluster and executes one bulk request per
// cluster. Records with an unknown business or unparseable ctime are logged
// and dropped. Businesses with DataCenter==1 are additionally mirrored to
// infoc. Fields are filtered/transformed per the business mapping before
// indexing.
func (l *Log) bulkDatabusData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
	var (
		request     elastic.BulkableRequest
		bulkRequest map[string]*elastic.BulkService
		businessID  int
	)
	bulkRequest = map[string]*elastic.BulkService{}
	for _, b := range bulkData {
		indexName := ""
		if business, ok := b["business"].(int64); ok {
			businessID = int(business)
			if v, ok := b["ctime"].(string); ok {
				if cTime, timeErr := time.Parse("2006-01-02 15:04:05", v); timeErr == nil {
					if info, ok := l.business[businessID]; ok {
						// "week" in the format is replaced by a week-of-month
						// label. NOTE(review): Day()/8 maps days 1-7 -> 0,
						// 8-15 -> 1, 16-23 -> 2, 24-31 -> 3; confirm l.week
						// is sized/labelled for this bucketing.
						suffix := strings.Replace(cTime.Format(info.Format), "week", l.week[cTime.Day()/8], -1) + "_" + info.Version
						if !writeEntityIndex {
							indexName = attrs.Index.IndexAliasPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
						} else {
							indexName = attrs.Index.IndexEntityPrefix + "_" + strconv.Itoa(businessID) + "_" + suffix
						}
					}
				}
			}
		}
		if indexName == "" {
			// Missing/invalid business or ctime: drop the record.
			log.Error("appid(%v) ac.d.bulkDatabusData business business(%v) data(%+v)", l.appid, b["business"], b)
			continue
		}
		esCluster := l.business[businessID].Cluster // existence of l.business[businessID] was verified above
		// Lazily create one BulkService per target cluster.
		if _, ok := bulkRequest[esCluster]; !ok {
			if _, eok := l.d.ESPool[esCluster]; eok {
				bulkRequest[esCluster] = l.d.ESPool[esCluster].Bulk()
			} else {
				log.Error("appid(%v) ac.d.bulkDatabusData cluster no find error(%v)", l.appid, esCluster)
				continue // skip this record
			}
		}
		// Mirror to the data center (infoc) when enabled for this business.
		if l.business[businessID].DataCenter == 1 {
			arr := make([]interface{}, len(l.infoCField))
			for i, f := range l.infoCField {
				if v, ok := b[f]; ok {
					arr[i] = fmt.Sprintf("%v", v)
				}
			}
			if er := l.infoC.Info(arr...); er != nil {
				log.Error("appid(%v) ac.infoC.Info error(%v)", l.appid, er)
			}
		}
		// Field post-processing: unmapped fields are removed; mapped fields
		// may be reshaped according to their declared type.
		for k, v := range b {
			if t, ok := l.mapping[businessID][k]; ok {
				switch t {
				case "int_to_bin":
					// Decompose a bitmask into its decimal power-of-two
					// components, e.g. 5 -> ["1","4"].
					if item, ok := v.(int64); ok {
						item := int(item)
						arr := []string{}
						for i := 0; item != 0; i++ {
							if item&1 == 1 {
								// (item&1)<<i == 1<<i when the low bit is set.
								arr = append(arr, strconv.Itoa(item&1<<uint(i)))
							}
							item = item >> 1
						}
						b[k] = arr
					} else {
						delete(b, k)
					}
				case "array":
					if arr, ok := v.([]interface{}); ok {
						b[k] = arr
					} else {
						delete(b, k)
					}
				}
			} else {
				delete(b, k)
			}
		}
		request = elastic.NewBulkIndexRequest().Index(indexName).Type(attrs.Index.IndexType).Doc(b)
		bulkRequest[esCluster].Add(request)
	}
	// Execute one bulk call per cluster; on multiple failures only the last
	// error is returned (all are logged).
	for _, v := range bulkRequest {
		if v.NumberOfActions() == 0 {
			continue
		}
		if _, err = v.Do(c); err != nil {
			log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
		}
	}
	return
}

View File

@@ -0,0 +1,25 @@
package dao
// import (
// "context"
// "go-common/app/job/main/search/model"
// "go-common/database/sql"
// "go-common/xstr"
// )
// const (
// _getAssetSQL = "SELECT id, name, type, src FROM digger_asset where id in (?)"
// )
// func (d *Dao) getAsset(c context.Context, ids []int64) (res *model.SQLAsset, err error) {
// res = new(model.SQLAsset)
// row := d.SearchDB.QueryRow(c, _getAssetSQL, xstr.JoinInts(ids))
// if err = row.Scan(&res.ID, &res.Name, &res.Type, &res.Src); err != nil {
// if err == sql.ErrNoRows {
// err = nil
// res = nil
// }
// }
// return
// }

View File

@@ -0,0 +1,266 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
)
const (
_getAttrsSQL = "SELECT appid,db_name,es_name,table_prefix,table_format,index_prefix,index_version,index_format,index_type,index_id,index_mapping, " +
"data_index_suffix,review_num,review_time,sleep,size,business,data_fields,data_extra,sql_by_id,sql_by_mtime,sql_by_idmtime,databus_info,databus_index_id FROM digger_app WHERE appid=?"
)
// attr loads and holds the search attribute configuration (digger_app row)
// for a single appid.
type attr struct {
	// d provides access to the search DB.
	d *Dao
	// appID identifies the digger_app row to load.
	appID string
	// attrs is the parsed configuration exposed to the rest of the job.
	attrs *model.Attrs
}
// newAttr builds an attr for appID and eagerly loads its configuration from
// the search DB. Load failures are logged; the (possibly partial) attr is
// returned regardless.
func newAttr(d *Dao, appID string) (ar *attr) {
	ar = &attr{
		d:     d,
		appID: appID,
		attrs: new(model.Attrs),
	}
	err := ar.initAttrs()
	if err != nil {
		log.Error("d.initAttrs error (%v)", err)
	}
	return
}
// initAttrs blocks until the digger_app row for this appid is readable, then
// parses every configuration section (src, table, index, dataSQL, extra,
// databus, other) into ar.attrs. A missing row keeps the retry loop spinning
// forever (getSQLAttrs returns nil,nil for ErrNoRows).
func (ar *attr) initAttrs() (err error) {
	var sqlAttrs *model.SQLAttrs
	for {
		if sqlAttrs, err = ar.getSQLAttrs(context.TODO()); err != nil || sqlAttrs == nil {
			log.Error("d.Attrs error (%v)", err)
			time.Sleep(time.Second * 3)
			continue
		}
		break
	}
	// attr-src: identity of the app and its backing db/es/databus names.
	// NOTE(review): getSQLAttrs never scans DtbName, so this is always the
	// zero value — confirm.
	ar.attrs.Business = sqlAttrs.Business
	ar.attrs.AppID = sqlAttrs.AppID
	ar.attrs.DBName = sqlAttrs.DBName
	ar.attrs.ESName = sqlAttrs.ESName
	ar.attrs.DtbName = sqlAttrs.DtbName
	// attr-table: table sharding layout.
	if err = ar.parseTable(sqlAttrs); err != nil {
		err = fmt.Errorf("parseTable appid(%s) err(%v)", ar.appID, err)
		return
	}
	// attr-index: index naming/splitting layout.
	if err = ar.parseIndex(sqlAttrs); err != nil {
		err = fmt.Errorf("parseIndex appid(%s) err(%v)", ar.appID, err)
		return
	}
	// attr-datasql: field mapping and the id/mtime query templates.
	if err = ar.parseDataSQL(sqlAttrs); err != nil {
		err = fmt.Errorf("parseDataSQL appid(%s) err(%v)", ar.appID, err)
		return
	}
	// attr-sql
	// attr-data_extra: cross-db enrichment sources.
	if err = ar.parseExtraData(sqlAttrs); err != nil {
		err = fmt.Errorf("parseExtraData appid(%s) err(%v)", ar.appID, err)
		return
	}
	// attr-databus: consumer name plus batching parameters.
	if err = ar.parseDatabus(sqlAttrs); err != nil {
		err = fmt.Errorf("parseDatabus appid(%s) err(%v)", ar.appID, err)
		return
	}
	// attr-other: review/sleep/size tuning knobs.
	ar.attrs.Other = &model.AttrOther{
		ReviewNum:  sqlAttrs.ReviewNum,
		ReviewTime: sqlAttrs.ReviewTime,
		Sleep:      sqlAttrs.Sleep,
		Size:       sqlAttrs.Size,
	}
	return
}
// getSQLAttrs reads the digger_app row for this appid. A missing row is not
// treated as an error: it returns (nil, nil) so the caller can retry.
func (ar *attr) getSQLAttrs(c context.Context) (res *model.SQLAttrs, err error) {
	res = new(model.SQLAttrs)
	row := ar.d.SearchDB.QueryRow(c, _getAttrsSQL, ar.appID)
	if err = row.Scan(&res.AppID, &res.DBName, &res.ESName, &res.TablePrefix, &res.TableFormat, &res.IndexAliasPrefix, &res.IndexVersion, &res.IndexFormat, &res.IndexType, &res.IndexID, &res.IndexMapping,
		&res.DataIndexSuffix, &res.ReviewNum, &res.ReviewTime, &res.Sleep, &res.Size, &res.Business, &res.DataFields, &res.DataExtraInfo, &res.SQLByID, &res.SQLByMTime, &res.SQLByIDMTime, &res.DatabusInfo, &res.DatabusIndexID); err != nil {
		if err == sql.ErrNoRows {
			err = nil
			res = nil
		}
	}
	return
}
// parseTable decodes the table_prefix/table_format columns into AttrTable.
// table_format is "split,from,to,zero,fixed|-": split "single" means one
// table (from/to unused); otherwise from/to bound the shard suffixes, zero is
// the zero-padding width, and "fixed" marks a fixed shard count.
func (ar *attr) parseTable(sqlAttrs *model.SQLAttrs) (err error) {
	table := new(model.AttrTable)
	table.TablePrefix = sqlAttrs.TablePrefix
	table.TableFormat = sqlAttrs.TableFormat
	tableFormat := strings.Split(table.TableFormat, ",")
	if len(tableFormat) != 5 {
		// fixed typo in the error message (was "tableForamt")
		err = fmt.Errorf("wrong tableFormat(%s)", tableFormat)
		return
	}
	if table.TableSplit = tableFormat[0]; table.TableSplit != "single" {
		if table.TableFrom, err = strconv.Atoi(tableFormat[1]); err != nil {
			return
		}
		if table.TableTo, err = strconv.Atoi(tableFormat[2]); err != nil {
			return
		}
	}
	table.TableZero = tableFormat[3]
	table.TableFixed = (tableFormat[4] == "fixed")
	ar.attrs.Table = table
	return
}
// parseIndex decodes the index_* columns into AttrIndex. index_format uses
// the same "split,from,to,zero,fixed" shape as table_format, and "base" is a
// reserved id field name that configs must not use.
func (ar *attr) parseIndex(sqlAttrs *model.SQLAttrs) (err error) {
	idx := &model.AttrIndex{
		IndexAliasPrefix:  sqlAttrs.IndexAliasPrefix,
		IndexEntityPrefix: sqlAttrs.IndexAliasPrefix + sqlAttrs.IndexVersion,
		IndexFormat:       sqlAttrs.IndexFormat,
		IndexType:         sqlAttrs.IndexType,
		IndexID:           sqlAttrs.IndexID,
		IndexMapping:      sqlAttrs.IndexMapping,
	}
	parts := strings.Split(idx.IndexFormat, ",")
	if len(parts) != 5 {
		err = fmt.Errorf("wrong indexFormat(%s)", parts)
		return
	}
	if idx.IndexID == "base" {
		err = fmt.Errorf("indexID Prohibition 'base' (%s)", parts)
		return
	}
	idx.IndexSplit = parts[0]
	if idx.IndexSplit != "single" {
		if idx.IndexFrom, err = strconv.Atoi(parts[1]); err != nil {
			return
		}
		if idx.IndexTo, err = strconv.Atoi(parts[2]); err != nil {
			return
		}
	}
	idx.IndexZero = parts[3]
	idx.IndexFixed = parts[4] == "fixed"
	ar.attrs.Index = idx
	return
}
// parseDataSQL decodes data_fields (either the new JSON form or the legacy
// "es:sql:expect:stored" comma-joined form) into AttrDataSQL, and builds the
// by-id/by-mtime/by-idmtime query templates from the configured field list.
func (ar *attr) parseDataSQL(sqlAttrs *model.SQLAttrs) (err error) {
	dataSQL := new(model.AttrDataSQL)
	dataSQL.DataIndexFormatFields = make(map[string]string)
	dataSQL.DataDtbFields = make(map[string][]string)
	dataSQL.DataFieldsV2 = make(map[string]model.AttrDataFields)
	dataSQL.DataIndexSuffix = sqlAttrs.DataIndexSuffix
	dataSQL.DataFields = sqlAttrs.DataFields
	dataSQL.DataExtraInfo = sqlAttrs.DataExtraInfo
	if dataSQL.DataFields == "" {
		return
	}
	p := []model.AttrDataFields{} //DataFieldsV2
	sqlFields := []string{}
	if e := json.Unmarshal([]byte(dataSQL.DataFields), &p); e != nil {
		// legacy comma/colon format. NOTE(review): exp[1..3] are indexed
		// without a length check, so a malformed entry panics — confirm
		// configs are validated upstream.
		fields := strings.Split(dataSQL.DataFields, ",")
		for _, v := range fields {
			exp := strings.Split(v, ":")
			indexFieldName := exp[0]
			dataSQL.DataIndexFields = append(dataSQL.DataIndexFields, indexFieldName)
			sqlFields = append(sqlFields, exp[1])
			dataSQL.DataIndexFormatFields[indexFieldName] = exp[2]
			if exp[3] == "n" {
				// "n" means the field is fetched but stripped before indexing.
				dataSQL.DataIndexRemoveFields = append(dataSQL.DataIndexRemoveFields, indexFieldName)
			}
		}
	} else {
		// JSON format (v2)
		for _, v := range p {
			dataSQL.DataFieldsV2[v.ESField] = v
			dataSQL.DataIndexFields = append(dataSQL.DataIndexFields, v.ESField)
			sqlFields = append(sqlFields, v.SQL)
			dataSQL.DataIndexFormatFields[v.ESField] = v.Expect
			if v.Stored == "n" {
				dataSQL.DataIndexRemoveFields = append(dataSQL.DataIndexRemoveFields, v.ESField)
			}
			if v.InDtb == "y" {
				dataSQL.DataDtbFields[v.Field] = append(dataSQL.DataDtbFields[v.Field], v.ESField)
			}
		}
	}
	// sqlFields order must stay aligned with attr.DataIndexFields.
	// NOTE(review): both slices are appended in lock-step above, so this
	// guard looks unreachable — confirm intent.
	if (len(sqlFields) != len(dataSQL.DataIndexFields)) && (len(sqlFields) == 0 || len(dataSQL.DataIndexFields) == 0) {
		log.Error("sqlFields and attr.DataIndexFields are different")
		return
	}
	dataSQL.SQLFields = strings.Join(sqlFields, ",")
	if ar.attrs.Table.TableSplit == "single" {
		// Single-table apps get the field list substituted now; sharded apps
		// substitute per shard later.
		dataSQL.SQLByID = fmt.Sprintf(sqlAttrs.SQLByID, dataSQL.SQLFields)
		dataSQL.SQLByMTime = fmt.Sprintf(sqlAttrs.SQLByMTime, dataSQL.SQLFields)
		dataSQL.SQLByIDMTime = fmt.Sprintf(sqlAttrs.SQLByIDMTime, dataSQL.SQLFields)
	} else {
		dataSQL.SQLByID = sqlAttrs.SQLByID
		dataSQL.SQLByMTime = sqlAttrs.SQLByMTime
		dataSQL.SQLByIDMTime = sqlAttrs.SQLByIDMTime
	}
	ar.attrs.DataSQL = dataSQL
	return
}
// parseExtraData decodes data_extra (JSON) into AttrDataExtras and registers
// every "es:sql:expect" triple from each extra's FieldsStr into the shared
// DataIndexFormatFields map. A JSON error is returned, but any entries
// already decoded are still processed (original behavior).
func (ar *attr) parseExtraData(sqlAttrs *model.SQLAttrs) (err error) {
	if sqlAttrs.DataExtraInfo != "" {
		err = json.Unmarshal([]byte(sqlAttrs.DataExtraInfo), &ar.attrs.DataExtras)
	}
	// append all format field from extra data
	for _, extra := range ar.attrs.DataExtras {
		if extra.FieldsStr == "" {
			continue
		}
		fields := strings.Split(extra.FieldsStr, ",")
		for _, field := range fields {
			exp := strings.Split(field, ":")
			// guard against malformed entries: indexing exp[2] blindly used
			// to panic with "slice bounds out of range" on bad config
			if len(exp) < 3 {
				log.Error("parseExtraData wrong fields(%s)", field)
				continue
			}
			ar.attrs.DataSQL.DataIndexFormatFields[exp[0]] = exp[2]
		}
	}
	return
}
// parseDatabus decodes databus_info ("name,aggcount,ticker") and
// databus_index_id ("primary:related") into AttrDatabus. Empty columns leave
// the corresponding fields zeroed.
func (ar *attr) parseDatabus(sqlAttrs *model.SQLAttrs) (err error) {
	dtb := new(model.AttrDatabus)
	if sqlAttrs.DatabusInfo != "" {
		parts := strings.Split(sqlAttrs.DatabusInfo, ",")
		if len(parts) != 3 {
			err = fmt.Errorf("wrong databusInfo(%s)", parts)
			return
		}
		dtb.Databus = parts[0]
		if dtb.AggCount, err = strconv.Atoi(parts[1]); err != nil {
			return
		}
		if dtb.Ticker, err = strconv.Atoi(parts[2]); err != nil {
			return
		}
	}
	if sqlAttrs.DatabusIndexID != "" {
		ids := strings.Split(sqlAttrs.DatabusIndexID, ":")
		if len(ids) != 2 {
			err = fmt.Errorf("wrong databusIndexID(%s)", ids)
			return
		}
		dtb.PrimaryID, dtb.RelatedID = ids[0], ids[1]
	}
	ar.attrs.Databus = dtb
	return
}

View File

@@ -0,0 +1,77 @@
package dao
import (
"context"
"encoding/json"
"errors"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
)
const (
_getBusinessSQL = "SELECT business, app_ids, asset_db, asset_es, asset_dtb FROM digger_business WHERE business=?"
)
// bns loads and holds the digger_business row for one business environment.
type bns struct {
	// d provides access to the search DB.
	d *Dao
	// business is the digger_business key to load.
	business string
	// bInfo is the parsed business info (app list etc.).
	bInfo *model.Bsn
}
// newBusiness constructs a bns for the given business key and loads its
// configuration from the search DB.
func newBusiness(d *Dao, business string) (bs *bns, err error) {
	bs = &bns{
		d:        d,
		business: business,
		bInfo:    new(model.Bsn),
	}
	err = bs.initBusiness()
	if err != nil {
		log.Error("d.initBusiness error (%v)", err)
	}
	return
}
// initBusiness loads the digger_business row, retrying every 3s on query
// failure, then unmarshals the configured app list. A missing row is a hard
// error (no retry), matching getBusiness's nil,nil convention.
func (bs *bns) initBusiness() (err error) {
	var row *model.SQLBusiness
	for {
		if row, err = bs.getBusiness(context.TODO()); err == nil {
			break
		}
		log.Error("initBusiness error (%v)", err)
		time.Sleep(time.Second * 3)
	}
	if row == nil {
		err = errors.New("initBusiness: " + bs.business + " not found in `digger_business`")
		return
	}
	bs.bInfo.Business = row.Business
	bs.bInfo.AppInfo = make([]model.BsnAppInfo, 0)
	// business-appinfo: JSON list of appids belonging to this business.
	if row.AppIds != "" {
		err = json.Unmarshal([]byte(row.AppIds), &bs.bInfo.AppInfo)
	}
	// asset db/es/dtb columns are currently unused here.
	return
}
// getBusiness fetches one digger_business row. A missing row yields
// (nil, nil) so the caller can distinguish "absent" from a query failure.
func (bs *bns) getBusiness(c context.Context) (res *model.SQLBusiness, err error) {
	res = new(model.SQLBusiness)
	err = bs.d.SearchDB.QueryRow(c, _getBusinessSQL, bs.business).Scan(&res.Business, &res.AppIds, &res.AssetDB, &res.AssetES, &res.AssetDtb)
	if err == nil {
		return
	}
	log.Error("business row.Scan error(%v)", err)
	if err == sql.ErrNoRows {
		err = nil
		res = nil
	}
	return
}

View File

@@ -0,0 +1,77 @@
package dao
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/model"
"go-common/library/database/sql"
"go-common/library/log"
xtime "go-common/library/time"
)
const (
_getOffsetSQL = "SELECT offset_incr_id,offset_incr_time,review_incr_id,review_icnr_time FROM digger_offset WHERE project=? AND table_name=?"
//TODO 有问题,字段少了
_updateOffsetSQL = "UPDATE digger_offset SET offset_incr_id=?,offset_incr_time=?,mtime=? WHERE project=? AND table_name=?"
//_initOffsetSQL = "INSERT INTO digger_offset(project,table_name,offset_incr_time,offset_recover_id,offset_recover_time) VALUES(?,?,?,?,?,?) " +
// "ON DUPLICATE KEY UPDATE offset_recover_id=?, offset_recover_time=?"
)
// Offset loads the stored incremental/review offsets for (appid, tableName).
// When no row exists yet the error is swallowed and a default starting offset
// (id 1, now) is returned so the caller can begin from scratch.
func (d *Dao) Offset(c context.Context, appid, tableName string) (res *model.Offset, err error) {
	res = new(model.Offset)
	row := d.SearchDB.QueryRow(c, _getOffsetSQL, appid, tableName)
	if err = row.Scan(&res.OffID, &res.OffTime, &res.ReviewID, &res.ReviewTime); err != nil {
		if err == sql.ErrNoRows {
			// expected on first run — not an error, so don't log it
			err = nil
			res.OffID = 1
			res.OffTime = xtime.Time(time.Now().Unix())
			return
		}
		// real failures were previously logged twice; once is enough
		log.Error("offset row.Scan error(%v)", err)
	}
	return
}
// updateOffset persists the current incremental offset (id + mtime) for
// (appid, tableName) into digger_offset.
func (d *Dao) updateOffset(c context.Context, offset *model.LoopOffset, appid, tableName string) (err error) {
	now := time.Now().Format("2006-01-02 15:04:05")
	_, err = d.SearchDB.Exec(c, _updateOffsetSQL, offset.OffsetID, offset.OffsetTime, now, appid, tableName)
	if err != nil {
		log.Error("updateOffset Exec() error(%v)", err)
	}
	return
}
// bulkInitOffset seeds one digger_offset row per table (or per shard derived
// from the table config when arr is empty), upserting only the recover
// columns on conflict.
func (d *Dao) bulkInitOffset(c context.Context, offset *model.LoopOffset, attrs *model.Attrs, arr []string) (err error) {
	const insertOffsetSQL = "INSERT INTO digger_offset(project,table_name,table_suffix,offset_incr_time,offset_recover_id,offset_recover_time) VALUES"
	now := time.Now().Format("2006-01-02 15:04:05")
	if len(arr) == 0 {
		// derive table names from the shard range; TableTo==0 means a single
		// unsharded table.
		for i := attrs.Table.TableFrom; i <= attrs.Table.TableTo; i++ {
			if attrs.Table.TableTo == 0 {
				arr = append(arr, attrs.Table.TablePrefix)
			} else {
				arr = append(arr, fmt.Sprintf("%s%0"+attrs.Table.TableZero+"d", attrs.Table.TablePrefix, i))
			}
		}
	}
	values := make([]string, 0, len(arr))
	for _, name := range arr {
		row := "('" + attrs.AppID + "','" + name + "','" + attrs.Table.TablePrefix + "','" + now + "'," + strconv.FormatInt(offset.RecoverID, 10) + ",'" + offset.RecoverTime + "')"
		values = append(values, row)
	}
	stmt := insertOffsetSQL + strings.Join(values, ",") + " ON DUPLICATE KEY UPDATE offset_recover_id=VALUES(offset_recover_id),offset_recover_time=VALUES(offset_recover_time)"
	_, err = d.SearchDB.Exec(c, stmt)
	return
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
. "github.com/smartystreets/goconvey/convey"
)
// WithCO wraps a goconvey test body with a fully-constructed Dao built from
// cmd/goconvey.toml, so dao tests run against the real configuration.
func WithCO(f func(d *Dao)) func() {
	return func() {
		dir, _ := filepath.Abs("../cmd/goconvey.toml")
		flag.Set("conf", dir)
		flag.Parse()
		conf.Init()
		d := New(conf.Conf)
		f(d)
	}
}
// Test_Offset exercises Dao.Offset and asserts it returns no error.
func Test_Offset(t *testing.T) {
	Convey("Test_Offset", t, WithCO(func(d *Dao) {
		var (
			err error
			c   = context.TODO()
		)
		// capture the returned error: it was previously discarded, making
		// the assertion below pass vacuously.
		_, err = d.Offset(c, "", "")
		So(err, ShouldBeNil)
	}))
}
// Test_UpdateOffset exercises Dao.updateOffset and asserts it returns no error.
func Test_UpdateOffset(t *testing.T) {
	Convey("Test_UpdateOffset", t, WithCO(func(d *Dao) {
		var (
			err    error
			c      = context.TODO()
			offset = &model.LoopOffset{}
		)
		// capture the returned error: it was previously discarded, making
		// the assertion below pass vacuously.
		err = d.updateOffset(c, offset, "", "")
		So(err, ShouldBeNil)
	}))
}
// Test_BulkInitOffset exercises Dao.bulkInitOffset with a single unsharded
// table and asserts it returns no error.
func Test_BulkInitOffset(t *testing.T) {
	Convey("Test_BulkInitOffset", t, WithCO(func(d *Dao) {
		var (
			c      = context.TODO()
			err    error
			offset = &model.LoopOffset{}
			attrs  = &model.Attrs{Table: &model.AttrTable{}}
		)
		attrs.Table.TableFrom, attrs.Table.TableTo = 0, 0
		err = d.bulkInitOffset(c, offset, attrs, []string{})
		So(err, ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,665 @@
package dao
import (
"bytes"
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
"go-common/library/xstr"
// "go-common/database/hbase"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"go-common/library/stat/prom"
"gopkg.in/olivere/elastic.v5"
)
var errorsCount = prom.BusinessErrCount
const (
// business
// search db name. for table attr,offset,manager.
_searchDB = "search"
)
// App is the contract every per-appid indexing worker implements. The job
// loop drives each app through offset init, message fetch (incremental or
// full), bulk indexing, and offset/message commit.
type App interface {
	// Business returns the app's business/appid name.
	Business() string
	// InitIndex creates indices/aliases needed before indexing (may be a no-op).
	InitIndex(c context.Context)
	// InitOffset seeds offset bookkeeping (may be a no-op).
	InitOffset(c context.Context)
	// Offset loads the current offsets (may be a no-op).
	Offset(c context.Context)
	// SetRecover stages a recovery checkpoint for shard i.
	SetRecover(c context.Context, recoverID int64, recoverTime string, i int)
	// IncrMessages fetches one incremental batch, returning its length.
	IncrMessages(c context.Context) (length int, err error)
	// AllMessages fetches one full-import batch, returning its length.
	AllMessages(c context.Context) (length int, err error)
	// BulkIndex writes batch rows [start, end) to ES; writeEntityIndex
	// targets entity indices rather than aliases.
	BulkIndex(c context.Context, start, end int, writeEntityIndex bool) (err error)
	// Commit persists offsets / acks consumed messages.
	Commit(c context.Context) (err error)
	// Sleep pauses between rounds.
	Sleep(c context.Context)
	// Size returns the bulk batch size.
	Size(c context.Context) (size int)
}
// Dao bundles every external resource the search job uses: the config-backed
// pools of databases, ES clusters, databus consumers and infoc reporters,
// keyed by name/appid.
type Dao struct {
	c *conf.Config
	// sms alert client (cluster connect failures etc.)
	sms *sms
	// search db holding attr/offset/manager tables
	SearchDB *xsql.DB
	// hbase *hbase.Client
	BusinessPool map[string]model.BsnAppInfo // appid -> business app info
	AttrPool     map[string]*model.Attrs     // appid -> parsed attrs
	AppPool      map[string]App              // appid -> worker
	DBPool       map[string]*xsql.DB         // db name -> mysql pool
	ESPool       map[string]*elastic.Client  // cluster name -> es client
	DatabusPool  map[string]*databus.Databus // appid -> consumer
	InfoCPool    map[string]*infoc.Infoc     // appid -> infoc reporter
}
// New builds a Dao from config: opens all DB pools (the "search" DB is
// mandatory and panics if missing), then derives business, attr, ES, databus
// and infoc pools from it.
func New(c *conf.Config) (d *Dao) {
	d = &Dao{
		c:      c,
		DBPool: newDbPool(c),
	}
	// check search db
	if d.SearchDB = d.DBPool[_searchDB]; d.SearchDB == nil {
		panic("SearchDB must config")
	}
	d.sms = newSMS(d)
	d.BusinessPool = newBusinessPool(d)
	d.AttrPool = newAttrPool(d)
	d.ESPool = newEsPool(c, d)
	// consumer pools are skipped in full-index mode (see their constructors)
	d.DatabusPool = newDatabusPool(c, d)
	d.InfoCPool = newInfoCPool(c, d)
	return
}
// newDatabusPool creates one databus consumer per appid that has a databus
// config; the pool stays empty in full-index mode.
func newDatabusPool(c *conf.Config, d *Dao) (pool map[string]*databus.Databus) {
	pool = make(map[string]*databus.Databus)
	if c.Business.Index {
		return
	}
	for appid := range d.BusinessPool {
		cfg, ok := c.Databus[appid]
		if !ok {
			continue
		}
		pool[appid] = databus.New(cfg)
	}
	return
}
// newInfoCPool creates one infoc reporter per appid that has an infoc config;
// the pool stays empty in full-index mode.
func newInfoCPool(c *conf.Config, d *Dao) (pool map[string]*infoc.Infoc) {
	pool = make(map[string]*infoc.Infoc)
	if c.Business.Index {
		return
	}
	for appid := range d.BusinessPool {
		cfg, ok := c.InfoC[appid]
		if !ok {
			continue
		}
		pool[appid] = infoc.New(cfg)
	}
	return
}
// newBusinessPool indexes every appid configured for the current business
// environment; a business lookup failure yields an empty pool.
func newBusinessPool(d *Dao) (pool map[string]model.BsnAppInfo) {
	pool = make(map[string]model.BsnAppInfo)
	bns, err := newBusiness(d, d.c.Business.Env)
	if err != nil {
		return
	}
	for _, app := range bns.bInfo.AppInfo {
		if app.AppID == "" {
			continue
		}
		pool[app.AppID] = app
	}
	return
}
// newAttrPool loads the attrs configuration for every appid in BusinessPool.
func newAttrPool(d *Dao) (pool map[string]*model.Attrs) {
	pool = make(map[string]*model.Attrs)
	for appid := range d.BusinessPool {
		pool[appid] = newAttr(d, appid).attrs
	}
	return
}
// SetRecover forwards a recovery checkpoint to the app identified by appid.
// NOTE(review): an appid absent from AppPool yields a nil App and a panic —
// callers appear to guarantee membership; confirm.
func (d *Dao) SetRecover(c context.Context, appid string, recoverID int64, recoverTime string, i int) {
	d.AppPool[appid].SetRecover(c, recoverID, recoverTime, i)
}
// newDbPool opens one MySQL pool per configured database, keyed by name.
func newDbPool(c *conf.Config) (pool map[string]*xsql.DB) {
	pool = make(map[string]*xsql.DB)
	for name, cfg := range c.DB {
		pool[name] = xsql.NewMySQL(cfg)
	}
	return
}
// newEsPool dials every configured ES cluster. A failed dial is reported via
// the prometheus error counter and an SMS alert, and that cluster is simply
// left out of the pool (consumers must handle the missing key).
func newEsPool(c *conf.Config, d *Dao) (esCluster map[string]*elastic.Client) {
	esCluster = make(map[string]*elastic.Client)
	for esName, e := range c.Es {
		if client, err := elastic.NewClient(elastic.SetURL(e.Addr...)); err == nil {
			esCluster[esName] = client
		} else {
			d.PromError("es:集群连接失败", "cluster: %s, %v", esName, err)
			if err := d.SendSMS(fmt.Sprintf("[search-job]%s集群连接失败", esName)); err != nil {
				d.PromError("es:集群连接短信失败", "cluster: %s, %v", esName, err)
			}
		}
	}
	return
}
// PromError bumps the business error counter for name and logs the formatted
// message.
func (d *Dao) PromError(name string, format string, args ...interface{}) {
	errorsCount.Incr(name)
	// expand args variadically: passing the slice itself would render as a
	// single []interface{} argument instead of filling the format verbs
	log.Error(format, args...)
}
// Close releases every database connection pool held by the dao.
func (d *Dao) Close() {
	for _, conn := range d.DBPool {
		conn.Close()
	}
}
// Ping reports the health of the search DB and the ES clusters; the first
// failure is counted in prometheus and returned.
func (d *Dao) Ping(c context.Context) (err error) {
	// TODO: ping every DB in DBPool, not only the search DB.
	if err = d.SearchDB.Ping(c); err != nil {
		d.PromError("db:ping", "")
		return
	}
	if err = d.pingESCluster(c); err != nil {
		d.PromError("es:ping", "d.pingESCluster error(%v)", err)
		return
	}
	return
}
// GetAliases collects every alias attached to indices whose names match
// indexAliasPrefix*. An unknown cluster name logs and returns an empty set.
func (d *Dao) GetAliases(esName, indexAliasPrefix string) (aliases map[string]bool, err error) {
	aliases = map[string]bool{}
	if _, ok := d.ESPool[esName]; !ok {
		log.Error("GetAliases 集群不存在 (%s)", esName)
		return
	}
	// assign to the named return instead of shadowing it with := — the
	// original always returned a nil error even when the query failed
	aliasesRes, aErr := d.ESPool[esName].Aliases().Index(indexAliasPrefix + "*").Do(context.TODO())
	if aErr != nil {
		log.Error("GetAliases(%s*) failed", indexAliasPrefix)
		err = aErr
		return
	}
	for _, indexDetails := range aliasesRes.Indices {
		for _, v := range indexDetails.Aliases {
			if v.AliasName != "" {
				aliases[v.AliasName] = true
			}
		}
	}
	return
}
// InitIndex ensures the entity index exists (creating it from indexMapping if
// needed, retrying every 3s until it succeeds) and attaches the alias when
// one is expected and not already present in aliases.
func (d *Dao) InitIndex(c context.Context, aliases map[string]bool, esName, indexAliasName, indexEntityName, indexMapping string) {
	if indexMapping == "" {
		log.Error("indexEntityName(%s) mapping is epmty", indexEntityName)
		return
	}
	// retry loop: transient ES errors (exists-check or create) back off 3s.
	for {
		exists, err := d.ESPool[esName].IndexExists(indexEntityName).Do(c)
		if err != nil {
			time.Sleep(time.Second * 3)
			continue
		}
		if !exists {
			if _, err := d.ESPool[esName].CreateIndex(indexEntityName).Body(indexMapping).Do(c); err != nil {
				log.Error("indexEntityName(%s) create err(%v)", indexEntityName, err)
				time.Sleep(time.Second * 3)
				continue
			}
		}
		break
	}
	// add aliases if necessary; a nil aliases map means "don't manage aliases"
	if aliases != nil && indexAliasName != indexEntityName {
		if _, ok := aliases[indexAliasName]; !ok {
			if _, err := d.ESPool[esName].Alias().Add(indexEntityName, indexAliasName).Do(context.TODO()); err != nil {
				log.Error("indexEntityName(%s) failed to add alias indexAliasName(%s) err(%v)", indexEntityName, indexAliasName, err)
			}
		}
	}
}
// InitOffset writes the initial offset rows for an app, retrying every 3s
// until the insert succeeds.
func (d *Dao) InitOffset(c context.Context, offset *model.LoopOffset, attrs *model.Attrs, arr []string) {
	for {
		err := d.bulkInitOffset(c, offset, attrs, arr)
		if err == nil {
			return
		}
		log.Error("project(%s) initOffset(%v)", attrs.AppID, err)
		time.Sleep(time.Second * 3)
	}
}
// InitMapData allocates one *interface{} cell per field and returns both the
// field->cell map and the cells in field order, ready to be passed to
// sql.Rows.Scan so scanned values land in the map.
func InitMapData(fields []string) (item model.MapData, row []interface{}) {
	// pre-size both containers; the final sizes are known up front
	item = make(map[string]interface{}, len(fields))
	// two passes deliberately preserve the original semantics for duplicate
	// field names: the map keeps one cell and row repeats that same cell
	for _, v := range fields {
		item[v] = new(interface{})
	}
	row = make([]interface{}, 0, len(fields))
	for _, v := range fields {
		row = append(row, item[v])
	}
	return
}
// UpdateOffsetByMap stages the next (id, mtime) offset from the last batch of
// rows. When an entire batch is stuck on the stored mtime (more rows share
// one mtime than fit in a batch) it flips IsLoop so the caller pages by id;
// on leaving loop mode it advances to the first row past the stuck mtime.
func UpdateOffsetByMap(offsets *model.LoopOffset, mapData ...model.MapData) {
	var (
		id    int64
		mtime string
	)
	length := len(mapData)
	if length == 0 {
		return
	}
	offsetTime := offsets.OffsetTime
	lastRes := mapData[length-1]
	id = lastRes.PrimaryID()
	lastMtime := lastRes.StrMTime()
	// whole batch still on the stored mtime -> switch to id-paging mode;
	// mtime stays empty so only the id offset is advanced.
	if (id != offsets.OffsetID) && (offsetTime == lastMtime) {
		offsets.IsLoop = true
	} else {
		if offsets.IsLoop {
			// leaving loop mode: take the first row whose mtime moved past
			// the stuck value.
			for _, p := range mapData {
				tempMtime := p.StrMTime()
				if tempMtime == offsetTime {
					continue
				}
				id = p.PrimaryID()
				mtime = tempMtime
				break
			}
		} else {
			mtime = lastMtime
		}
		offsets.IsLoop = false
	}
	offsets.SetTempOffset(id, mtime)
}
// CommitOffset promotes the staged (Temp*) offsets into the live offset and
// persists the result to digger_offset. Zero id / empty time are "unchanged"
// sentinels for the incremental offset; the recover offset uses a negative id
// / empty time instead.
func (d *Dao) CommitOffset(c context.Context, offset *model.LoopOffset, appid, tableName string) (err error) {
	if offset.TempOffsetID != 0 {
		offset.SetOffset(offset.TempOffsetID, "")
	}
	if offset.TempOffsetTime != "" {
		offset.SetOffset(0, offset.TempOffsetTime)
	}
	if offset.TempRecoverID >= 0 {
		offset.SetRecoverOffset(offset.TempRecoverID, "")
	}
	if offset.TempRecoverTime != "" {
		offset.SetRecoverOffset(-1, offset.TempRecoverTime)
	}
	err = d.updateOffset(c, offset, appid, tableName)
	return
}
// JSON2map decodes rowJSON into a generic map, converting every json.Number
// value to int64 (UseNumber avoids float64 precision loss on large ids).
func (d *Dao) JSON2map(rowJSON json.RawMessage) (result map[string]interface{}, err error) {
	dec := json.NewDecoder(bytes.NewReader(rowJSON))
	dec.UseNumber()
	if err = dec.Decode(&result); err != nil {
		log.Error("JSON2map.Unmarshal(%s) error(%v)", rowJSON, err)
		return nil, err
	}
	for key, val := range result {
		num, isNum := val.(json.Number)
		if !isNum {
			continue
		}
		if result[key], err = num.Int64(); err != nil {
			log.Error("JSON2map.json.Number(%v)(%v)", num, err)
			return nil, err
		}
	}
	return
}
// ExtraData applies the base transform selected by way ("db" -> TransData,
// "dtb" -> TransDtb) to every row, then enriches the rows with each
// configured extra data source. When tags is non-empty, only extras whose Tag
// matches one of them run. Extras on unknown DBs are skipped with a log.
// NOTE: with multiple extras, err reflects the last extra executed (original
// behavior preserved).
func (d *Dao) ExtraData(c context.Context, mapData []model.MapData, attrs *model.Attrs, way string, tags []string) (md []model.MapData, err error) {
	md = mapData
	switch way {
	case "db":
		for i, item := range mapData {
			item.TransData(attrs)
			for k, v := range item {
				md[i][k] = v
			}
		}
	case "dtb":
		for i, item := range mapData {
			item.TransDtb(attrs)
			for k, v := range item {
				md[i][k] = v
			}
		}
	}
	for _, ex := range attrs.DataExtras {
		// db exists or not
		if _, ok := d.DBPool[ex.DBName]; !ok {
			log.Error("ExtraData d.DBPool excludes:%s", ex.DBName)
			continue
		}
		// apply dispatches on the extra's type; shared by the tagged and
		// untagged paths (this switch used to be duplicated in both branches)
		apply := func(ex model.AttrDataExtra) {
			switch ex.Type {
			case "slice":
				md, err = d.extraDataSlice(c, md, attrs, ex)
			default:
				md, err = d.extraDataDefault(c, md, attrs, ex)
			}
		}
		if len(tags) != 0 {
			for _, tag := range tags {
				if tag == ex.Tag {
					apply(ex)
				}
			}
		} else {
			apply(ex)
		}
	}
	return
}
// extraDataDefault enriches mapData rows with one extra row each, joined on
// ex.Condition["in_field"]. Single-key joins batch the lookups with WHERE IN
// (chunked by 200, with optional int-sharded tables); multi-key joins fall
// back to one query per row. An optional "include" condition ("field=value")
// restricts which rows participate.
func (d *Dao) extraDataDefault(c context.Context, mapData []model.MapData, attrs *model.Attrs, ex model.AttrDataExtra) (md []model.MapData, err error) {
	md = mapData
	// filter ids from in_fields
	var (
		ids     []int64
		items   map[int64]model.MapData
		include []string
	)
	cdtInField := ex.Condition["in_field"]
	items = make(map[int64]model.MapData)
	if cld, ok := ex.Condition["include"]; ok {
		include = strings.Split(cld, "=")
	}
	var rows *xsql.Rows
	if cdtInFields := strings.Split(cdtInField, ","); len(cdtInFields) == 1 { //FIXME support locating one row via multiple primary-key conditions
		// collect the join ids from rows passing the include filter
		for _, m := range mapData {
			if v, ok := m[cdtInField]; ok {
				if len(include) >= 2 { //TODO support more condition kinds
					if cldVal, ok := m[include[0]]; ok && strconv.FormatInt(cldVal.(int64), 10) == include[1] {
						ids = append(ids, v.(int64))
					}
				} else {
					ids = append(ids, v.(int64)) //TODO deduplicate
				}
			}
		}
		// query extra data
		//TODO with heavily sharded businesses and a large batch size, a huge
		// WHERE IN against one table can be rejected or time out
		if len(ids) > 0 {
			if tableFormat := strings.Split(ex.TableFormat, ","); ex.TableFormat == "" || tableFormat[0] == "single" {
				// single table: chunk the WHERE IN by 200 ids
				i := 0
				flag := false
				//TODO drawback: memory heavy
				for {
					var id []int64
					if (i+1)*200 < len(ids) {
						id = ids[i*200 : (i+1)*200]
					} else {
						id = ids[i*200:]
						flag = true
					}
					rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(id))+" and 1 = ? ", 1)
					if err != nil {
						log.Error("extraDataDefault db.Query error(%v)", err)
						return
					}
					for rows.Next() {
						item, row := InitMapData(ex.Fields)
						if err = rows.Scan(row...); err != nil {
							log.Error("extraDataDefault rows.Scan() error(%v)", err)
							continue
						}
						if v, ok := item[ex.InField]; ok {
							if v2, ok := v.(*interface{}); ok {
								item.TransData(attrs)
								items[(*v2).(int64)] = item
							}
						}
					}
					rows.Close()
					i++
					if flag {
						break
					}
				}
			} else if tableFormat[0] == "int" {
				// int-sharded tables: bucket ids by shard before querying
				formatData := make(map[int64][]int64)
				var dbid = []int64{}
				if len(tableFormat) >= 6 { // e.g. dm report shards by subject id while matching on dmid
					for _, m := range mapData {
						if v, ok := m[tableFormat[5]]; ok {
							dbid = append(dbid, v.(int64)) // TODO deduplicate
						}
					}
				} else {
					dbid = ids
				}
				if len(dbid) != len(ids) {
					log.Error("tableFormat[5] len error(%v)(%v)", len(dbid), len(ids))
					return
				}
				for i := 0; i < len(ids); i++ {
					// NOTE: d here shadows the receiver inside this loop
					d, e := strconv.ParseInt(tableFormat[2], 10, 64)
					if e != nil {
						log.Error("extraDataDefault strconv.Atoi() error(%v)", e)
						continue
					}
					d = dbid[i] % (d + 1)
					if d < 0 { // possible dirty data
						continue
					}
					formatData[d] = append(formatData[d], ids[i])
				}
				for v, k := range formatData {
					rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, v, xstr.JoinInts(k))+" and 1 = ? ", 1)
					if err != nil {
						log.Error("extraDataDefaultTableFormat db.Query error(%v)", err)
						return
					}
					for rows.Next() {
						item, row := InitMapData(ex.Fields)
						if err = rows.Scan(row...); err != nil {
							log.Error("extraDataDefaultTableFormat rows.Scan() error(%v)", err)
							continue
						}
						if v, ok := item[ex.InField]; ok {
							if v2, ok := v.(*interface{}); ok {
								item.TransData(attrs)
								items[(*v2).(int64)] = item
							}
						}
					}
					rows.Close()
				}
			}
		}
		// merge data: copy matched extra fields into the original rows
		for i, m := range mapData {
			if len(include) >= 2 { //TODO support more condition kinds
				if cldVal, ok := m[include[0]]; !ok || strconv.FormatInt(cldVal.(int64), 10) != include[1] {
					continue
				}
			}
			if k, ok := m[cdtInField]; ok {
				if item, ok := items[k.(int64)]; ok {
					for _, v := range ex.RemoveFields {
						delete(item, v)
					}
					item.TransData(attrs)
					for k, v := range item {
						md[i][k] = v
					}
				}
			}
		}
	} else {
		// multi-key join: one query per row, binding every condition field
		for i, m := range mapData {
			var value []interface{}
			for _, v := range cdtInFields {
				value = append(value, m[v])
			}
			rows, err = d.DBPool[ex.DBName].Query(c, ex.SQL, value...)
			if err != nil {
				log.Error("extraDataDefault db.Query error(%v)", err)
				return
			}
			for rows.Next() {
				item, row := InitMapData(ex.Fields)
				if err = rows.Scan(row...); err != nil {
					log.Error("extraDataDefault rows.Scan() error(%v)", err)
					continue
				}
				item.TransData(attrs)
				for _, v := range ex.RemoveFields {
					delete(item, v)
				}
				for k, v := range item {
					md[i][k] = v
				}
			}
			rows.Close()
		}
	}
	return
}
// extraDataSlice joins extra rows whose values are aggregated into array
// fields: for each entry in ex.SliceField (comma separated) it collects all
// matching rows' values into one slice per parent document, keyed by
// ex.InField. Parent docs without matches get empty []int64 slices so the ES
// field is always present.
func (d *Dao) extraDataSlice(c context.Context, mapData []model.MapData, attrs *model.Attrs, ex model.AttrDataExtra) (md []model.MapData, err error) {
	md = mapData
	// filter ids from in_fields
	var (
		ids     []int64
		items   map[string]map[string][]interface{}
		include []string
	)
	cdtInField := ex.Condition["in_field"]
	items = make(map[string]map[string][]interface{})
	sliceFields := strings.Split(ex.SliceField, ",")
	// optional "include" condition: "field=value" restricts which parent rows
	// take part in the join.
	if cld, ok := ex.Condition["include"]; ok {
		include = strings.Split(cld, "=")
	}
	for _, m := range mapData {
		if v, ok := m[cdtInField]; ok {
			if len(include) >= 2 { // TODO support more operators than a single equality
				if cldVal, ok := m[include[0]]; ok && strconv.FormatInt(cldVal.(int64), 10) == include[1] {
					ids = append(ids, v.(int64))
				}
			} else {
				ids = append(ids, v.(int64)) // TODO deduplicate ids
			}
		}
	}
	// query extra data
	if len(ids) > 0 {
		var rows *xsql.Rows
		// the trailing "and 1 = ?" keeps the query parameterized even though
		// the id list is interpolated via Sprintf.
		rows, err = d.DBPool[ex.DBName].Query(c, fmt.Sprintf(ex.SQL, xstr.JoinInts(ids))+" and 1 = ? ", 1)
		if err != nil {
			log.Error("extraDataSlice db.Query error(%v)", err)
			return
		}
		for rows.Next() {
			item, row := InitMapData(ex.Fields)
			if err = rows.Scan(row...); err != nil {
				log.Error("extraDataSlice rows.Scan() error(%v)", err)
				continue
			}
			if v, ok := item[ex.InField]; ok {
				if v2, ok := v.(*interface{}); ok {
					// normalize the join key: integer and []byte columns both
					// become their string form.
					var key string
					switch (*v2).(type) {
					case int, int8, int16, int32, int64:
						key = strconv.FormatInt((*v2).(int64), 10)
					case []uint, []uint8, []uint16, []uint32, []uint64:
						key = string((*v2).([]byte))
					}
					for _, sf := range sliceFields {
						if _, ok := items[key]; !ok {
							items[key] = make(map[string][]interface{})
						}
						var res interface{}
						if v3, ok := item[sf].(*interface{}); ok {
							switch (*v3).(type) {
							case []uint, []uint8, []uint16, []uint32, []uint64:
								res = string((*v3).([]byte))
							default:
								// NOTE(review): the default branch appends the
								// *interface{} scan target itself, not its
								// dereferenced value -- confirm intended.
								res = v3
							}
						}
						items[key][sf] = append(items[key][sf], res)
					}
				}
			}
		}
		rows.Close()
	}
	//log.Info("items:%v", items)
	// merge data
	for i, m := range mapData {
		if len(include) >= 2 { // TODO support more operators than a single equality
			if cldVal, ok := m[include[0]]; !ok || strconv.FormatInt(cldVal.(int64), 10) != include[1] {
				continue
			}
		}
		if v, ok := m[cdtInField]; ok {
			if item, ok := items[strconv.FormatInt(v.(int64), 10)]; ok {
				for _, sf := range sliceFields {
					if list, ok := item[sf]; ok {
						md[i][sf] = list
					}
				}
			} else {
				// no joined rows: emit empty arrays so the ES field exists
				for _, sf := range sliceFields {
					md[i][sf] = []int64{}
				}
			}
		}
	}
	// for _, v := range md {
	// 	log.Info("md:%v", v)
	// }
	return
}
// GetConfig exposes the configuration the Dao was built with.
// The context parameter is unused but kept for interface compatibility.
func (d *Dao) GetConfig(c context.Context) *conf.Config {
	return d.c
}

View File

@@ -0,0 +1,59 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
. "github.com/smartystreets/goconvey/convey"
)
// WithDao loads the goconvey test config, builds a Dao from it and hands the
// Dao to f; returned closure is suitable for use inside a Convey block.
func WithDao(f func(d *Dao)) func() {
	return func() {
		confPath, _ := filepath.Abs("../cmd/goconvey.toml")
		flag.Set("conf", confPath)
		flag.Parse()
		conf.Init()
		f(New(conf.Conf))
	}
}
// Test_Reply verifies a Dao built from the test config answers Ping.
func Test_Reply(t *testing.T) {
	Convey("open app", t, WithDao(func(d *Dao) {
		err := d.Ping(context.TODO())
		So(err, ShouldBeNil)
	}))
}
// Test_SetRecover drives Dao.SetRecover for the archive_video appid.
// NOTE(review): err is declared but never assigned -- SetRecover's result
// (if it returns one) is discarded, so the assertion below is vacuous;
// confirm the signature and capture the return value.
func Test_SetRecover(t *testing.T) {
	Convey("set recover", t, WithDao(func(d *Dao) {
		var (
			err error
			c = context.TODO()
		)
		d.SetRecover(c, "archive_video", 1000, "", 0)
		So(err, ShouldBeNil)
	}))
}
// Test_Close checks that closing the Dao does not panic.
func Test_Close(t *testing.T) {
	Convey("test close", t, WithDao(func(d *Dao) { d.Close() }))
}
// Test_SendSMS pushes one message through the ops-mng SMS gateway.
func Test_SendSMS(t *testing.T) {
	Convey("test send sms", t, WithDao(func(d *Dao) {
		So(d.SendSMS("test sms"), ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,243 @@
package dao
import (
"context"
"fmt"
"reflect"
"strconv"
"time"
"go-common/app/job/main/search/model"
"go-common/library/log"
"go-common/library/stat/prom"
"gopkg.in/olivere/elastic.v5"
)
// bitPositions expands a bitmask into the 1-based positions of its set bits,
// e.g. 5 (0b101) -> [1, 3]. Shared by the "arr" and "bin" field formats,
// which previously duplicated this loop inline.
func bitPositions(mask int64) (arr []int) {
	bin := strconv.FormatInt(mask, 2)
	for i := len(bin) - 1; i >= 0; i-- {
		b := fmt.Sprintf("%c", bin[i])
		if b == "1" {
			arr = append(arr, len(bin)-i)
		}
	}
	return
}

// asInt64 normalizes a numeric document value to int64. Values decoded from
// databus JSON arrive as float64 while DB rows carry int64; the "ip" format
// already tolerated both, the "arr"/"bin" formats previously panicked on
// float64 -- this helper makes all of them safe.
func asInt64(v interface{}) (int64, bool) {
	switch n := v.(type) {
	case int64:
		return n, true
	case float64:
		return int64(n), true
	}
	return 0, false
}

// BulkDatabusData bulk-upserts documents that originate from databus into ES.
// For each document it resolves the target index (explicit "index_name" wins,
// otherwise derived from attrs via MapData.Index) and the doc id (explicit
// "index_id" wins, otherwise MapData.StrID), strips non-whitelisted fields,
// applies the per-field custom formats (ip/arr/bin/workflow/time) and sends
// everything as a single bulk request, recording prom metrics.
// TODO: needs to be unified with BulkDBData (see changelog v1.1.7).
func (d *Dao) BulkDatabusData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
	var (
		request     elastic.BulkableRequest
		bulkRequest = d.ESPool[attrs.ESName].Bulk()
	)
	for _, b := range bulkData {
		var (
			indexName string
			strID     string
		)
		if name, ok := b["index_name"]; ok {
			if indexName, ok = name.(string); ok {
				delete(b, "index_name")
			} else {
				// log tag fixed: this is BulkDatabusData, not BulkDBData
				log.Error("dao.es.BulkDatabusData index_name err")
				continue
			}
		} else {
			if !writeEntityIndex {
				indexName, _ = b.Index(attrs)
			} else {
				_, indexName = b.Index(attrs)
			}
		}
		if id, ok := b["index_id"]; ok {
			if strID, ok = id.(string); !ok {
				log.Error("es.BulkDatabusData.strID(%v)", id)
				continue
			}
		} else {
			if strID, ok = b.StrID(attrs); !ok {
				log.Error("es.BulkDatabusData.strID")
				continue
			}
		}
		if indexName == "" {
			continue
		}
		// strip fields the attr config excludes from ES
		for _, v := range attrs.DataSQL.DataIndexRemoveFields {
			delete(b, v)
		}
		if _, ok := b["index_field"]; ok {
			delete(b, "index_field")
			delete(b, "ctime")
			delete(b, "mtime")
		}
		// drop every field not whitelisted in DataIndexFormatFields
		for k := range b {
			if !d.Contain(k, attrs.DataSQL.DataIndexFormatFields) {
				delete(b, k)
			}
		}
		// snapshot the keys first: the loop below inserts "<k>_format" keys
		key := []string{}
		for k := range b {
			key = append(key, k)
		}
		for _, k := range key {
			customType, ok := attrs.DataSQL.DataIndexFormatFields[k]
			if !ok {
				continue
			}
			switch customType {
			case "ip":
				if n, ok := asInt64(b[k]); ok {
					b[k+"_format"] = b.InetNtoA(n)
				}
			case "arr":
				if n, ok := asInt64(b[k]); ok {
					b[k+"_format"] = bitPositions(n)
				}
			case "bin":
				if n, ok := asInt64(b[k]); ok {
					b[k] = bitPositions(n)
				}
			case "workflow":
				// packed state field: low 4 bits -> state, next 4 bits -> business_state
				if state, ok := b[k].(int64); ok {
					b["state"] = state & 15
					b["business_state"] = state >> 4 & 15
					delete(b, k)
				}
			case "time":
				// MySQL zero time is not a valid ES date
				if v, ok := b[k].(string); ok && v == "0000-00-00 00:00:00" {
					b[k] = "0001-01-01 00:00:00"
				}
			default:
				// other custom types need no transformation
			}
		}
		if strID == "" {
			// no id: let ES assign one
			request = elastic.NewBulkIndexRequest().Index(indexName).Type(attrs.Index.IndexType).Doc(b)
		} else {
			request = elastic.NewBulkUpdateRequest().Index(indexName).Type(attrs.Index.IndexType).Id(strID).Doc(b).DocAsUpsert(true)
		}
		bulkRequest.Add(request)
	}
	if bulkRequest.NumberOfActions() == 0 {
		return
	}
	now := time.Now()
	for i := 0; i < bulkRequest.NumberOfActions(); i++ {
		prom.BusinessInfoCount.Incr("redis:bulk:doc")
	}
	if _, err = bulkRequest.Do(c); err != nil {
		log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
	}
	prom.LibClient.Timing("redis:bulk", int64(time.Since(now)/time.Millisecond))
	return
}
// BulkDBData 写入es数据来自db.
func (d *Dao) BulkDBData(c context.Context, attrs *model.Attrs, writeEntityIndex bool, bulkData ...model.MapData) (err error) {
var (
indexName string
strID string
request elastic.BulkableRequest
bulkRequest = d.ESPool[attrs.ESName].Bulk()
)
for _, b := range bulkData {
if name, ok := b["index_name"]; ok {
if indexName, ok = name.(string); ok {
delete(b, "index_name")
} else {
log.Error("dao.es.BulkDBData index_name err")
continue
}
} else {
if !writeEntityIndex {
indexName, _ = b.Index(attrs)
} else {
_, indexName = b.Index(attrs)
}
}
if id, ok := b["index_id"]; ok {
if strID, ok = id.(string); !ok {
log.Error("es.BulkDBData.strID(%v)", id)
continue
}
} else {
if strID, ok = b.StrID(attrs); !ok {
log.Error("es.BulkDBData.strID")
continue
}
}
if indexName == "" || strID == "" {
continue
}
//attr提供要去除掉的字段不往ES中写
for _, v := range attrs.DataSQL.DataIndexRemoveFields {
delete(b, v)
}
request = elastic.NewBulkUpdateRequest().Index(indexName).Type(attrs.Index.IndexType).Id(strID).Doc(b).DocAsUpsert(true).RetryOnConflict(3)
//fmt.Println(request)
bulkRequest.Add(request)
}
if bulkRequest.NumberOfActions() == 0 {
// 注意这里request格式问题会引起action为0
return
}
log.Info("insert number is %d", bulkRequest.NumberOfActions())
now := time.Now()
// prom.BusinessInfoCount.Add("redis:bulk:doc", int64(bulkRequest.NumberOfActions()))
for i := 0; i < bulkRequest.NumberOfActions(); i++ {
prom.BusinessInfoCount.Incr("redis:bulk:doc")
}
if _, err = bulkRequest.Do(c); err != nil {
log.Error("appid(%s) bulk error(%v)", attrs.AppID, err)
}
prom.LibClient.Timing("redis:bulk", int64(time.Since(now)/time.Millisecond))
return
}
// pingESCluster health-checks the ES clusters in the pool.
// Currently a no-op: the real ping (kept below for reference) is disabled,
// so it always returns nil.
func (d *Dao) pingESCluster(ctx context.Context) (err error) {
	//for name, client := range d.ESPool {
	//	if _, _, err = client.Ping(d.c.Es[name].Addr[0]).Do(ctx); err != nil {
	//		d.PromError("Es:Ping", "%s:Ping error(%v)", name, err)
	//		return
	//	}
	//}
	return
}
// Contain reports whether obj is present in target: element membership when
// target is a slice/array, key membership when target is a map. Any other
// target kind -- or a nil target -- yields false (previously a nil target
// panicked in reflect.TypeOf(nil).Kind()).
func (d *Dao) Contain(obj interface{}, target interface{}) bool {
	if target == nil {
		return false
	}
	targetValue := reflect.ValueOf(target)
	switch reflect.TypeOf(target).Kind() {
	case reflect.Slice, reflect.Array:
		for i := 0; i < targetValue.Len(); i++ {
			if targetValue.Index(i).Interface() == obj {
				return true
			}
		}
	case reflect.Map:
		if targetValue.MapIndex(reflect.ValueOf(obj)).IsValid() {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,78 @@
package dao
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/model"
. "github.com/smartystreets/goconvey/convey"
)
// WithES loads the goconvey test config, builds a Dao and passes it to f.
func WithES(f func(d *Dao)) func() {
	return func() {
		confPath, _ := filepath.Abs("../cmd/goconvey.toml")
		flag.Set("conf", confPath)
		flag.Parse()
		conf.Init()
		f(New(conf.Conf))
	}
}
// Test_WithES checks a Dao built from the test config answers Ping.
func Test_WithES(t *testing.T) {
	Convey("Test_WithES", t, WithES(func(d *Dao) {
		So(d.Ping(context.TODO()), ShouldBeNil)
	}))
}
// Test_BulkDatabusData sends an empty databus bulk against the archive ES
// alias and asserts it succeeds. The bulk call's error was previously
// discarded, leaving the assertion to check a never-assigned err; it is now
// captured so the assertion is meaningful.
func Test_BulkDatabusData(t *testing.T) {
	Convey("Test_BulkDatabusData", t, WithES(func(d *Dao) {
		var (
			err   error
			c     = context.TODO()
			attrs = &model.Attrs{
				DataSQL: &model.AttrDataSQL{},
			}
		)
		attrs.ESName = "archive"
		attrs.DataSQL.DataIndexSuffix = ""
		err = d.BulkDatabusData(c, attrs, false)
		So(err, ShouldBeNil)
	}))
}
// Test_BulkDBData sends an empty DB bulk against the archive ES alias and
// asserts it succeeds. As in Test_BulkDatabusData, the returned error was
// previously discarded and the assertion vacuous; capture it.
func Test_BulkDBData(t *testing.T) {
	Convey("Test_BulkDBData", t, WithES(func(d *Dao) {
		var (
			err   error
			c     = context.TODO()
			attrs = &model.Attrs{
				DataSQL: &model.AttrDataSQL{},
			}
		)
		attrs.ESName = "archive"
		attrs.DataSQL.DataIndexSuffix = ""
		err = d.BulkDBData(c, attrs, false)
		So(err, ShouldBeNil)
	}))
}
// Test_PingESCluster exercises the (currently no-op) ES cluster ping.
func Test_PingESCluster(t *testing.T) {
	Convey("Test_PingESCluster", t, WithES(func(d *Dao) {
		So(d.pingESCluster(context.TODO()), ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,40 @@
package dao
// import (
// "context"
// "time"
// "go-common/app/job/main/search/model"
// "go-common/log"
// "golang/gohbase/hrpc"
// )
// // stat archive stat
// func (d *Dao) stat(c context.Context, tableName, startRow, endRow string, from, to uint64, limit int) (res []*model.HbaseArchiveStat, err error) {
// var (
// scan *hrpc.Scan
// results []*hrpc.Result
// ctx, cancel = context.WithTimeout(c, time.Duration(d.c.HBase.ReadsTimeout))
// )
// defer cancel()
// if scan, err = hrpc.NewScanRangeStr(ctx, tableName, startRow, endRow, from, to); err != nil {
// log.Error("d.hbase.stat hrpc.NewScanRangeStr table(%s) startRow(%s) endRow(%s) from(%d) to(%d) error(%v)", tableName, startRow, endRow, from, to, err)
// return
// }
// scan.SetLimit(limit)
// if results, err = d.hbase.Scan(ctx, scan); err != nil {
// log.Error("d.hbase.Scan error(%v)", err)
// return
// }
// for _, r := range results {
// for _, c := range r.Cells {
// oneRes := &model.HbaseArchiveStat{
// Row: string(c.Row),
// TimeStamp: uint64(*c.Timestamp),
// Value: string(c.Value),
// }
// res = append(res, oneRes)
// }
// }
// return
// }

View File

@@ -0,0 +1,72 @@
package dao
import (
"net/http"
"net/url"
"strconv"
"time"
"go-common/library/log"
)
// _smsURL is the ops-mng SMS alert gateway endpoint.
const _smsURL = "http://ops-mng.bilibili.co/api/sendsms"

// sms is a rate-limited client for the ops-mng SMS alert gateway.
type sms struct {
	d        *Dao
	client   *http.Client
	lastTime int64 // unix seconds of the last sent (or allowed) message
	interval int64 // minimum seconds between two messages
	params   *url.Values // base query params (phone, token); "message" is set per send
}
// newSMS builds the SMS client from config. lastTime is seeded one interval
// in the past so the very first alert may fire immediately.
func newSMS(d *Dao) (s *sms) {
	s = &sms{
		d:        d,
		client:   &http.Client{},
		lastTime: time.Now().Unix() - d.c.SMS.Interval, // remove the "-" to suppress alerts right after startup
		interval: d.c.SMS.Interval,
		params: &url.Values{
			"phone": []string{d.c.SMS.Phone},
			"token": []string{d.c.SMS.Token},
		},
	}
	return
}
// SendSMS sends an alert text through the ops-mng gateway, rate-limited to at
// most one message per configured interval; excess messages are dropped with
// only an error log ("sending SMS too frequently").
func (d *Dao) SendSMS(msg string) (err error) {
	if !d.sms.IntervalCheck() {
		log.Error("发短信太频繁啦, msg%s", msg)
		return
	}
	if err = d.sms.Send(msg); err != nil {
		log.Error("发短信失败, msg%s, error(%v)", msg, err)
	}
	return
}
// Send issues the alert as a GET request against the ops-mng gateway. It
// mutates the shared params ("message"); callers are serialized through
// SendSMS's interval check. The HTTP response body is now closed (it was
// previously leaked, preventing connection reuse).
func (sms *sms) Send(msg string) (err error) {
	var req *http.Request
	sms.params.Set("message", msg)
	if req, err = http.NewRequest("GET", _smsURL+"?"+sms.params.Encode(), nil); err != nil {
		return
	}
	// NOTE(review): time.Duration(1)/time.Millisecond is 0, so this header
	// has always advertised a zero timeout -- confirm the intended value.
	req.Header.Set("x1-bilispy-timeout", strconv.FormatInt(int64(time.Duration(1)/time.Millisecond), 10))
	var resp *http.Response
	if resp, err = sms.client.Do(req); err != nil {
		log.Error("ops-mng sendsms url(%s) error(%v)", _smsURL+"?"+sms.params.Encode(), err)
		return
	}
	resp.Body.Close()
	return
}
// IntervalCheck reports whether enough time has elapsed since the last SMS to
// allow another one; when it returns true the last-sent timestamp advances.
func (sms *sms) IntervalCheck() (send bool) {
	now := time.Now().Unix()
	if now-sms.lastTime < sms.interval {
		return false
	}
	sms.lastTime = now
	return true
}

View File

@@ -0,0 +1,39 @@
# Bazel build file for the search-job HTTP package.
# Tagged "automanaged": kept in sync by build tooling -- avoid hand edits.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Library target for the HTTP handlers (es.go, http.go).
go_library(
    name = "go_default_library",
    srcs = [
        "es.go",
        "http.go",
    ],
    importpath = "go-common/app/job/main/search/http",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//app/job/main/search/service:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
    ],
)

# Source globs used by the repo-wide all-srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,50 @@
package http
import (
"strconv"
"go-common/app/job/main/search/model"
"go-common/library/ecode"
bm "go-common/library/net/http/blademaster"
)
// action handles /x/search-job/action: validates appid and action, parses the
// optional recover_id and entity flags, and forwards to the service layer.
func action(c *bm.Context) {
	params := c.Request.Form
	appid := params.Get("appid")
	act := params.Get("action")
	if appid == "" || act == "" {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	if !model.ExistsAction[act] {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	var recoverID int64
	if raw := params.Get("recover_id"); raw != "" {
		// a malformed recover_id silently falls back to 0
		if rid, err := strconv.ParseInt(raw, 10, 64); err == nil {
			recoverID = rid
		}
	}
	writeEntityIndex := params.Get("entity") == "1"
	c.JSON(svr.HTTPAction(ctx, appid, act, recoverID, writeEntityIndex))
}
// stat handles /x/search-job/stat: returns indexing statistics for an appid.
func stat(c *bm.Context) {
	appid := c.Request.Form.Get("appid")
	if appid == "" {
		c.JSON(nil, ecode.RequestErr)
		return
	}
	c.JSON(svr.Stat(ctx, appid))
}

View File

@@ -0,0 +1,43 @@
package http
import (
"context"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
var (
	// ctx is the background context shared by handlers calling the service.
	ctx = context.TODO()
	// svr is the service singleton, wired in by Init.
	svr *service.Service
)

// Init stores the service, builds the blademaster HTTP server, registers the
// routes and starts listening; it panics if the listener cannot start,
// aborting process boot.
func Init(c *conf.Config, s *service.Service) {
	svr = s
	// init local router
	engine := bm.DefaultServer(c.HTTPServer)
	route(engine)
	// init local server
	if err := engine.Start(); err != nil {
		log.Error("bm.DefaultServer error(%v)", err)
		panic(err)
	}
}
// route registers the local API paths (action trigger, stats) and the
// standard health-check endpoint.
func route(e *bm.Engine) {
	e.GET("/x/search-job/action", action)
	e.GET("/x/search-job/stat", stat)
	e.Ping(ping)
}
// ping reports service health: 503 when the service's own Ping fails,
// otherwise the default OK response.
func ping(ctx *bm.Context) {
	if err := svr.Ping(ctx); err != nil {
		log.Error("search job ping error(%v)", err)
		ctx.AbortWithStatus(503)
	}
}

View File

@@ -0,0 +1,41 @@
# Bazel build file for the search-job model package.
# Tagged "automanaged": kept in sync by build tooling -- avoid hand edits.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Library target for the model definitions.
go_library(
    name = "go_default_library",
    srcs = [
        "config_asset.go",
        "config_attrs.go",
        "config_business.go",
        "config_offset.go",
        "es.go",
        "map_data.go",
        "model.go",
    ],
    importpath = "go-common/app/job/main/search/model",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/time:go_default_library",
    ],
)

# Source globs used by the repo-wide all-srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,9 @@
package model
// SQLAsset is one row of the asset table: a named resource (DB/ES/databus --
// see Type) with its connection/config source string.
type SQLAsset struct {
	ID   int64
	Name string
	Type int
	Src  string
}

View File

@@ -0,0 +1,131 @@
package model
// SQLAttrs is the raw, flat per-appid attribute row loaded from the DB;
// it is parsed into the structured Attrs below.
type SQLAttrs struct {
	AppID            string
	DBName           string
	ESName           string
	DtbName          string
	TablePrefix      string
	TableFormat      string
	IndexAliasPrefix string // alias index used when reading/writing data
	IndexVersion     string // entity index version used when creating the index
	IndexFormat      string
	IndexType        string
	IndexID          string
	IndexMapping     string
	DataIndexSuffix  string
	DataExtraInfo    string
	ReviewNum        int64
	ReviewTime       int64
	Sleep            float64
	Size             int
	Business         string
	DataFields       string
	SQLByID          string
	SQLByMTime       string
	SQLByIDMTime     string
	DatabusInfo      string
	DatabusIndexID   string
}
// Attrs is the parsed per-appid configuration: which DB/ES/databus assets to
// use plus the table, index, SQL, extra-data and misc sub-configs.
type Attrs struct {
	Business   string
	AppID      string
	DBName     string
	ESName     string
	DtbName    string
	Table      *AttrTable
	Index      *AttrIndex
	DataSQL    *AttrDataSQL
	DataExtras []AttrDataExtra // data this appID joins in from other databases
	Databus    *AttrDatabus
	Other      *AttrOther
}
// AttrTable describes how the source table is sharded: prefix + format over
// the [TableFrom, TableTo] shard range, with TableSplit naming the scheme.
type AttrTable struct {
	TablePrefix string
	TableFormat string
	TableSplit  string
	TableFrom   int
	TableTo     int
	TableZero   string
	TableFixed  bool
}
// AttrIndex describes the target ES index layout, mirroring AttrTable's
// sharding for the index side.
type AttrIndex struct {
	IndexAliasPrefix  string // alias index used when reading/writing data
	IndexEntityPrefix string // entity index name used when creating the index
	IndexFormat       string
	IndexSplit        string
	IndexFrom         int
	IndexTo           int
	IndexType         string
	IndexID           string
	IndexMapping      string
	IndexZero         string
	IndexFixed        bool
}
// AttrDataSQL holds the field-mapping and SQL configuration of an appid.
type AttrDataSQL struct {
	DataIndexSuffix       string                    // which shard-index the data belongs to
	DataFields            string
	DataFieldsV2          map[string]AttrDataFields // data_fields parsed from JSON; replaces the legacy DataFields
	DataIndexFields       []string                  // first segment of each DataFields entry
	DataIndexRemoveFields []string                  // fields stripped before writing to ES
	DataIndexFormatFields map[string]string         // per-field ES format, e.g. int64, time, int
	DataDtbFields         map[string][]string       // databus field -> ES fields; TODO consider map[string]map[string]bool or map[string][]string so one DB field can feed several ES fields
	DataExtraInfo         string
	SQLFields             string // second segment of DataFields: table-qualified columns, aliases, MySQL functions and other expressions
	SQLByID               string // full SQL minus the field list (needed because of left joins); same for the two below
	SQLByMTime            string
	SQLByIDMTime          string
}
// AttrDataFields is one entry of the JSON data_fields config: how a source
// column maps to an ES field.
type AttrDataFields struct {
	ESField string `json:"es"`
	Field   string `json:"field"`
	SQL     string `json:"sql"`
	Expect  string `json:"expect"`
	Stored  string `json:"stored"`
	InDtb   string `json:"in_dtb"`
}
// AttrDataExtra configures one extra-data join (possibly across databases),
// consumed by extraDataDefault/extraDataSlice in the dao.
type AttrDataExtra struct {
	Type         string            `json:"type"`
	Tag          string            `json:"tag"`
	Condition    map[string]string `json:"condition"`
	SliceField   string            `json:"slice_field"` // comma separated, multiple fields supported
	DBName       string            `json:"dbname"`
	Table        string            `json:"table"`
	TableFormat  string            `json:"table_format"`
	InField      string            `json:"in_field"`
	FieldsStr    string            `json:"fields_str"`
	Fields       []string          `json:"fields"`
	RemoveFields []string          `json:"remove_fields"`
	SQL          string            `json:"sql"`
}
// AttrDatabus configures incremental consumption from databus.
type AttrDatabus struct {
	DatabusInfo string
	Ticker      int    // tick interval (milliseconds)
	AggCount    int    // aggregation batch size
	Databus     string // databus Map key
	PrimaryID   string // primary-table index id
	RelatedID   string // related-table index id
}
// AttrOther holds miscellaneous tuning: review window, pacing sleep and
// batch size.
type AttrOther struct {
	ReviewNum  int64
	ReviewTime int64
	Sleep      float64
	Size       int
}

View File

@@ -0,0 +1,38 @@
package model
import (
"go-common/library/database/sql"
)
// SQLBusiness is the raw per-business row from the DB: appid list plus the
// JSON/CSV asset references it is parsed from.
type SQLBusiness struct {
	Business string
	AppIds   string
	AssetDB  string
	AssetES  string
	AssetDtb string
}

// Bsn is the parsed business config: its apps and the DB/ES assets resolved
// into usable configs.
type Bsn struct {
	Business string
	AppInfo  []BsnAppInfo
	AssetDB  map[string]*sql.Config
	AssetES  []string
	//AssetDtb []AssetDtb
}

// BsnAppInfo describes one app of a business: how (and whether) its
// incremental indexing runs, plus a runtime lock for recover jobs.
type BsnAppInfo struct {
	AppID       string `json:"appid"`
	IncrWay     string `json:"incr_way"`
	IncrOpen    bool   `json:"incr_open"`
	RecoverLock bool
}
// AssetDtb .
// type AssetDtb struct {
// dtb map[string]*databus.Config
// size int
// sleep int
// }

View File

@@ -0,0 +1,128 @@
package model
import (
"time"
)
// LoopOffset tracks the incremental position of a single table: the current
// offset id/time, the Temp* counterparts (appear to stage values before a
// commit -- confirm in the dao), the Recover*/TempRecover* positions used by
// full reindex, and the review id/window replayed on restart.
type LoopOffset struct {
	IsLoop          bool
	OffsetID        int64
	OffsetTime      string
	TempOffsetID    int64
	TempOffsetTime  string
	RecoverID       int64
	RecoverTime     string
	TempRecoverID   int64
	TempRecoverTime string
	ReviewID        int64
	ReviewTime      int64
}
// SetLoop toggles loop mode (loop mode disables the ReviewTime rollback
// performed in SetOffset).
func (lo *LoopOffset) SetLoop(isLoop bool) {
	lo.IsLoop = isLoop
}

// SetReview sets the review id and review window (seconds) to replay.
func (lo *LoopOffset) SetReview(rid int64, rtime int64) {
	lo.ReviewID = rid
	lo.ReviewTime = rtime
}
// SetOffset records the current offset; zero id / empty time mean "keep the
// existing value". Outside loop mode the time is additionally rolled back by
// ReviewTime seconds so recently-changed rows get re-read.
func (lo *LoopOffset) SetOffset(id int64, t string) {
	if id != 0 {
		lo.OffsetID = id
	}
	if t != "" {
		lo.OffsetTime = t
		if !lo.IsLoop {
			if local, err := time.LoadLocation("Local"); err == nil {
				if t2, e := time.ParseInLocation("2006-01-02 15:04:05", t, local); e == nil && t2.Unix()-lo.ReviewTime > 0 {
					lo.OffsetTime = time.Unix(t2.Unix()-lo.ReviewTime, 0).Format("2006-01-02 15:04:05") // roll back by ReviewTime
				}
			}
		}
	}
}
// SetTempOffset records the temp offset id/time; zero id and empty time mean
// "keep the existing value". The second parameter was renamed so it no
// longer shadows the time package (callers are positional, so this is
// interface-compatible).
func (lo *LoopOffset) SetTempOffset(id int64, ts string) {
	if id != 0 {
		lo.TempOffsetID = id
	}
	if ts != "" {
		lo.TempOffsetTime = ts
	}
}
// SetRecoverOffset records the recover position; a negative id / empty time
// means "keep the existing value" (id 0 is a valid reset, unlike SetOffset).
func (lo *LoopOffset) SetRecoverOffset(recoverID int64, recoverTime string) {
	if recoverID >= 0 {
		lo.RecoverID = recoverID
	}
	if recoverTime != "" {
		lo.RecoverTime = recoverTime
	}
}

// SetRecoverTempOffset records the temp recover position, with the same
// sentinel rules as SetRecoverOffset.
func (lo *LoopOffset) SetRecoverTempOffset(recoverID int64, recoverTime string) {
	if recoverID >= 0 {
		lo.TempRecoverID = recoverID
	}
	if recoverTime != "" {
		lo.TempRecoverTime = recoverTime
	}
}
// LoopOffsets indexes per-shard LoopOffset state by table number.
type LoopOffsets map[int]*LoopOffset

// SetLoops toggles loop mode for shard i; a missing shard is ignored.
func (los LoopOffsets) SetLoops(i int, isLoop bool) {
	if _, ok := los[i]; ok {
		los[i].IsLoop = isLoop
	}
}
// SetOffsets records the offset for shard i; zero id / empty time mean "keep
// the existing value". A missing (or nil) shard is now ignored, consistent
// with SetLoops -- previously los[i] was dereferenced unconditionally and a
// missing key caused a nil-pointer panic.
func (los LoopOffsets) SetOffsets(i int, id int64, ts string) {
	lo, ok := los[i]
	if !ok || lo == nil {
		return
	}
	if id != 0 {
		lo.OffsetID = id
	}
	if ts != "" {
		lo.OffsetTime = ts
	}
}
// SetTempOffsets records the temp offset for shard i; zero id / empty time
// mean "keep the existing value". A missing (or nil) shard is now ignored,
// consistent with SetLoops, instead of panicking on a nil entry.
func (los LoopOffsets) SetTempOffsets(i int, id int64, ts string) {
	lo, ok := los[i]
	if !ok || lo == nil {
		return
	}
	if id != 0 {
		lo.TempOffsetID = id
	}
	if ts != "" {
		lo.TempOffsetTime = ts
	}
}
// SetRecoverOffsets records the recover position for shard i; negative id /
// empty time mean "keep the existing value" (id 0 is a valid reset). A
// missing (or nil) shard is now ignored, consistent with SetLoops, instead
// of panicking on a nil entry.
func (los LoopOffsets) SetRecoverOffsets(i int, recoverID int64, recoverTime string) {
	lo, ok := los[i]
	if !ok || lo == nil {
		return
	}
	if recoverID >= 0 {
		lo.RecoverID = recoverID
	}
	if recoverTime != "" {
		lo.RecoverTime = recoverTime
	}
}
// SetRecoverTempOffsets records the temp recover position for shard i, with
// the same sentinel rules as SetRecoverOffsets. A missing (or nil) shard is
// now ignored, consistent with SetLoops, instead of panicking on a nil entry.
func (los LoopOffsets) SetRecoverTempOffsets(i int, recoverID int64, recoverTime string) {
	lo, ok := los[i]
	if !ok || lo == nil {
		return
	}
	if recoverID >= 0 {
		lo.TempRecoverID = recoverID
	}
	if recoverTime != "" {
		lo.TempRecoverTime = recoverTime
	}
}

View File

@@ -0,0 +1,35 @@
package model
import (
"time"
xtime "go-common/library/time"
)
// ES holds the address of an Elasticsearch cluster.
type ES struct {
	Addr string
}

// Offset is a persisted incremental position: the last processed id/time
// plus the review id/window replayed on restart.
type Offset struct {
	OffID      int64
	OffTime    xtime.Time
	ReviewID   int64
	ReviewTime int64
}

// OffsetID returns the id to resume from: the stored id rolled back by the
// review id.
func (o *Offset) OffsetID() int64 {
	return o.OffID - o.ReviewID
}

// OffsetTime formats the stored offset time as "2006-01-02 15:04:05".
func (o *Offset) OffsetTime() string {
	return time.Unix(int64(o.OffTime), 0).Format("2006-01-02 15:04:05")
}

// ExistsAction whitelists the actions accepted by the HTTP action endpoint.
var (
	ExistsAction = map[string]bool{"repair": true, "all": true}
)

View File

@@ -0,0 +1,222 @@
package model
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"go-common/library/log"
)
// MapData is one document: a generic column-name -> value map, either scanned
// from the DB (values are *interface{} scan targets) or decoded from databus
// JSON (plain values).
type MapData map[string]interface{}

// StrID builds the ES document id from attrs.Index.IndexID. The literal
// "UUID" means "let ES assign an id" (empty id, ok). Otherwise IndexID is a
// comma list "format,field1,field2,..." and the named field values are
// substituted into the format string. The second return is false when the
// spec has no fields or a referenced field is nil/missing.
func (m MapData) StrID(attrs *Attrs) (string, bool) {
	if attrs.Index.IndexID == "UUID" {
		return "", true
	}
	var data []interface{}
	arr := strings.Split(attrs.Index.IndexID, ",")
	arrLen := len(arr)
	if arrLen >= 2 {
		for _, v := range arr[1:] {
			// accept both DB-scan pointers and plain databus values
			if item, ok := m[v].(*interface{}); ok {
				data = append(data, item)
				continue
			}
			if item, ok := m[v].(interface{}); ok {
				data = append(data, item)
				continue
			}
			// only reachable when m[v] is nil: a nil value asserts to neither
			log.Error("model.map_data.StrID err (%v)", v)
			return "", false
		}
		return fmt.Sprintf(arr[0], data...), true
	}
	return "", false
}
// Index resolves the alias and entity index names for this document.
// "single" returns the configured prefixes verbatim; "int" shards by
// DataIndexSuffix, whose shape is "suffixFmt;field:?:divisor" -- the index
// name is prefix + suffixFmt applied to (field value % divisor).
func (m MapData) Index(attrs *Attrs) (indexAliasName, indexEntityName string) {
	switch attrs.Index.IndexSplit {
	case "single":
		indexAliasName = attrs.Index.IndexAliasPrefix
		indexEntityName = attrs.Index.IndexEntityPrefix
	case "int":
		if attrs.DataSQL.DataIndexSuffix != "" {
			s := strings.Split(attrs.DataSQL.DataIndexSuffix, ";")
			v := strings.Split(s[1], ":")
			// DB rows: the shard field is a *interface{} scan target
			if id, ok := m[v[0]].(*interface{}); ok {
				// indexAliasName = fmt.Sprintf("%s%d", attrs.Index.IndexAliasPrefix, (*id).(int64)%100) // mod
				divisor, _ := strconv.ParseInt(v[2], 10, 64)
				indexAliasName = fmt.Sprintf("%s"+s[0], attrs.Index.IndexAliasPrefix, (*id).(int64)%divisor)
				indexEntityName = fmt.Sprintf("%s"+s[0], attrs.Index.IndexEntityPrefix, (*id).(int64)%divisor)
			}
			// databus rows: the shard field is a plain value
			if id, ok := m[v[0]].(interface{}); ok {
				divisor, _ := strconv.ParseInt(v[2], 10, 64)
				indexAliasName = fmt.Sprintf("%s"+s[0], attrs.Index.IndexAliasPrefix, id.(int64)%divisor)
				indexEntityName = fmt.Sprintf("%s"+s[0], attrs.Index.IndexEntityPrefix, id.(int64)%divisor)
			}
		}
	}
	//fmt.Println("indexname", indexAliasName, indexEntityName)
	return
}
// DtbIndex .
// func (m MapData) DtbIndex(attrs *Attrs) (indexName string) {
// if attrs.Index.IndexZero == "0" {
// indexName = attrs.Index.IndexAliasPrefix
// return
// }
// if attrs.DataSQL.DataIndexSuffix != "" {
// s := strings.Split(attrs.DataSQL.DataIndexSuffix, ";")
// v := strings.Split(s[1], ":")
// divisor, _ := strconv.ParseInt(v[2], 10, 64)
// indexName = fmt.Sprintf("%s"+s[0], attrs.Index.IndexAliasPrefix, int64(m[v[0]].(float64))%divisor)
// }
// return
// }
// PrimaryID returns the row's "_id" as int64, or 0 when the field is absent,
// nil, not a scan pointer, or not an int64. Previously a *interface{} holding
// a non-int64 value panicked on the type assertion.
func (m MapData) PrimaryID() int64 {
	if m["_id"] != nil {
		if id, ok := m["_id"].(*interface{}); ok {
			if n, ok := (*id).(int64); ok {
				return n
			}
		}
	}
	return 0
}
// StrMTime returns the row's "_mtime" formatted as "2006-01-02 15:04:05",
// accepting either a scanned *interface{} holding a time.Time or a plain
// string; empty string when absent.
// NOTE(review): a *interface{} holding a non-time.Time value panics on the
// assertion -- confirm _mtime is always scanned as DATETIME.
func (m MapData) StrMTime() string {
	if m["_mtime"] != nil {
		if mtime, ok := m["_mtime"].(*interface{}); ok {
			return (*mtime).(time.Time).Format("2006-01-02 15:04:05")
		} else if mtime, ok := m["_mtime"].(string); ok {
			return mtime
		}
	}
	return ""
}
// StrCTime returns the row's "ctime" formatted as "2006-01-02" (date only,
// unlike StrMTime), accepting either a scanned *interface{} holding a
// time.Time or a plain string; empty string when absent.
// NOTE(review): same panic risk as StrMTime for non-time.Time values.
func (m MapData) StrCTime() string {
	if m["ctime"] != nil {
		if ctime, ok := m["ctime"].(*interface{}); ok {
			return (*ctime).(time.Time).Format("2006-01-02")
		} else if ctime, ok := m["ctime"].(string); ok {
			return ctime
		}
	}
	return ""
}
// InetNtoA renders an IPv4 address stored as an int64 in dotted-quad form.
func (m MapData) InetNtoA(ip int64) string {
	octets := [4]byte{byte(ip >> 24), byte(ip >> 16), byte(ip >> 8), byte(ip)}
	return fmt.Sprintf("%d.%d.%d.%d", octets[0], octets[1], octets[2], octets[3])
}
// TransData dereferences the *interface{} scan targets left by rows.Scan into
// plain values (times become "2006-01-02 15:04:05" strings, []byte becomes
// string, NULL becomes int64(0)), then applies the per-field custom formats
// from DataIndexFormatFields (ip/arr/bin/array_json/day/workflow).
func (m MapData) TransData(attr *Attrs) {
	for k, v := range m {
		// transfer automatically
		if v2, ok := v.(*interface{}); ok {
			switch (*v2).(type) {
			case time.Time:
				m[k] = (*v2).(time.Time).Format("2006-01-02 15:04:05")
			case []uint, []uint8, []uint16, []uint32, []uint64:
				m[k] = string((*v2).([]byte))
			case int, int8, int16, int32, int64: // must stay int64: extra_data lookups key on it
				m[k] = (*v2).(int64)
			case nil:
				m[k] = int64(0) // default when the column is NULL
			default:
				// other types
			}
		}
		// transfer again by custom format
		if t, ok := attr.DataSQL.DataIndexFormatFields[k]; ok {
			if v3, ok := v.(*interface{}); ok {
				switch t {
				case "ip":
					if *v3 == nil {
						*v3 = int64(0)
					}
					ipFormat := m.InetNtoA((*v3).(int64))
					m[k+"_format"] = ipFormat
				case "arr":
					// expand the bitmask into 1-based set-bit positions
					// NOTE(review): panics if the column is not int64
					var arr []int
					binaryAttributes := strconv.FormatInt((*v3).(int64), 2)
					for i := len(binaryAttributes) - 1; i >= 0; i-- {
						b := fmt.Sprintf("%c", binaryAttributes[i])
						if b == "1" {
							arr = append(arr, len(binaryAttributes)-i)
						}
					}
					m[k+"_format"] = arr
				case "bin":
					// same expansion as "arr" but replaces the field in place
					var arr []int
					binaryAttributes := strconv.FormatInt((*v3).(int64), 2)
					for i := len(binaryAttributes) - 1; i >= 0; i-- {
						b := fmt.Sprintf("%c", binaryAttributes[i])
						if b == "1" {
							arr = append(arr, len(binaryAttributes)-i)
						}
					}
					m[k] = arr
				case "array_json":
					var arr []int64
					arr = []int64{}
					json.Unmarshal([]byte((*v3).([]uint8)), &arr) // non-JSON input leaves the empty array
					// println(len(arr))
					m[k] = arr
				case "day":
					m[k] = (*v3).(time.Time).Format("2006-01-02")
				case "workflow":
					delete(m, k)
				default:
					// other types
				}
			}
		}
	}
}
// TransDtb maps databus field names onto ES field names according to
// DataDtbFields (one databus field may feed several ES fields), normalizing
// JSON numbers (float64) to int64, then derives "_id" from the configured
// _id mapping when available.
func (m MapData) TransDtb(attr *Attrs) {
	// TODO: re-enable this filter, otherwise unwanted databus fields can
	// never be removed.
	// for k := range m {
	// 	if _, ok := attr.DataSQL.DataDtbFields[k]; !ok {
	// 		if k == "index_name" {
	// 			continue
	// 		}
	// 		delete(m, k)
	// 	}
	// }
	res := map[string]interface{}{}
	for k, dv := range attr.DataSQL.DataDtbFields {
		for _, dk := range dv {
			if v, ok := m[k]; ok {
				switch v.(type) {
				case float64:
					// databus JSON numbers decode as float64
					res[dk] = int64(v.(float64))
				default:
					res[dk] = v
				}
			}
		}
	}
	for k := range res {
		m[k] = res[k]
	}
	// derive "_id" from the configured _id field's ES mapping (id is the
	// zero value when okID is false, so the lookup below is safe)
	id, okID := attr.DataSQL.DataFieldsV2["_id"]
	key, okKey := attr.DataSQL.DataDtbFields[id.Field]
	if len(key) >= 1 && okID && okKey {
		m["_id"] = m[key[0]]
	}
}

View File

@@ -0,0 +1,46 @@
package model
import (
"encoding/json"
"time"
)
// Stat aggregates indexing statistics for one appid.
type Stat struct {
	Counts int `json:"counts"` // total documents processed
}

// Message is a canal binlog message as delivered over databus; New/Old carry
// the raw row images and are decoded lazily.
type Message struct {
	Action string          `json:"action"`
	Table  string          `json:"table"`
	New    json.RawMessage `json:"new"`
	Old    json.RawMessage `json:"old"`
}

// DatabusPool lists the appids whose data is consumed directly from databus.
var DatabusPool = []string{"dm", "dmreport_new"}
// JSONTime is a time.Time that marshals to/from JSON as a quoted
// "2006-01-02 15:04:05" string in the local time zone.
type JSONTime time.Time

// UnmarshalJSON parses a quoted "2006-01-02 15:04:05" string in local time.
// A JSON null leaves the value untouched and returns nil, per the
// encoding/json Unmarshaler convention (previously null was parsed as a
// timestamp and produced an error plus a clobbered zero value).
func (p *JSONTime) UnmarshalJSON(data []byte) error {
	if string(data) == "null" {
		return nil
	}
	local, err := time.ParseInLocation(`"2006-01-02 15:04:05"`, string(data), time.Local)
	*p = JSONTime(local)
	return err
}

// MarshalJSON renders the time as a quoted "2006-01-02 15:04:05" string.
func (p JSONTime) MarshalJSON() ([]byte, error) {
	data := make([]byte, 0)
	data = append(data, '"')
	data = time.Time(p).AppendFormat(data, "2006-01-02 15:04:05")
	data = append(data, '"')
	return data, nil
}

// String implements fmt.Stringer using the same layout.
func (p JSONTime) String() string {
	return time.Time(p).Format("2006-01-02 15:04:05")
}

View File

@@ -0,0 +1,56 @@
# Bazel build file for the search-job service package.
# Tagged "automanaged": kept in sync by build tooling -- avoid hand edits.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Test target for the service package.
go_test(
    name = "go_default_test",
    srcs = [
        "all_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# Library target for the service logic.
go_library(
    name = "go_default_library",
    srcs = [
        "all.go",
        "incr.go",
        "service.go",
        "stat.go",
    ],
    importpath = "go-common/app/job/main/search/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/search/conf:go_default_library",
        "//app/job/main/search/dao:go_default_library",
        "//app/job/main/search/dao/base:go_default_library",
        "//app/job/main/search/model:go_default_library",
        "//library/log:go_default_library",
    ],
)

# Source globs used by the repo-wide all-srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,68 @@
package service
import (
"context"
"go-common/app/job/main/search/model"
"go-common/library/log"
)
// all performs a full reindex for appid: it pages through the source via
// AllMessages, bulk-writes each page to ES in _bulkSize chunks, commits the
// offset after each complete page, and repeats until a short (final) page is
// returned. A handful of appids instead loop forever by resetting their
// recover offset when exhausted.
func (s *Service) all(c context.Context, appid string, writeEntityIndex bool) {
	var stat = new(model.Stat)
	app := s.base.D.AppPool[appid]
	app.InitIndex(c)
	app.InitOffset(c)
	//app.Offset(c)
	app.Sleep(c)
	for {
		start := 0
		length, err := app.AllMessages(c)
		if err != nil {
			log.Error("AllMessages error(%v)", err)
			app.Sleep(c)
			continue
		}
		for {
			end := start + _bulkSize
			diff := length - start
			if diff > _bulkSize {
				// full chunk; on error, retry the same window after a pause
				if err := app.BulkIndex(c, start, end, writeEntityIndex); err != nil {
					log.Error("es:BulkIndex error(%v)", err)
					app.Sleep(c)
					continue
				}
				start = end
			} else if diff > 0 && diff <= _bulkSize {
				// final partial chunk of this page
				if err := app.BulkIndex(c, start, length, writeEntityIndex); err != nil {
					log.Error("BulkIndex error(%v)", err)
					app.Sleep(c)
					continue
				}
				// persist the offset only once the whole page is written
				if err := app.Commit(c); err != nil {
					log.Error("UpdateOffsetID error(%v)", err)
					app.Sleep(c)
					continue
				}
				app.Sleep(c)
				break
			} else {
				app.Sleep(c)
				break
			}
		}
		stat.Counts += length
		s.updateStat(appid, stat)
		if length < app.Size(c) {
			// a short page means the source is exhausted
			switch appid {
			case "pgc_media", "esports", "esports_contests", "academy_archive", "esports_fav_all", "activity_all":
				// these appids restart from scratch instead of stopping
				app.SetRecover(c, 0, "", 0)
				app.Sleep(c)
				continue
			}
			break
		}
		app.Sleep(c)
	}
	log.Info("appid:%s, all data to es successful!!!", appid)
}

View File

@@ -0,0 +1,21 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// Test_Stat verifies that Stat returns without error for a known appid.
func Test_Stat(t *testing.T) {
	Convey("Stat", t, WithService(func(s *Service) {
		_, err := s.Stat(context.TODO(), "music_songs")
		So(err, ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,39 @@
package service
import (
"context"
"time"
"go-common/app/job/main/search/dao"
)
// incr consumes incremental messages for app and bulk-indexes them into ES.
//
// It loops forever: fetch a batch with IncrMessages, write it in _bulkSize
// chunks via BulkIndex, then Commit the consumption offset. A bulk failure
// abandons the whole batch without committing, and sleeps long enough for
// the databus read timeout to trigger redelivery of the messages.
func (s *Service) incr(c context.Context, app dao.App) {
	app.InitIndex(c)
	app.Sleep(c)
	app.Offset(c)
Loop:
	for {
		length, err := app.IncrMessages(c)
		if err != nil {
			s.base.D.PromError("IncrMessages", "IncrMessages error(%v)", err)
			app.Sleep(c)
			continue
		}
		for start := 0; start < length; start += _bulkSize {
			diff := length - start
			if diff > _bulkSize {
				diff = _bulkSize
			}
			if err := app.BulkIndex(c, start, start+diff, false); err != nil {
				s.base.D.PromError("BulkIndex", "BulkIndex error(%v)", err)
				// Skip Commit so the offset is not advanced; sleeping past
				// the databus read timeout makes the batch be re-consumed.
				time.Sleep(120 * time.Second)
				continue Loop
			}
		}
		if err := app.Commit(c); err != nil {
			s.base.D.PromError("UpdateOffsetID", "UpdateOffsetID error(%v)", err)
		}
		app.Sleep(c)
	}
}

View File

@@ -0,0 +1,88 @@
package service
import (
"context"
"sync"
"go-common/app/job/main/search/conf"
"go-common/app/job/main/search/dao/base"
"go-common/app/job/main/search/model"
"go-common/library/log"
)
var (
	// ctx is the background context handed to the long-lived incremental
	// consumer goroutines started from New via incrproc.
	ctx = context.TODO()
)
const (
	// _bulkSize is the maximum number of documents written to ES in a
	// single bulk request.
	_bulkSize = 5000
)
// Service holds the search-job application state: configuration, the shared
// dao layer, and per-appid indexing statistics.
type Service struct {
	c *conf.Config
	// base bundles the dao layer shared by all apps.
	base *base.Base
	// mutex guards concurrent access to stats.
	mutex *sync.RWMutex
	// stats maps appid to its latest indexing statistics.
	stats map[string]*model.Stat
}
// New builds a Service from the given configuration and starts the
// incremental-consumer goroutines before returning it.
func New(c *conf.Config) *Service {
	svc := &Service{
		c:     c,
		base:  base.NewBase(c),
		mutex: new(sync.RWMutex),
		stats: map[string]*model.Stat{},
	}
	svc.incrproc()
	return svc
}
// incrproc starts one incremental-consumer goroutine per configured app.
// Apps are skipped unless their business config has IncrOpen set; a consumer
// is only launched when the app's business matches the current env and the
// process is not in index mode (presumably Business.Index marks a dedicated
// full-index deployment — confirm against conf).
func (s *Service) incrproc() {
	for appid, e := range s.base.D.AppPool {
		if !s.base.D.BusinessPool[appid].IncrOpen {
			continue
		}
		if e.Business() == s.c.Business.Env && !s.c.Business.Index {
			go s.incr(ctx, e)
		}
	}
}
// Close shuts down the underlying dao layer.
func (s *Service) Close() {
	s.base.D.Close()
}
// Ping reports the health of the underlying dao layer.
func (s *Service) Ping(c context.Context) error {
	return s.base.D.Ping(c)
}
// HTTPAction dispatches an admin HTTP command against an app index.
//
// Supported actions:
//   - "repair": accepted but currently a no-op.
//   - "all":    resets the recover point for appid and kicks off a full
//     re-index in a background goroutine.
//
// Any other action returns an empty msg and nil err unchanged.
func (s *Service) HTTPAction(ctx context.Context, appid, action string, recoverID int64, writeEntityIndex bool) (msg string, err error) {
	switch action {
	case "repair":
		// NOTE(review): "repair" is recognized but not implemented yet.
	case "all":
		if _, ok := s.base.D.AppPool[appid]; !ok {
			msg = "appid不在appPool中"
			// Fixed typo in the log message ("inclueds" -> "includes").
			log.Error("AppPool includes (%v)", s.base.D.AppPool)
			return
		}
		s.base.D.SetRecover(ctx, appid, recoverID, "", 0)
		// Detach from the request context so the full re-index survives
		// after the HTTP request returns.
		go s.all(context.Background(), appid, writeEntityIndex)
	default:
		return
	}
	return
}
// Stat returns the indexing statistics recorded for appid; err is always nil.
func (s *Service) Stat(ctx context.Context, appid string) (st *model.Stat, err error) {
	return s.stat(appid), nil
}

View File

@@ -0,0 +1,19 @@
package service
import (
"flag"
"path/filepath"
"go-common/app/job/main/search/conf"
)
func WithService(f func(s *Service)) func() {
return func() {
dir, _ := filepath.Abs("../goconvey.toml")
flag.Set("conf", dir)
conf.Init()
s := New(conf.Conf)
// s.dao = dao.New(conf.Conf)
f(s)
}
}

View File

@@ -0,0 +1,20 @@
package service
import (
"go-common/app/job/main/search/model"
)
// stat returns the statistics recorded for appid under a read lock.
func (s *Service) stat(appid string) *model.Stat {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	return s.stats[appid]
}
// updateStat stores st as the statistics for appid under the write lock.
func (s *Service) updateStat(appid string, st *model.Stat) {
	s.mutex.Lock()
	defer s.mutex.Unlock()
	s.stats[appid] = st
}