Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,21 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/history/cmd:all-srcs",
"//app/job/main/history/conf:all-srcs",
"//app/job/main/history/dao:all-srcs",
"//app/job/main/history/http:all-srcs",
"//app/job/main/history/model:all-srcs",
"//app/job/main/history/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,110 @@
#### history-job
##### Version 1.11.4
> 1. tidb client
##### Version 1.11.3
> 1. 重新打包
##### Version 1.11.2
> 1. 去掉删除逻辑
##### Version 1.11.1
> 1. 调整删除逻辑
##### Version 1.11.0
> 1. 调整删除逻辑: 增加互斥锁 按时间分批删除数据
##### Version 1.10.2
> 1. 配置重试时间
##### Version 1.10.1
> 1. 无限重试insert
##### Version 1.10.0
> 1. 删除用户过多的历史记录
##### Version 1.9.1
> 1. 调整burst
##### Version 1.9.0
> 1. 限制写入速度
##### Version 1.8.3
> 1. 每次都commit
##### Version 1.8.2
> 1. 调整上报日志
##### Version 1.8.1
> 1. 增加prom上报
##### Version 1.8.0
> 1. 相同的mid聚合在一起
##### Version 1.7.3
> 1. 增加ignore参数
##### Version 1.7.2
> 1. rebase master
##### Version 1.7.1
> 1. 调整删除语句 改为根据mtime删除
> 2. 限制TIDB 写入qps
> 3. 修复落后时重复写入的问题
> 4. 支持忽略databus消息
##### Version 1.6.1
> 1. 去掉错误重试
##### Version 1.6.0
> 1. job写入数据库
##### Version 1.5.3
> 1. 消费databus同步commit
##### Version 1.5.1
> 1. 调整调用rpc超时时间
> 2. 分批调用flush rpc 接口
##### Version 1.5.0
> 1. 接入history-service
##### Version 1.4.8
> 1.异步写hbase fix
##### Version 1.4.7
> 1.异步写hbase
##### Version 1.4.6
> 1.重新构建镜像
##### Version 1.4.5
> 1.迁移 bm fix
##### Version 1.4.4
> 1.迁移 bm
##### Version 1.4.3
> 1.迁移main目录
##### Version 1.4.2
> 1.去除statsd
##### Version 1.4.1
> 1.去除identify
##### Version 1.4.0
> 1.接入新配置中心
##### Version 1.3.0
> 1.优化聚合
##### Version 1.2.5
> 1.修复消费跟不上
##### Version 1.2.4
> 1.修复聚合丢数据
##### Version 1.2.3
> 1.bug
##### Version 1.2.2
> 1.增加 mid白名单
##### Version 1.2.1
##### Version 1.2.0
> 1.异步聚合数据
##### Version 1.1.0
> 1.播放进度聚合databus
##### Version 1.0.0
> 1.rpc聚合数据

View File

@@ -0,0 +1,16 @@
# Owner
renwei
wangxu01
# Author
wangxu01
renyashun
zhangshengchao
# Reviewer
zhapuyu
wangxu01
renyashun
chenzhihui
zhangshengchao

View File

@@ -0,0 +1,19 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- renwei
- renyashun
- wangxu01
- zhangshengchao
labels:
- job
- job/main/history
- main
options:
no_parent_owners: true
reviewers:
- chenzhihui
- renyashun
- wangxu01
- zhangshengchao
- zhapuyu

View File

@@ -0,0 +1,10 @@
#### history-job
##### 项目简介
> 1.history-job
##### 编译环境
> 请只用golang v1.7.x以上版本编译执行。
##### 依赖包
> 1.公共包go-common

View File

@@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = ["history-job.toml"],
importpath = "go-common/app/job/main/history/cmd",
tags = ["automanaged"],
deps = [
"//app/job/main/history/conf:go_default_library",
"//app/job/main/history/http:go_default_library",
"//app/job/main/history/service:go_default_library",
"//library/log:go_default_library",
"//library/net/trace:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)

View File

@@ -0,0 +1,137 @@
# This is a TOML document. Boom.
[log]
dir = "/data/log/history-job/"
[multiHTTP]
[multiHTTP.outer]
addrs = ["0.0.0.0:6431"]
maxListen = 10
[job]
max = 10240
batch = 100
expire = "5s"
url = "http://api.bilibili.co/x/internal/v2/history/flush"
ServiceBatch = 100
DeleteLimit = 1000
DeleteStartHour = 10
DeleteEndHour = 15
CacheLen = 1000
QPSLimit = 10000
IgnoreMsg = false
RetryTime = "1s"
DeleteStep = "5m"
[job.Client]
dial = "1s"
timeout = "1s"
keepAlive = "60s"
timer = 1000
key = "f6433799dbd88751"
secret = "36f8ddb1806207fe07013ab6a77a3935"
[job.Client.breaker]
window ="10s"
sleep ="10ms"
bucket = 10
ratio = 0.1
request = 100
[hisSub]
key = "0Pub71WwEMKXu63qtztu"
secret = "0Pub71WwEMKXu63qtztv"
group = "HistoryMerge-HistoryJob-S"
topic = "HistoryMerge-T"
action = "sub"
[hisSub.redis]
name = "history-job/historysub"
proto = "tcp"
addr = "172.16.33.56:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[mysql]
addr = "127.0.0.1:4000"
dsn = "root:@tcp(127.0.0.1:4000)/bilibili_history?timeout=200ms&readTimeout=200ms&writeTimeout=200ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "1s"
execTimeout = "1s"
tranTimeout = "1s"
[LongDB]
addr = "127.0.0.1:4000"
dsn = "root:@tcp(127.0.0.1:4000)/bilibili_history?timeout=1h&readTimeout=1h&writeTimeout=1h&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "1h"
execTimeout = "1h"
tranTimeout = "1h"
[tidb]
addr = "127.0.0.1:4000"
dsn = "root:@tcp(127.0.0.1:4000)/bilibili_history?timeout=200ms&readTimeout=200ms&writeTimeout=200ms&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "1s"
execTimeout = "1s"
tranTimeout = "1s"
[Longtidb]
addr = "127.0.0.1:4000"
dsn = "root:@tcp(127.0.0.1:4000)/bilibili_history?timeout=1h&readTimeout=1h&writeTimeout=1h&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "1h"
execTimeout = "1h"
tranTimeout = "1h"
[ServiceHisSub]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group= "HistoryServiceMerge-MainCommunity-S"
topic= "HistoryServiceMerge-T"
action="pub"
name = "history"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
[info]
master = ""
meta = ""
testRowKey = "history"
dialTimeout = "100ms"
readTimeout = "250ms"
readsTimeout = "300ms"
writeTimeout = "250ms"
writesTimeout = "300ms"
[info.zookeeper]
root = ""
addrs = ["172.18.33.131:2181","172.18.33.168:2181","172.18.33.169:2181"]
timeout = "30s"
[redis]
name = "history-service"
proto = "tcp"
addr = "127.0.0.1:6379"
idle = 10
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
expire = "1m"
[merge]
MaxSize = 51200
Interval = "1m"
Buffer = 10240
Worker = 100

View File

@@ -0,0 +1,47 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"go-common/app/job/main/history/conf"
"go-common/app/job/main/history/http"
"go-common/app/job/main/history/service"
"go-common/library/log"
"go-common/library/net/trace"
)
// main boots history-job: load config, set up logging and tracing,
// start the background service and its HTTP server, then block on OS
// signals until told to exit.
func main() {
flag.Parse()
if err := conf.Init(); err != nil {
log.Error("conf.Init() error(%v)", err)
panic(err)
}
log.Init(conf.Conf.Log)
trace.Init(conf.Conf.Tracer)
// Defers run LIFO: log is closed first, then the tracer.
defer trace.Close()
defer log.Close()
log.Info("history-job start")
// Start the worker service and the HTTP (health/debug) server.
svr := service.New(conf.Conf)
http.Init(conf.Conf, svr)
// Install signal handling; the channel is buffered so a signal sent
// while we are busy is not lost.
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
for {
s := <-c
log.Info("history get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
// Graceful shutdown: stop the service before exiting.
svr.Close()
log.Info("history exit")
return
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}

View File

@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/job/main/history/conf",
tags = ["automanaged"],
deps = [
"//library/cache/redis:go_default_library",
"//library/conf:go_default_library",
"//library/database/hbase.v2:go_default_library",
"//library/database/tidb:go_default_library",
"//library/ecode/tip:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus:go_default_library",
"//library/sync/pipeline:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)

View File

@@ -0,0 +1,103 @@
package conf
import (
"flag"
"go-common/library/cache/redis"
"go-common/library/conf"
"go-common/library/database/tidb"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/trace"
"go-common/library/queue/databus"
"go-common/library/sync/pipeline"
xtime "go-common/library/time"
"go-common/library/database/hbase.v2"
"github.com/BurntSushi/toml"
)
const (
configKey = "history-job.toml"
)
// global conf
var (
confPath string
Conf = &Config{}
)
// Config service conf
type Config struct {
App *bm.App
Log *log.Config
Tracer *trace.Config
Ecode *ecode.Config
Job *Job
Info *HBaseConfig
HisSub *databus.Config
ServiceHisSub *databus.Config
Sub *databus.Config
BM *bm.ServerConfig
Redis *redis.Config
Merge *pipeline.Config
TiDB *tidb.Config
LongTiDB *tidb.Config
}
// HBaseConfig .
type HBaseConfig struct {
*hbase.Config
WriteTimeout xtime.Duration
ReadTimeout xtime.Duration
}
// Job job.
type Job struct {
URL string
Client *bm.ClientConfig
Expire xtime.Duration
Max int
Batch int
ServiceBatch int
DeleteLimit int
DeleteStartHour int
DeleteEndHour int
DeleteStep xtime.Duration
// 用户最近播放列表长度
CacheLen int
QPSLimit int
IgnoreMsg bool
RetryTime xtime.Duration
}
func init() {
flag.StringVar(&confPath, "conf", "", "config path")
}
// Init loads the configuration: from the local file named by the -conf
// flag when one was given, otherwise from the remote config center.
func Init() (err error) {
	if confPath != "" {
		_, err = toml.DecodeFile(confPath, &Conf)
		return
	}
	return configCenter()
}
// configCenter fetches the TOML configuration identified by configKey
// from the remote config center and decodes it into Conf.
func configCenter() (err error) {
	var (
		ok     bool
		value  string
		client *conf.Client
	)
	if client, err = conf.New(); err != nil {
		return
	}
	if value, ok = client.Value(configKey); !ok {
		// BUG fix: this used to be panic(err) with err == nil, which
		// produced an unhelpful "panic: nil"; panic with a real message.
		panic("config center: no value for key " + configKey)
	}
	_, err = toml.Decode(value, &Conf)
	return
}

View File

@@ -0,0 +1,68 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"dao_test.go",
"hbase_test.go",
"mysql_test.go",
"redis_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/interface/main/history/model:go_default_library",
"//app/job/main/history/conf:go_default_library",
"//app/service/main/history/model:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"dao.go",
"hbase.go",
"mysql.go",
"redis.go",
],
importpath = "go-common/app/job/main/history/dao",
tags = ["automanaged"],
deps = [
"//app/interface/main/history/model:go_default_library",
"//app/job/main/history/conf:go_default_library",
"//app/service/main/history/model:go_default_library",
"//library/cache/redis:go_default_library",
"//library/database/hbase.v2:go_default_library",
"//library/database/tidb:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)

View File

@@ -0,0 +1,117 @@
package dao
import (
"context"
"errors"
"fmt"
"net/url"
"time"
"go-common/app/job/main/history/conf"
"go-common/app/service/main/history/model"
"go-common/library/cache/redis"
"go-common/library/database/tidb"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/database/hbase.v2"
)
var errFlushRequest = errors.New("error flush history to store")
// Dao dao.
type Dao struct {
conf *conf.Config
HTTPClient *bm.Client
URL string
info *hbase.Client
redis *redis.Pool
db *tidb.DB
longDB *tidb.DB
insertStmt *tidb.Stmts
businessesStmt *tidb.Stmts
allHisStmt *tidb.Stmts
delUserStmt *tidb.Stmts
BusinessesMap map[int64]*model.Business
BusinessNames map[string]*model.Business
}
// New builds a history Dao: it wires up redis, the flush HTTP client,
// hbase and both tidb handles, prepares the hot statements, blocks until
// the business table has been loaded once, and then starts the periodic
// business refresher.
func New(c *conf.Config) (dao *Dao) {
dao = &Dao{
conf: c,
redis: redis.NewPool(c.Redis),
HTTPClient: bm.NewClient(c.Job.Client),
URL: c.Job.URL,
info: hbase.NewClient(c.Info.Config),
db: tidb.NewTiDB(c.TiDB),
longDB: tidb.NewTiDB(c.LongTiDB),
}
dao.businessesStmt = dao.db.Prepared(_businessesSQL)
dao.insertStmt = dao.db.Prepared(_addHistorySQL)
dao.allHisStmt = dao.db.Prepared(_allHisSQL)
dao.delUserStmt = dao.db.Prepared(_delUserHisSQL)
// Synchronous first load so BusinessesMap/BusinessNames are populated
// before any caller uses the Dao; the goroutine refreshes them later.
dao.loadBusiness()
go dao.loadBusinessproc()
return
}
// Flush asks history-service (via its internal HTTP flush endpoint) to
// persist the cached histories of the given mids; stime is forwarded as
// the "time" parameter. A non-zero business code is reported as
// errFlushRequest.
func (d *Dao) Flush(c context.Context, mids string, stime int64) (err error) {
	params := url.Values{}
	params.Set("mids", mids)
	params.Set("time", fmt.Sprintf("%d", stime))
	reply := new(struct {
		Code int    `json:"code"`
		Msg  string `json:"message"`
	})
	if err = d.HTTPClient.Post(c, d.URL, "", params, reply); err != nil {
		log.Error("d.HTTPClient.Post(%s?%s) error(%v)", d.URL, params.Encode(), err)
		return
	}
	if reply.Code == 0 {
		return
	}
	log.Error("d.HTTPClient.Post(%s?%s) code:%d msg:%s", d.URL, params.Encode(), reply.Code, reply.Msg)
	err = errFlushRequest
	return
}
// Ping reports storage health. It is currently a stub that always
// returns nil; no backend is actually probed.
func (d *Dao) Ping(c context.Context) (err error) {
return
}
// Close releases the redis pool and both tidb connections.
// (The old comment mentioned kafka, but no kafka resource is held here.)
func (d *Dao) Close() {
d.redis.Close()
d.db.Close()
d.longDB.Close()
}
// loadBusiness reads the business table, retrying once per second until
// it succeeds, then swaps in freshly built name->business and
// id->business lookup maps.
// NOTE(review): the map-field assignments below are plain writes with no
// synchronization while other goroutines read them — confirm with -race
// whether this is acceptable here.
func (d *Dao) loadBusiness() {
var business []*model.Business
var err error
businessMap := make(map[string]*model.Business)
businessIDMap := make(map[int64]*model.Business)
for {
if business, err = d.Businesses(context.TODO()); err != nil {
time.Sleep(time.Second)
continue
}
for _, b := range business {
businessMap[b.Name] = b
businessIDMap[b.ID] = b
}
// Whole-map replacement: readers never see a partially filled map.
d.BusinessNames = businessMap
d.BusinessesMap = businessIDMap
return
}
}
// loadBusinessproc refreshes the cached business maps every five
// minutes, forever. Intended to run on its own goroutine.
func (d *Dao) loadBusinessproc() {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()
	for range ticker.C {
		d.loadBusiness()
	}
}

View File

@@ -0,0 +1,34 @@
package dao
import (
"flag"
"go-common/app/job/main/history/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.community.history-job")
flag.Set("conf_token", "8cf9e76766a95b96d18ab7c5b621374d")
flag.Set("tree_id", "2297")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/history-job.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}

View File

@@ -0,0 +1,54 @@
package dao
import (
"context"
"crypto/md5"
"encoding/binary"
"encoding/json"
"fmt"
"strconv"
"time"
"go-common/app/interface/main/history/model"
"go-common/library/log"
)
var (
tableInfo = "ugc:history"
family = "info"
)
// hashRowKey builds the hbase rowkey for a mid: the first two bytes of
// md5(little-endian(mid)) rendered as hex, followed by the decimal mid.
// The hashed prefix spreads consecutive mids across regions.
func hashRowKey(mid int64) string {
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, uint64(mid))
	sum := md5.Sum(buf)
	return fmt.Sprintf("%x%d", sum[:2], mid)
}
// column derives the hbase column name for an item: the bare aid for
// types below model.TypeArticle, otherwise "aid_typ" so distinct types
// of the same aid do not collide.
func (d *Dao) column(aid int64, typ int8) string {
	if typ >= model.TypeArticle {
		return fmt.Sprintf("%d_%d", aid, typ)
	}
	return strconv.FormatInt(aid, 10)
}
// Add writes one history record into the hbase info table under the
// mid-hashed rowkey, bounded by the configured write timeout. It returns
// any marshalling or storage error so callers can observe failed writes.
func (d *Dao) Add(ctx context.Context, h *model.History) error {
	valueByte, err := json.Marshal(h)
	if err != nil {
		log.Error("json.Marshal(%v) error(%v)", h, err)
		return err
	}
	fValues := make(map[string][]byte)
	column := d.column(h.Aid, h.TP)
	fValues[column] = valueByte
	key := hashRowKey(h.Mid)
	values := map[string]map[string][]byte{family: fValues}
	ctx, cancel := context.WithTimeout(ctx, time.Duration(d.conf.Info.WriteTimeout))
	defer cancel()
	// BUG fix: the PutStr error used to be logged and then swallowed
	// (the function returned nil unconditionally); propagate it instead.
	if _, err = d.info.PutStr(ctx, tableInfo, key, values); err != nil {
		log.Error("info.PutStr error(%v)", err)
		return err
	}
	return nil
}

View File

@@ -0,0 +1,52 @@
package dao
import (
"context"
"go-common/app/interface/main/history/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
func TestDaohashRowKey(t *testing.T) {
convey.Convey("hashRowKey", t, func(ctx convey.C) {
var (
mid = int64(14771787)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
p1 := hashRowKey(mid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaocolumn(t *testing.T) {
convey.Convey("column", t, func(ctx convey.C) {
var (
aid = int64(14771787)
typ = int8(3)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
p1 := d.column(aid, typ)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoAdd(t *testing.T) {
convey.Convey("Add", t, func(ctx convey.C) {
var (
h = &model.History{Mid: 14771787, Aid: 14771787}
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
err := d.Add(context.Background(), h)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}

View File

@@ -0,0 +1,124 @@
package dao
import (
"context"
"database/sql"
"time"
"go-common/app/service/main/history/model"
"go-common/library/database/tidb"
"go-common/library/log"
)
var (
_businessesSQL = "SELECT id, name, ttl FROM business"
_addHistorySQL = "INSERT INTO histories(mid, kid, business_id, aid, sid, epid, sub_type, cid, device, progress, view_at) VALUES(?,?,?,?,?,?,?,?,?,?,?)" +
"ON DUPLICATE KEY UPDATE aid =?, sid=?, epid=?, sub_type=?, cid=?, device=?, progress=?, view_at=?"
_deleteSQL = "DELETE FROM histories WHERE business_id = ? AND mtime >= ? AND mtime < ? LIMIT ?"
_allHisSQL = "SELECT mtime FROM histories WHERE mid = ? AND business_id = ? ORDER BY mtime desc"
_earlyHistorySQL = "SELECT mtime FROM histories WHERE business_id = ? ORDER BY mtime LIMIT 1"
_delUserHisSQL = "DELETE FROM histories WHERE mid = ? AND mtime < ? and business_id = ?"
)
// Businesses loads every row of the business table (id, name, ttl) via
// the prepared businessesStmt.
func (d *Dao) Businesses(c context.Context) (res []*model.Business, err error) {
var rows *tidb.Rows
if rows, err = d.businessesStmt.Query(c); err != nil {
log.Error("db.businessesStmt.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
b := &model.Business{}
if err = rows.Scan(&b.ID, &b.Name, &b.TTL); err != nil {
log.Error("rows.Business.Scan error(%v)", err)
return
}
res = append(res, b)
}
// Surface any iteration error that terminated the loop early.
err = rows.Err()
return
}
// DeleteHistories deletes up to conf.Job.DeleteLimit rows of business
// bid whose mtime falls in [beginTime, endTime), using the long-timeout
// DB handle. It returns the number of rows actually removed and logs the
// elapsed time of the statement.
func (d *Dao) DeleteHistories(c context.Context, bid int64, beginTime, endTime time.Time) (rows int64, err error) {
var res sql.Result
begin := time.Now()
if res, err = d.longDB.Exec(c, _deleteSQL, bid, beginTime, endTime, d.conf.Job.DeleteLimit); err != nil {
log.Error("DeleteHistories(%v %v %v) err: %v", bid, beginTime, endTime, err)
return
}
rows, err = res.RowsAffected()
log.Info("clean business histories bid: %v begin: %v end: %v rows: %v, time: %v", bid, beginTime, endTime, rows, time.Since(begin))
return
}
// AddHistories upserts a batch of histories inside one transaction using
// the prepared INSERT ... ON DUPLICATE KEY UPDATE statement. An empty
// batch is a no-op; any failed exec rolls back the whole batch.
func (d *Dao) AddHistories(c context.Context, hs []*model.History) (err error) {
if len(hs) == 0 {
return
}
var tx *tidb.Tx
if tx, err = d.db.Begin(c); err != nil {
log.Error("tx.BeginTran() error(%v)", err)
return
}
for _, h := range hs {
// Arguments appear twice: once for the INSERT column list and once
// for the ON DUPLICATE KEY UPDATE clause.
if _, err = tx.Stmts(d.insertStmt).Exec(c, h.Mid, h.Kid, h.BusinessID, h.Aid, h.Sid, h.Epid, h.SubType, h.Cid, h.Device, h.Progress, h.ViewAt,
h.Aid, h.Sid, h.Epid, h.SubType, h.Cid, h.Device, h.Progress, h.ViewAt); err != nil {
log.Error("addHistories exec err mid: %v err: %+v", h.Mid, err)
tx.Rollback()
return
}
}
if err = tx.Commit(); err != nil {
log.Error("add histories commit(%+v) err: %v", hs, err)
return
}
log.Infov(c, log.D{Key: "log", Value: "addHistories db"}, log.D{Key: "len", Value: len(hs)})
return
}
// DeleteUserHistories removes one user's history rows of business bid
// whose mtime is older than t, returning the affected row count.
func (d *Dao) DeleteUserHistories(c context.Context, mid, bid int64, t time.Time) (rows int64, err error) {
	var res sql.Result
	if res, err = d.delUserStmt.Exec(c, mid, t, bid); err != nil {
		// BUG fix: the old log line dropped mid — the key argument when
		// diagnosing a per-user delete — so include all three values.
		log.Error("DeleteUserHistories(%v %v %v) err: %v", mid, bid, t, err)
		return
	}
	rows, err = res.RowsAffected()
	return
}
// UserHistories returns the mtime of every history row the user has for
// the given business, newest first (ORDER BY mtime DESC). The caller
// uses these timestamps to decide which tail of the list to delete.
func (d *Dao) UserHistories(c context.Context, mid, businessID int64) (res []time.Time, err error) {
var rows *tidb.Rows
if rows, err = d.allHisStmt.Query(c, mid, businessID); err != nil {
log.Error("db.UserHistories.Query error(%v)", err)
return
}
defer rows.Close()
for rows.Next() {
var t time.Time
if err = rows.Scan(&t); err != nil {
log.Error("rows.UserHistories.Scan error(%v)", err)
return
}
res = append(res, t)
}
// Surface any iteration error that terminated the loop early.
err = rows.Err()
return
}
// EarlyHistory returns the oldest mtime present for the given business.
// When the table holds no rows for it, the current time is returned with
// a nil error so callers simply have nothing to delete.
func (d *Dao) EarlyHistory(c context.Context, businessID int64) (res time.Time, err error) {
if err = d.longDB.QueryRow(c, _earlyHistorySQL, businessID).Scan(&res); err != nil {
if err == tidb.ErrNoRows {
res = time.Now()
err = nil
return
}
log.Error("db.EarlyHistory.Query error(%v)", err)
}
return
}

View File

@@ -0,0 +1,108 @@
package dao
import (
"context"
"go-common/app/service/main/history/model"
"testing"
"time"
"github.com/smartystreets/goconvey/convey"
)
func TestDaoBusinesses(t *testing.T) {
convey.Convey("Businesses", t, func(ctx convey.C) {
var (
c = context.Background()
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
res, err := d.Businesses(c)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoDeleteHistories(t *testing.T) {
convey.Convey("DeleteHistories", t, func(ctx convey.C) {
var (
c = context.Background()
bid = int64(14771787)
beginTime = time.Now()
endTime = time.Now()
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
rows, err := d.DeleteHistories(c, bid, beginTime, endTime)
ctx.Convey("Then err should be nil.rows should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(rows, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoAddHistories(t *testing.T) {
convey.Convey("AddHistories", t, func(ctx convey.C) {
var (
c = context.Background()
hs = []*model.History{}
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
err := d.AddHistories(c, hs)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
func TestDaoDeleteUserHistories(t *testing.T) {
convey.Convey("DeleteUserHistories", t, func(ctx convey.C) {
var (
c = context.Background()
mid = int64(14771787)
bid = int64(14771787)
no = time.Now()
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
rows, err := d.DeleteUserHistories(c, mid, bid, no)
ctx.Convey("Then err should be nil.rows should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(rows, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoUserHistories(t *testing.T) {
convey.Convey("UserHistories", t, func(ctx convey.C) {
var (
c = context.Background()
mid = int64(14771787)
businessID = int64(3)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
_, err := d.UserHistories(c, mid, businessID)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
func TestDaoEarlyHistory(t *testing.T) {
convey.Convey("EarlyHistory", t, func(ctx convey.C) {
var (
c = context.Background()
businessID = int64(3)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
res, err := d.EarlyHistory(c, businessID)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(res, convey.ShouldNotBeNil)
})
})
})
}

View File

@@ -0,0 +1,138 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"time"
"go-common/app/service/main/history/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
const _deleteDuration = 3600 * 12
// keyIndex returns the redis zset key that indexes one user's history
// for a business: "i_<mid>_<business>".
func keyIndex(business string, mid int64) string {
return fmt.Sprintf("i_%d_%s", mid, business)
}
// keyHistory returns the redis hash key that stores one user's history
// entries for a business: "h_<mid>_<business>".
func keyHistory(business string, mid int64) string {
return fmt.Sprintf("h_%d_%s", mid, business)
}
// HistoriesCache fetches the cached history entries described by merges
// from redis, pipelining one HGET per merge. Missing keys and
// unparseable values are skipped, not treated as errors.
// NOTE(review): d.BusinessesMap[merge.Bid] is dereferenced without a
// presence check — an unknown bid would panic; confirm upstream filters
// merges to known businesses.
func (d *Dao) HistoriesCache(c context.Context, merges []*model.Merge) (res []*model.History, err error) {
conn := d.redis.Get(c)
defer conn.Close()
for _, merge := range merges {
key := keyHistory(d.BusinessesMap[merge.Bid].Name, merge.Mid)
if err = conn.Send("HGET", key, merge.Kid); err != nil {
log.Error("conn.Send(HGET %v %v) error(%v)", key, merge.Kid, err)
return
}
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
// One Receive per queued HGET, in order.
for i := 0; i < len(merges); i++ {
var value []byte
if value, err = redis.Bytes(conn.Receive()); err != nil {
if err == redis.ErrNil {
err = nil
continue
}
log.Error("conn.Receive error(%v)", err)
return
}
h := &model.History{}
if err = json.Unmarshal(value, h); err != nil {
log.Error("json.Unmarshal(%s) error(%v)", value, err)
err = nil
continue
}
h.BusinessID = d.BusinessNames[h.Business].ID
res = append(res, h)
}
return
}
// TrimCache shrinks a user's cached history for one business to at most
// limit entries: ZRANGE 0..-limit-1 selects everything except the newest
// limit members of the index, and those ids are then deleted from both
// the index zset and the history hash.
func (d *Dao) TrimCache(c context.Context, business string, mid int64, limit int) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
aids, err := redis.Int64s(conn.Do("ZRANGE", keyIndex(business, mid), 0, -limit-1))
if err != nil {
log.Error("conn.Do(ZRANGE %v) error(%v)", keyIndex(business, mid), err)
return
}
if len(aids) == 0 {
return
}
return d.DelCache(c, business, mid, aids)
}
// DelCache removes the given aids from a user's cached history for one
// business, pipelining a ZREM on the index zset and an HDEL on the
// history hash, then draining both replies.
func (d *Dao) DelCache(c context.Context, business string, mid int64, aids []int64) (err error) {
var (
key1 = keyIndex(business, mid)
key2 = keyHistory(business, mid)
args1 = []interface{}{key1}
args2 = []interface{}{key2}
)
// Build variadic command args: key first, then every aid.
for _, aid := range aids {
args1 = append(args1, aid)
args2 = append(args2, aid)
}
conn := d.redis.Get(c)
defer conn.Close()
if err = conn.Send("ZREM", args1...); err != nil {
log.Error("conn.Send(ZREM %s,%v) error(%v)", key1, aids, err)
return
}
if err = conn.Send("HDEL", args2...); err != nil {
log.Error("conn.Send(HDEL %s,%v) error(%v)", key2, aids, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
// Two queued commands, two replies to consume.
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// DelLock tries to take the global delete-proc lock in redis. ok is true
// only when this instance acquired it; the lock auto-expires after
// _deleteDuration seconds so a crashed holder cannot block forever.
func (d *Dao) DelLock(c context.Context) (ok bool, err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	key := "his_job_del_proc"
	if ok, err = redis.Bool(conn.Do("SETNX", key, time.Now().Unix())); err != nil {
		log.Error("DelLock conn.Do(SETNX) error(%v)", err)
		return
	}
	if !ok {
		// Another instance holds the lock. BUG fix: EXPIRE used to be
		// pipelined unconditionally, so every failed attempt refreshed
		// the holder's TTL and could extend the lock indefinitely.
		return
	}
	if _, err = conn.Do("EXPIRE", key, _deleteDuration); err != nil {
		log.Error("DelLock conn.Do(EXPIRE) error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,107 @@
package dao
import (
"context"
"go-common/app/service/main/history/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
func TestDaokeyIndex(t *testing.T) {
convey.Convey("keyIndex", t, func(ctx convey.C) {
var (
business = ""
mid = int64(14771787)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
p1 := keyIndex(business, mid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaokeyHistory(t *testing.T) {
convey.Convey("keyHistory", t, func(ctx convey.C) {
var (
business = "archive"
mid = int64(14771787)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
p1 := keyHistory(business, mid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoHistoriesCache(t *testing.T) {
convey.Convey("HistoriesCache", t, func(ctx convey.C) {
var (
c = context.Background()
merges = []*model.Merge{}
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
_, err := d.HistoriesCache(c, merges)
ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
func TestDaoTrimCache(t *testing.T) {
convey.Convey("TrimCache", t, func(ctx convey.C) {
var (
c = context.Background()
business = ""
mid = int64(14771787)
limit = int(10)
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
err := d.TrimCache(c, business, mid, limit)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
func TestDaoDelCache(t *testing.T) {
convey.Convey("DelCache", t, func(ctx convey.C) {
var (
c = context.Background()
business = ""
mid = int64(14771787)
aids = []int64{}
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
err := d.DelCache(c, business, mid, aids)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
if err != nil {
ctx.So(err, convey.ShouldNotBeNil)
} else {
ctx.So(err, convey.ShouldBeNil)
}
})
})
})
}
func TestDaoDelLock(t *testing.T) {
convey.Convey("DelLock", t, func(ctx convey.C) {
var (
c = context.Background()
)
ctx.Convey("When everything gose positive", func(ctx convey.C) {
ok, err := d.DelLock(c)
ctx.Convey("Then err should be nil.ok should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(ok, convey.ShouldNotBeNil)
})
})
})
}

View File

@@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["http.go"],
    importpath = "go-common/app/job/main/history/http",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/history/conf:go_default_library",
        "//app/job/main/history/service:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,28 @@
package http
import (
"go-common/app/job/main/history/conf"
"go-common/app/job/main/history/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
// svc holds the service instance for future HTTP handlers.
var svc *service.Service
// Init starts the blademaster HTTP server and registers its routes.
// It panics if the server cannot start, since the job is unusable
// without its health endpoint.
func Init(c *conf.Config, s *service.Service) {
svc = s
engine := bm.DefaultServer(c.BM)
router(engine)
if err := engine.Start(); err != nil {
log.Error("bm.DefaultServer error(%v)", err)
panic(err)
}
}
// router registers all HTTP routes on the engine.
func router(e *bm.Engine) {
e.Ping(ping)
}
// ping is the health-check handler; writing nothing yields HTTP 200.
func ping(c *bm.Context) {
}

View File

@@ -0,0 +1,32 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["job.go"],
    importpath = "go-common/app/job/main/history/model",
    tags = ["automanaged"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,13 @@
package model
// ArgMid carries a mid plus a timestamp argument for RPC/flush calls.
type ArgMid struct {
Mid int64 // user id
Ts int64 // unix timestamp of the request
}
// Merge describes one pending merge task for a user's history.
type Merge struct {
Mid int64 `json:"mid"` // user id
Now int64 `json:"now"` // unix timestamp when the merge was enqueued
}

View File

@@ -0,0 +1,65 @@
# Gazelle-managed Bazel rules for the history-job service package.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests for the service package.
go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/history/conf:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# Core business logic: consumption, merging, flushing and deletion.
go_library(
    name = "go_default_library",
    srcs = [
        "delete.go",
        "merge.go",
        "progress.go",
        "service.go",
    ],
    importpath = "go-common/app/job/main/history/service",
    tags = ["automanaged"],
    deps = [
        "//app/interface/main/history/model:go_default_library",
        "//app/job/main/history/conf:go_default_library",
        "//app/job/main/history/dao:go_default_library",
        "//app/job/main/history/model:go_default_library",
        "//app/service/main/history/model:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/stat/prom:go_default_library",
        "//library/sync/pipeline:go_default_library",
        "//library/sync/pipeline/fanout:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/golang.org/x/time/rate:go_default_library",
    ],
)

# Sources of this package only; private to the package.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Transitive source closure consumed by repository-level targets.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# NOTE(review): duplicate load() below — recent Bazel versions reject
# re-binding already-loaded symbols; confirm against the pinned Bazel
# version before regenerating with gazelle.
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

View File

@@ -0,0 +1,74 @@
package service
import (
"context"
"time"
"go-common/library/log"
"go-common/library/stat/prom"
)
// _businessArchive is presumably the business id of archive (video)
// history; it is not referenced in this file's visible code — confirm
// usage elsewhere before removing.
const _businessArchive = 3

// _delLen looks like a legacy per-batch delete size; deletion is now
// time-sliced (see deleteproc) and this constant is not referenced in
// the visible code — TODO confirm before removing.
const _delLen = 1000
// shouldDelete reports whether the current hour is inside the
// configured deletion window [DeleteStartHour, DeleteEndHour).
func (s *Service) shouldDelete() bool {
	h := time.Now().Hour()
	return h >= s.c.Job.DeleteStartHour && h < s.c.Job.DeleteEndHour
}
// deleteproc purges expired history rows from the DB, forever.
// Inside the configured deletion window it acquires a distributed lock
// (so only one job instance cleans at a time), then for every business
// with a positive TTL it walks forward in DeleteStep-sized time slices,
// deleting each slice repeatedly until it is empty before advancing to
// the next one.
func (s *Service) deleteproc() {
	for {
		now := time.Now()
		if !s.shouldDelete() {
			time.Sleep(time.Minute)
			continue
		}
		if ok, err := s.dao.DelLock(context.Background()); err != nil {
			time.Sleep(time.Second)
			continue
		} else if !ok {
			// Another instance holds the lock; back off for a long time.
			log.Info("not get lock wait.")
			time.Sleep(time.Hour * 6)
			continue
		}
		log.Info("start clean db")
		bs, err := s.dao.Businesses(context.Background())
		if err != nil {
			time.Sleep(time.Second)
			continue
		}
		for _, b := range bs {
			// TTL <= 0 means this business keeps its history forever.
			if b.TTL <= 0 {
				continue
			}
			// Everything strictly older than now-TTL is expired.
			endTime := time.Unix(now.Unix()-b.TTL, 0)
			startTime, err := s.dao.EarlyHistory(context.Background(), b.ID)
			if err != nil {
				continue
			}
			log.Info("start clean business %s start:%v end: %v", b.Name, startTime, endTime)
			var count int64
			for startTime.Before(endTime) {
				if !s.shouldDelete() {
					// Deletion window closed; resume at the next window.
					log.Info("%s not delete time.", b.Name)
					break
				}
				partTime := startTime.Add(time.Duration(s.c.Job.DeleteStep))
				rows, err := s.dao.DeleteHistories(context.Background(), b.ID, startTime, partTime)
				if err != nil {
					// FIX: check the error before recording metrics so a
					// failed delete neither pollutes the prom counter nor
					// the per-business row count.
					time.Sleep(time.Second)
					continue
				}
				prom.BusinessInfoCount.Add("del-"+b.Name, rows)
				count += rows
				// Advance to the next slice only once the current one is
				// fully drained (rows == 0).
				if rows == 0 {
					startTime = partTime
				}
			}
			log.Info("end clean business %s, rows: %v", b.Name, count)
		}
		log.Info("end clean db")
		time.Sleep(time.Hour * 6)
	}
}

View File

@@ -0,0 +1,119 @@
package service
import (
"context"
"encoding/json"
"fmt"
"hash/crc32"
"sort"
"strings"
"time"
"go-common/app/service/main/history/model"
"go-common/library/log"
"go-common/library/stat/prom"
"go-common/library/sync/pipeline"
)
// serviceConsumeproc consumes history messages produced by
// history-service and feeds each record into the merge pipeline keyed
// by "mid-bid-kid", so duplicate updates collapse before being flushed
// to the DB. When Job.IgnoreMsg is set, messages are committed and
// dropped without processing.
func (s *Service) serviceConsumeproc() {
	var (
		err  error
		msgs = s.serviceHisSub.Messages()
	)
	for {
		msg, ok := <-msgs
		if !ok {
			log.Error("s.serviceConsumeproc closed")
			return
		}
		if s.c.Job.IgnoreMsg {
			// Drop mode: acknowledge the offset without processing.
			err = msg.Commit()
			log.Info("serviceConsumeproc key:%s partition:%d offset:%d err: %+v, ts:%v ignore", msg.Key, msg.Partition, msg.Offset, err, msg.Timestamp)
			continue
		}
		ms := make([]*model.Merge, 0, 32)
		if err = json.Unmarshal(msg.Value, &ms); err != nil {
			// Malformed payload: skip it (note: offset is not committed).
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		for _, x := range ms {
			key := fmt.Sprintf("%d-%d-%d", x.Mid, x.Bid, x.Kid)
			s.merge.SyncAdd(context.Background(), key, x)
		}
		// FIX: assign with = instead of := so the outer err is reused
		// rather than shadowed (flagged by go vet's shadow analyzer).
		err = msg.Commit()
		log.Info("serviceConsumeproc key:%s partition:%d offset:%d err: %+v, len(%v)", msg.Key, msg.Partition, msg.Offset, err, len(ms))
	}
}
// serviceFlush sorts the merges by mid and writes them to storage in
// batches of roughly Job.ServiceBatch entries; a run of identical mids
// is never split across two batches. FlushCache's error is intentionally
// ignored here — it logs and retries internally.
func (s *Service) serviceFlush(merges []*model.Merge) {
	// Group identical mids together so each user flushes as a unit.
	sort.Slice(merges, func(i, j int) bool { return merges[i].Mid < merges[j].Mid })
	batch := make([]*model.Merge, 0, len(merges))
	for _, m := range merges {
		// Flush the accumulated batch once it is full AND the next merge
		// belongs to a different user.
		if len(batch) >= s.c.Job.ServiceBatch && batch[len(batch)-1].Mid != m.Mid {
			s.FlushCache(context.Background(), batch)
			batch = batch[:0:0]
		}
		batch = append(batch, m)
	}
	if len(batch) > 0 {
		s.FlushCache(context.Background(), batch)
	}
}
// FlushCache moves merged history entries from the cache into the DB.
// It loads the full records from the history cache, writes them to the
// DB with unbounded retries (throttled by the rate limiter), and then
// asynchronously trims each user's cache to the configured length.
func (s *Service) FlushCache(c context.Context, merges []*model.Merge) (err error) {
	var histories []*model.History
	if histories, err = s.dao.HistoriesCache(c, merges); err != nil {
		log.Error("historyDao.Cache(%+v) error(%v)", merges, err)
		return
	}
	prom.BusinessInfoCount.Add("histories-db", int64(len(histories)))
	// Throttle DB writes; a limiter error is logged but does not abort
	// the flush.
	if err = s.limit.WaitN(context.Background(), len(histories)); err != nil {
		log.Error("s.limit.WaitN(%v) err: %+v", len(histories), err)
	}
	// Retry forever: the insert must eventually succeed, otherwise the
	// cached records would be lost when the cache is trimmed below.
	for {
		if err = s.dao.AddHistories(c, histories); err != nil {
			prom.BusinessInfoCount.Add("retry", int64(len(histories)))
			time.Sleep(time.Duration(s.c.Job.RetryTime))
			continue
		}
		break
	}
	// After a successful DB write, trim per-user caches in the background.
	s.cache.Do(c, func(c context.Context) {
		for _, merge := range merges {
			limit := s.c.Job.CacheLen
			s.dao.TrimCache(context.Background(), merge.Business, merge.Mid, limit)
		}
	})
	return
}
// initMerge builds and starts the dedup pipeline. Keys have the form
// "mid-bid-kid" and are sharded by the mid prefix, so one user's
// updates always land on the same channel; within each collected batch
// only the newest record per key (greatest Time) is kept before the
// batch is handed to serviceFlush.
func (s *Service) initMerge() {
	s.merge = pipeline.NewPipeline(s.c.Merge)
	// Shard by the mid portion of the key.
	s.merge.Split = func(a string) int {
		midStr := strings.Split(a, "-")[0]
		return int(crc32.ChecksumIEEE([]byte(midStr)))
	}
	s.merge.Do = func(c context.Context, ch int, values map[string][]interface{}) {
		var merges []*model.Merge
		for _, vs := range values {
			var t int64
			var m *model.Merge
			for _, v := range vs {
				prom.BusinessInfoCount.Incr("dbus-msg")
				// Keep the record with the greatest Time. FIX: t was never
				// updated, so the old code effectively kept the LAST element
				// of vs rather than the maximum-Time one.
				if mv := v.(*model.Merge); mv.Time >= t {
					m = mv
					t = mv.Time
				}
			}
			// Sampled debug logging (one mid in a thousand).
			if m.Mid%1000 == 0 {
				log.Info("debug: merge mid:%v, ch:%v, value:%+v", m.Mid, ch, m)
			}
			merges = append(merges, m)
		}
		prom.BusinessInfoCount.Add(fmt.Sprintf("ch-%v", ch), int64(len(merges)))
		s.serviceFlush(merges)
	}
	s.merge.Start()
}

View File

@@ -0,0 +1,38 @@
package service
import (
"context"
"encoding/json"
"go-common/app/interface/main/history/model"
"go-common/library/log"
)
// subproc consumes raw history messages and writes each record to the
// DB via add. The offset is committed BEFORE processing, so delivery is
// at-most-once: a crash after Commit but before the write loses the
// message — presumably an accepted trade-off here; confirm with the
// databus owners.
func (s *Service) subproc() {
	for {
		msg, ok := <-s.sub.Messages()
		if !ok {
			log.Info("subproc exit")
			return
		}
		msg.Commit()
		m := &model.History{}
		if err := json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal() error(%v)", err)
			continue
		}
		// Skip records missing a user id or archive id.
		if m.Mid != 0 && m.Aid != 0 {
			s.add(m)
		}
	}
}
// add persists one history record, retrying up to three times; every
// failure is logged, and after the final failure the record is dropped.
func (s *Service) add(m *model.History) {
	const attempts = 3
	for try := 0; try < attempts; try++ {
		if err := s.dao.Add(context.Background(), m); err != nil {
			log.Error("s.dao.Add() err:%+v", err)
			continue
		}
		return
	}
}

View File

@@ -0,0 +1,205 @@
package service
import (
"context"
"encoding/json"
"strconv"
"sync"
"time"
"go-common/app/job/main/history/conf"
"go-common/app/job/main/history/dao"
"go-common/app/job/main/history/model"
hmdl "go-common/app/service/main/history/model"
"go-common/library/log"
"go-common/library/queue/databus"
"go-common/library/sync/pipeline"
"go-common/library/sync/pipeline/fanout"
"go-common/library/xstr"
"golang.org/x/time/rate"
)
const (
	// _chanSize is the buffer size of each merge channel and of doneChan.
	_chanSize = 1024
	// _runtineSzie (sic — "routine size"; renaming would touch every
	// user, so the typo is kept) is the number of merge worker
	// goroutines; message keys are sharded across them by key % size.
	_runtineSzie = 32
	// _retryCnt bounds the flush RPC retries per batch.
	_retryCnt = 3
)
// message wraps a databus message in an intrusive singly-linked list so
// that offsets can be tracked (and, in principle, committed) in arrival
// order once processing completes.
type message struct {
	next *message         // next message in arrival order
	data *databus.Message // the underlying databus message
	done bool             // set once the message has been flushed
}
// Service struct of service.
// It owns the databus consumers, the merge/dedup pipeline and the
// storage DAO for the history job.
type Service struct {
	c      *conf.Config    // global configuration
	waiter *sync.WaitGroup // waited on in Close for worker shutdown
	dao    *dao.Dao        // cache + DB access
	// databus subscriptions
	hisSub        *databus.Databus // legacy history topic (consumeproc)
	serviceHisSub *databus.Databus // history-service topic (serviceConsumeproc)
	sub           *databus.Databus // raw history topic (subproc)
	// worker plumbing
	mergeChan []chan *message // one channel per merge goroutine, sharded by key
	doneChan  chan []*message // processed batches, for in-order offset tracking
	merge     *pipeline.Pipeline
	// businesses maps business id to its metadata, loaded from the DAO.
	businesses map[int64]*hmdl.Business
	cache      *fanout.Fanout // async cache-trim executor
	limit      *rate.Limiter  // throttles DB writes
}
// New create service instance and return.
// It wires the DAO, the databus subscriptions, the merge pipeline and
// all worker goroutines. FIX: the pipeline and the per-shard merge
// channels are now initialized BEFORE the consumer goroutines start —
// consumeproc sends on s.mergeChan and serviceConsumeproc calls
// s.merge.SyncAdd, so starting the consumers first could send on a nil
// channel (blocking forever) or dereference a nil pipeline.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:             c,
		dao:           dao.New(c),
		waiter:        new(sync.WaitGroup),
		hisSub:        databus.New(c.HisSub),
		serviceHisSub: databus.New(c.ServiceHisSub),
		sub:           databus.New(c.Sub),
		mergeChan:     make([]chan *message, _chanSize),
		doneChan:      make(chan []*message, _chanSize),
		cache:         fanout.New("cache"),
		limit:         rate.NewLimiter(rate.Limit(c.Job.QPSLimit), c.Job.ServiceBatch*2),
	}
	s.businesses = s.dao.BusinessesMap
	// Dedup pipeline must exist before serviceConsumeproc runs.
	s.initMerge()
	// Merge workers and their channels must exist before consumeproc runs.
	for i := 0; i < _runtineSzie; i++ {
		ch := make(chan *message, _chanSize)
		s.mergeChan[i] = ch
		go s.mergeproc(ch)
	}
	go s.subproc()
	go s.consumeproc()
	go s.serviceConsumeproc()
	go s.deleteproc()
	return
}
// consumeproc consumes the legacy history topic. Each message carries a
// batch of merges and is dispatched to a fixed merge goroutine chosen
// by key%_runtineSzie, so one user is always handled by the same
// worker. The head/last linked list plus doneChan implement in-order
// offset tracking; however, offsets are currently committed eagerly per
// message (msg.Commit below) and the ordered-commit path at the bottom
// is deliberately disabled.
func (s *Service) consumeproc() {
	var (
		err        error
		n          int
		head, last *message
		msgs       = s.hisSub.Messages()
	)
	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				log.Error("s.consumeproc closed")
				return
			}
			// marked head to first commit
			m := &message{data: msg}
			if head == nil {
				head = m
				last = m
			} else {
				last.next = m
				last = m
			}
			if n, err = strconv.Atoi(msg.Key); err != nil {
				// Non-numeric key: n keeps its previous value, so the
				// message still lands on some (stale) shard.
				log.Error("strconv.Atoi(%s) error(%v)", msg.Key, err)
			}
			// use specify goruntine to flush
			s.mergeChan[n%_runtineSzie] <- m
			msg.Commit()
			log.Info("consumeproc key:%s partition:%d offset:%d", msg.Key, msg.Partition, msg.Offset)
		case done := <-s.doneChan:
			// merge partitions to commit offset
			commits := make(map[int32]*databus.Message)
			for _, d := range done {
				d.done = true
			}
			// Advance head past the contiguous prefix of finished messages,
			// keeping the newest finished message per partition.
			for ; head != nil && head.done; head = head.next {
				commits[head.data.Partition] = head.data
			}
			// Ordered commit intentionally disabled; offsets are committed
			// eagerly above.
			// for _, m := range commits {
			// m.Commit()
			// }
		}
	}
}
// mergeproc aggregates merges arriving on one shard channel. For every
// mid it keeps the SMALLEST Now timestamp seen (so a later flush covers
// everything from that point forward), and flushes either when the map
// reaches Job.Max entries or on the Job.Expire tick. After each flush
// the processed messages are reported on doneChan for offset tracking.
func (s *Service) mergeproc(c chan *message) {
	var (
		err    error
		max    = s.c.Job.Max
		merges = make(map[int64]int64, 10240)
		marked = make([]*message, 0, 10240)
		ticker = time.NewTicker(time.Duration(s.c.Job.Expire))
	)
	for {
		select {
		case msg, ok := <-c:
			if !ok {
				log.Error("s.mergeproc closed")
				return
			}
			ms := make([]*model.Merge, 0, 32)
			if err = json.Unmarshal(msg.data.Value, &ms); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.data.Value, err)
				continue
			}
			for _, m := range ms {
				// Keep the earliest timestamp per mid.
				if now, ok := merges[m.Mid]; !ok || now > m.Now {
					merges[m.Mid] = m.Now
				}
			}
			marked = append(marked, msg)
			// Not full yet: keep accumulating.
			if len(merges) < max {
				continue
			}
		case <-ticker.C:
			// Periodic flush even when the batch is not full.
		}
		if len(merges) > 0 {
			s.flush(merges)
			s.doneChan <- marked
			merges = make(map[int64]int64, 10240)
			marked = make([]*message, 0, 10240)
		}
	}
}
// flush asks history-service to persist the given mids, in batches of
// at most Job.Batch, using the oldest timestamp in res as the common
// watermark. Each batch is retried up to _retryCnt times with a short
// pause; a batch that still fails is skipped.
func (s *Service) flush(res map[int64]int64) {
	mids := make([]int64, 0, len(res))
	var ts int64
	for mid, now := range res {
		// Track the minimum timestamp across all users.
		if ts == 0 || now < ts {
			ts = now
		}
		mids = append(mids, mid)
	}
	size := s.c.Job.Batch
	for start := 0; start < len(mids); start += size {
		end := start + size
		if end > len(mids) {
			end = len(mids)
		}
		for i := 0; i < _retryCnt; i++ {
			if err := s.dao.Flush(context.Background(), xstr.JoinInts(mids[start:end]), ts); err == nil {
				break
			}
			time.Sleep(time.Millisecond * 100)
		}
	}
}
// Ping ping .
// Liveness check; it performs no dependency checks and always succeeds.
func (s *Service) Ping() {}
// Close shuts down all databus subscriptions and the merge pipeline,
// then waits for in-flight workers to drain.
// FIX: s.hisSub was previously never closed, leaking its consumer and
// leaving consumeproc receiving messages during shutdown.
func (s *Service) Close() {
	if s.hisSub != nil {
		s.hisSub.Close()
	}
	if s.sub != nil {
		s.sub.Close()
	}
	if s.serviceHisSub != nil {
		s.serviceHisSub.Close()
	}
	s.merge.Close()
	s.waiter.Wait()
}

View File

@@ -0,0 +1,30 @@
package service
import (
"flag"
"fmt"
"path/filepath"
"testing"
"go-common/app/job/main/history/conf"
. "github.com/smartystreets/goconvey/convey"
)
// s is the shared Service under test, built once in init.
var s *Service

// init loads the test configuration and constructs the service for the
// whole test package.
// FIX: a conf.Init failure previously only printed a message and then
// proceeded to New(conf.Conf) with a broken/nil config, producing a
// confusing downstream panic; fail fast with the real error instead.
func init() {
	dir, _ := filepath.Abs("../cmd/history-job-test.toml")
	flag.Set("conf", dir)
	if err := conf.Init(); err != nil {
		panic(fmt.Sprintf("conf.Init() error(%v)", err))
	}
	s = New(conf.Conf)
}
// Test_Ping checks that the liveness probe runs without panicking.
func Test_Ping(t *testing.T) {
	Convey("Test_Ping", t, func() {
		s.Ping()
	})
}