Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

@@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/history/conf:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "delete.go",
        "merge.go",
        "progress.go",
        "service.go",
    ],
    importpath = "go-common/app/job/main/history/service",
    tags = ["automanaged"],
    deps = [
        "//app/interface/main/history/model:go_default_library",
        "//app/job/main/history/conf:go_default_library",
        "//app/job/main/history/dao:go_default_library",
        "//app/job/main/history/model:go_default_library",
        "//app/service/main/history/model:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/stat/prom:go_default_library",
        "//library/sync/pipeline:go_default_library",
        "//library/sync/pipeline/fanout:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/golang.org/x/time/rate:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

@@ -0,0 +1,74 @@
package service

import (
    "context"
    "time"

    "go-common/library/log"
    "go-common/library/stat/prom"
)

const _businessArchive = 3

const _delLen = 1000

// shouldDelete reports whether the current hour falls inside the configured deletion window.
func (s *Service) shouldDelete() bool {
    now := time.Now()
    return now.Hour() >= s.c.Job.DeleteStartHour && now.Hour() < s.c.Job.DeleteEndHour
}

// deleteproc periodically purges expired history rows, business by business.
func (s *Service) deleteproc() {
    for {
        now := time.Now()
        if !s.shouldDelete() {
            time.Sleep(time.Minute)
            continue
        }
        // Grab the distributed lock so only one job instance cleans the DB at a time.
        if ok, err := s.dao.DelLock(context.Background()); err != nil {
            time.Sleep(time.Second)
            continue
        } else if !ok {
            log.Info("did not get lock, wait.")
            time.Sleep(time.Hour * 6)
            continue
        }
        log.Info("start clean db")
        bs, err := s.dao.Businesses(context.Background())
        if err != nil {
            time.Sleep(time.Second)
            continue
        }
        for _, b := range bs {
            if b.TTL <= 0 {
                continue
            }
            // Delete everything older than now - TTL, stepping forward from the earliest record.
            endTime := time.Unix(now.Unix()-b.TTL, 0)
            startTime, err := s.dao.EarlyHistory(context.Background(), b.ID)
            if err != nil {
                continue
            }
            log.Info("start clean business %s start:%v end:%v", b.Name, startTime, endTime)
            var count int64
            for startTime.Before(endTime) {
                if !s.shouldDelete() {
                    log.Info("%s not delete time.", b.Name)
                    break
                }
                partTime := startTime.Add(time.Duration(s.c.Job.DeleteStep))
                rows, err := s.dao.DeleteHistories(context.Background(), b.ID, startTime, partTime)
                prom.BusinessInfoCount.Add("del-"+b.Name, rows)
                if err != nil {
                    time.Sleep(time.Second)
                    continue
                }
                count += rows
                // Only advance to the next time range once this range has been fully deleted.
                if rows == 0 {
                    startTime = partTime
                }
            }
            log.Info("end clean business %s, rows: %v", b.Name, count)
        }
        log.Info("end clean db")
        time.Sleep(time.Hour * 6)
    }
}

@@ -0,0 +1,119 @@
package service

import (
    "context"
    "encoding/json"
    "fmt"
    "hash/crc32"
    "sort"
    "strings"
    "time"

    "go-common/app/service/main/history/model"
    "go-common/library/log"
    "go-common/library/stat/prom"
    "go-common/library/sync/pipeline"
)

// serviceConsumeproc consumes merge messages from databus and feeds them into the merge pipeline.
func (s *Service) serviceConsumeproc() {
    var (
        err  error
        msgs = s.serviceHisSub.Messages()
    )
    for {
        msg, ok := <-msgs
        if !ok {
            log.Error("s.serviceConsumeproc closed")
            return
        }
        if s.c.Job.IgnoreMsg {
            err = msg.Commit()
            log.Info("serviceConsumeproc key:%s partition:%d offset:%d err:%+v ts:%v ignore", msg.Key, msg.Partition, msg.Offset, err, msg.Timestamp)
            continue
        }
        ms := make([]*model.Merge, 0, 32)
        if err = json.Unmarshal(msg.Value, &ms); err != nil {
            log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
            continue
        }
        for _, x := range ms {
            key := fmt.Sprintf("%d-%d-%d", x.Mid, x.Bid, x.Kid)
            s.merge.SyncAdd(context.Background(), key, x)
        }
        err = msg.Commit()
        log.Info("serviceConsumeproc key:%s partition:%d offset:%d err:%+v len(%v)", msg.Key, msg.Partition, msg.Offset, err, len(ms))
    }
}

// serviceFlush flushes merged records in batches, keeping records of the same mid in one batch.
func (s *Service) serviceFlush(merges []*model.Merge) {
    // Group records with the same mid together.
    sort.Slice(merges, func(i, j int) bool { return merges[i].Mid < merges[j].Mid })
    var ms []*model.Merge
    for _, m := range merges {
        if (len(ms) < s.c.Job.ServiceBatch) || (ms[len(ms)-1].Mid == m.Mid) {
            ms = append(ms, m)
            continue
        }
        s.FlushCache(context.Background(), ms)
        ms = []*model.Merge{m}
    }
    if len(ms) > 0 {
        s.FlushCache(context.Background(), ms)
    }
}

// FlushCache writes merged history records from the cache into the DB, then trims the cache.
func (s *Service) FlushCache(c context.Context, merges []*model.Merge) (err error) {
    var histories []*model.History
    if histories, err = s.dao.HistoriesCache(c, merges); err != nil {
        log.Error("historyDao.Cache(%+v) error(%v)", merges, err)
        return
    }
    prom.BusinessInfoCount.Add("histories-db", int64(len(histories)))
    // Rate-limit DB writes.
    if err = s.limit.WaitN(context.Background(), len(histories)); err != nil {
        log.Error("s.limit.WaitN(%v) err:%+v", len(histories), err)
    }
    // Retry until the whole batch has been written.
    for {
        if err = s.dao.AddHistories(c, histories); err != nil {
            prom.BusinessInfoCount.Add("retry", int64(len(histories)))
            time.Sleep(time.Duration(s.c.Job.RetryTime))
            continue
        }
        break
    }
    s.cache.Do(c, func(c context.Context) {
        for _, merge := range merges {
            limit := s.c.Job.CacheLen
            s.dao.TrimCache(context.Background(), merge.Business, merge.Mid, limit)
        }
    })
    return
}

// initMerge sets up the merge pipeline: messages are sharded by mid and
// deduplicated so only the latest record per key gets flushed.
func (s *Service) initMerge() {
    s.merge = pipeline.NewPipeline(s.c.Merge)
    s.merge.Split = func(a string) int {
        midStr := strings.Split(a, "-")[0]
        return int(crc32.ChecksumIEEE([]byte(midStr)))
    }
    s.merge.Do = func(c context.Context, ch int, values map[string][]interface{}) {
        var merges []*model.Merge
        for _, vs := range values {
            var t int64
            var m *model.Merge
            for _, v := range vs {
                prom.BusinessInfoCount.Incr("dbus-msg")
                // Keep the record with the latest Time for each key.
                if v.(*model.Merge).Time >= t {
                    m = v.(*model.Merge)
                    t = v.(*model.Merge).Time
                }
            }
            if m.Mid%1000 == 0 {
                log.Info("debug: merge mid:%v, ch:%v, value:%+v", m.Mid, ch, m)
            }
            merges = append(merges, m)
        }
        prom.BusinessInfoCount.Add(fmt.Sprintf("ch-%v", ch), int64(len(merges)))
        s.serviceFlush(merges)
    }
    s.merge.Start()
}

@@ -0,0 +1,38 @@
package service

import (
    "context"
    "encoding/json"

    "go-common/app/interface/main/history/model"
    "go-common/library/log"
)

// subproc consumes play-progress messages and writes them through to the DB.
func (s *Service) subproc() {
    for {
        msg, ok := <-s.sub.Messages()
        if !ok {
            log.Info("subproc exit")
            return
        }
        msg.Commit()
        m := &model.History{}
        if err := json.Unmarshal(msg.Value, m); err != nil {
            log.Error("json.Unmarshal() error(%v)", err)
            continue
        }
        if m.Mid != 0 && m.Aid != 0 {
            s.add(m)
        }
    }
}

// add writes a single history record, retrying up to three times before giving up.
func (s *Service) add(m *model.History) {
    for j := 0; j < 3; j++ {
        err := s.dao.Add(context.Background(), m)
        if err == nil {
            return
        }
        log.Error("s.dao.Add() err:%+v", err)
    }
}

@@ -0,0 +1,205 @@
package service

import (
    "context"
    "encoding/json"
    "strconv"
    "sync"
    "time"

    "go-common/app/job/main/history/conf"
    "go-common/app/job/main/history/dao"
    "go-common/app/job/main/history/model"
    hmdl "go-common/app/service/main/history/model"
    "go-common/library/log"
    "go-common/library/queue/databus"
    "go-common/library/sync/pipeline"
    "go-common/library/sync/pipeline/fanout"
    "go-common/library/xstr"

    "golang.org/x/time/rate"
)

const (
    _chanSize    = 1024
    _routineSize = 32
    _retryCnt    = 3
)

// message wraps a databus message so offsets can be committed in arrival order.
type message struct {
    next *message
    data *databus.Message
    done bool
}

// Service struct of service.
type Service struct {
    c             *conf.Config
    waiter        *sync.WaitGroup
    dao           *dao.Dao
    hisSub        *databus.Databus
    serviceHisSub *databus.Databus
    sub           *databus.Databus
    mergeChan     []chan *message
    doneChan      chan []*message
    merge         *pipeline.Pipeline
    businesses    map[int64]*hmdl.Business
    cache         *fanout.Fanout
    limit         *rate.Limiter
}

// New create service instance and return.
func New(c *conf.Config) (s *Service) {
    s = &Service{
        c:             c,
        dao:           dao.New(c),
        waiter:        new(sync.WaitGroup),
        hisSub:        databus.New(c.HisSub),
        serviceHisSub: databus.New(c.ServiceHisSub),
        sub:           databus.New(c.Sub),
        mergeChan:     make([]chan *message, _chanSize),
        doneChan:      make(chan []*message, _chanSize),
        cache:         fanout.New("cache"),
        limit:         rate.NewLimiter(rate.Limit(c.Job.QPSLimit), c.Job.ServiceBatch*2),
    }
    s.businesses = s.dao.BusinessesMap
    go s.subproc()
    go s.consumeproc()
    go s.serviceConsumeproc()
    go s.deleteproc()
    s.initMerge()
    for i := 0; i < _routineSize; i++ {
        c := make(chan *message, _chanSize)
        s.mergeChan[i] = c
        go s.mergeproc(c)
    }
    return
}

// consumeproc reads history messages and dispatches them to merge workers by key.
func (s *Service) consumeproc() {
    var (
        err        error
        n          int
        head, last *message
        msgs       = s.hisSub.Messages()
    )
    for {
        select {
        case msg, ok := <-msgs:
            if !ok {
                log.Error("s.consumeproc closed")
                return
            }
            // Keep messages in a linked list in arrival order so offsets can be
            // committed from the head once their batch is done.
            m := &message{data: msg}
            if head == nil {
                head = m
                last = m
            } else {
                last.next = m
                last = m
            }
            if n, err = strconv.Atoi(msg.Key); err != nil {
                log.Error("strconv.Atoi(%s) error(%v)", msg.Key, err)
            }
            // Dispatch to a fixed goroutine per key so the same key is always flushed by the same worker.
            s.mergeChan[n%_routineSize] <- m
            msg.Commit()
            log.Info("consumeproc key:%s partition:%d offset:%d", msg.Key, msg.Partition, msg.Offset)
        case done := <-s.doneChan:
            // Merge partitions to commit offsets.
            commits := make(map[int32]*databus.Message)
            for _, d := range done {
                d.done = true
            }
            for ; head != nil && head.done; head = head.next {
                commits[head.data.Partition] = head.data
            }
            // for _, m := range commits {
            //     m.Commit()
            // }
        }
    }
}

// mergeproc batches messages per worker and flushes them once the batch is
// large enough or the ticker fires.
func (s *Service) mergeproc(c chan *message) {
    var (
        err    error
        max    = s.c.Job.Max
        merges = make(map[int64]int64, 10240)
        marked = make([]*message, 0, 10240)
        ticker = time.NewTicker(time.Duration(s.c.Job.Expire))
    )
    for {
        select {
        case msg, ok := <-c:
            if !ok {
                log.Error("s.mergeproc closed")
                return
            }
            ms := make([]*model.Merge, 0, 32)
            if err = json.Unmarshal(msg.data.Value, &ms); err != nil {
                log.Error("json.Unmarshal(%s) error(%v)", msg.data.Value, err)
                continue
            }
            // Keep the earliest timestamp per mid.
            for _, m := range ms {
                if now, ok := merges[m.Mid]; !ok || now > m.Now {
                    merges[m.Mid] = m.Now
                }
            }
            marked = append(marked, msg)
            if len(merges) < max {
                continue
            }
        case <-ticker.C:
        }
        if len(merges) > 0 {
            s.flush(merges)
            s.doneChan <- marked
            merges = make(map[int64]int64, 10240)
            marked = make([]*message, 0, 10240)
        }
    }
}

// flush writes the merged mids to the DB in batches, retrying each batch up to _retryCnt times.
func (s *Service) flush(res map[int64]int64) {
    var (
        err   error
        ts    int64
        mids  []int64
        batch = s.c.Job.Batch
    )
    for mid, now := range res {
        if now < ts || ts == 0 {
            ts = now
        }
        mids = append(mids, mid)
    }
    for len(mids) > 0 {
        if len(mids) < batch {
            batch = len(mids)
        }
        for i := 0; i < _retryCnt; i++ {
            if err = s.dao.Flush(context.Background(), xstr.JoinInts(mids[:batch]), ts); err == nil {
                break
            }
            time.Sleep(time.Millisecond * 100)
        }
        mids = mids[batch:]
    }
}

// Ping ping.
func (s *Service) Ping() {}

// Close closes subscriptions and waits for in-flight work to finish.
func (s *Service) Close() {
    if s.sub != nil {
        s.sub.Close()
    }
    if s.serviceHisSub != nil {
        s.serviceHisSub.Close()
    }
    s.merge.Close()
    s.waiter.Wait()
}

@@ -0,0 +1,30 @@
package service

import (
    "flag"
    "fmt"
    "path/filepath"
    "testing"

    "go-common/app/job/main/history/conf"

    . "github.com/smartystreets/goconvey/convey"
)

var s *Service

func init() {
    dir, _ := filepath.Abs("../cmd/history-job-test.toml")
    flag.Set("conf", dir)
    err := conf.Init()
    if err != nil {
        fmt.Printf("conf.Init() error(%v)", err)
    }
    s = New(conf.Conf)
}

func Test_Ping(t *testing.T) {
    Convey("Test_Ping", t, func() {
        s.Ping()
    })
}