Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"databus.go",
"loader.go",
"reply_set.go",
"reply_zset.go",
"service.go",
"stat.go",
"statistics.go",
],
importpath = "go-common/app/job/main/reply-feed/service",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/reply-feed/conf:go_default_library",
"//app/job/main/reply-feed/dao:go_default_library",
"//app/job/main/reply-feed/model:go_default_library",
"//library/cache/redis:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/netutil:go_default_library",
"//library/queue/databus:go_default_library",
"//library/sync/pipeline/fanout:go_default_library",
"//vendor/github.com/ivpusic/grpool:go_default_library",
"//vendor/github.com/robfig/cron:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,201 @@
package service
import (
"context"
"encoding/json"
"go-common/app/job/main/reply-feed/model"
"go-common/library/log"
)
// func (s *Service) eventproc() {
// defer s.waiter.Done()
// msgs := s.eventConsumer.Messages()
// ctx := context.Background()
// for {
// msg, ok := <-msgs
// if !ok {
// log.Warn("databus consumer channel has been closed.")
// return
// }
// if msg.Topic != s.c.Databus.Event.Topic {
// log.Warn("wrong topic actual (%s) expect (%s)", msg.Topic, s.c.Databus.Stats.Topic)
// continue
// }
// value := &model.EventMsg{}
// if err := json.Unmarshal(msg.Value, value); err != nil {
// log.Error("json.Unmarshal(%v) error(%v)", msg.Value, err)
// continue
// }
// switch value.Action {
// case model.DatabusActionReIdx:
// s.setReplySetBatch(ctx, value.Oid, value.Tp)
// s.upsertZSet(ctx, value.Oid, value.Tp)
// default:
// continue
// }
// msg.Commit()
// log.Info("consumer topic:%s, partitionId:%d, offset:%d, Key:%s, Value:%s", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
// }
// }
func (s *Service) statsproc() {
defer s.waiter.Done()
msgs := s.statsConsumer.Messages()
for {
msg, ok := <-msgs
if !ok {
log.Warn("databus consumer channel has been closed.")
return
}
if msg.Topic != s.c.Databus.Stats.Topic {
log.Warn("wrong topic actual (%s) expect (%s)", msg.Topic, s.c.Databus.Stats.Topic)
continue
}
value := &model.StatsMsg{}
if err := json.Unmarshal(msg.Value, value); err != nil {
log.Error("json.Unmarshal(%v) error(%v)", msg.Value, err)
continue
}
// skip dirty data (malformed or incomplete messages)
if value.Reply == nil || value.Subject == nil || (value.Action == model.DatabusActionReport && value.Report == nil) {
log.Error("illegal message (%v)", value)
continue
}
ctx := context.Background()
// reply-list flow
s.replyListFlow(ctx, value)
// statistics flow
s.statisticsFlow(ctx, value)
msg.Commit()
log.Info("consumer topic:%s, partitionId:%d, offset:%d, Key:%s, Value:%s", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
}
}
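// statisticsFlow updates the in-memory per-slot counters: it decides whether the
// affected reply is currently a hot reply under the slot's algorithm, records UV,
// and then increments the hot/total counters matching the databus action.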
func (s *Service) statisticsFlow(ctx context.Context, value *model.StatsMsg) {
var (
reply = value.Reply
oid = value.Subject.Oid
tp = value.Subject.Type
rpID = reply.RpID
isHotReply bool
name string
err error
)
s.statisticsLock.RLock()
name = s.statisticsStats[value.Sharding()].Name
s.statisticsLock.RUnlock()
if value.HotCondition() {
if !reply.IsRoot() {
rpID = reply.Root
}
if name == model.DefaultAlgorithm {
if isHotReply, err = s.dao.IsOriginHot(ctx, oid, rpID, tp); err != nil {
return
}
} else {
if isHotReply, err = s.isHot(ctx, name, oid, rpID, tp); err != nil {
return
}
}
}
s.addUV(ctx, value, isHotReply)
switch value.Action {
case model.DatabusActionLike:
if isHotReply {
s.statisticsStats[value.Sharding()].HotLike++
}
s.statisticsStats[value.Sharding()].TotalLike++
case model.DatabusActionHate:
if isHotReply {
s.statisticsStats[value.Sharding()].HotHate++
}
s.statisticsStats[value.Sharding()].TotalHate++
case model.DatabusActionCancelLike:
if isHotReply && s.statisticsStats[value.Sharding()].HotLike > 0 {
s.statisticsStats[value.Sharding()].HotLike--
}
if s.statisticsStats[value.Sharding()].TotalLike > 0 {
s.statisticsStats[value.Sharding()].TotalLike--
}
case model.DatabusActionCancelHate:
if isHotReply && s.statisticsStats[value.Sharding()].HotHate > 0 {
s.statisticsStats[value.Sharding()].HotHate--
}
if s.statisticsStats[value.Sharding()].TotalHate > 0 {
s.statisticsStats[value.Sharding()].TotalHate--
}
case model.DatabusActionReport:
if isHotReply {
s.statisticsStats[value.Sharding()].HotReport++
}
s.statisticsStats[value.Sharding()].TotalReport++
case model.DatabusActionReply:
if reply.IsRoot() {
s.statisticsStats[value.Sharding()].TotalRootReply++
} else {
if isHotReply {
s.statisticsStats[value.Sharding()].HotChildReply++
}
s.statisticsStats[value.Sharding()].TotalChildReply++
}
}
}
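// replyListFlow maintains the cached reply list: it builds or loads the stat of the
// root reply, applies the action to the redis reply set and the cached stat, and
// finally refreshes the sorted sets via upsertZSet.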
func (s *Service) replyListFlow(ctx context.Context, value *model.StatsMsg) {
var (
subject = value.Subject
reply = value.Reply
oid = subject.Oid
tp = subject.Type
stat *model.ReplyStat
reportCount int
err error
)
if value.Report == nil {
reportCount = 0
} else {
reportCount = value.Report.Count
}
// for a root reply, build the stat from the message; otherwise fetch the stat of its root reply
if reply.IsRoot() {
stat = &model.ReplyStat{
RpID: reply.RpID,
Reply: reply.RCount,
Like: reply.Like,
Hate: reply.Hate,
Report: reportCount,
SubjectTime: subject.CTime,
ReplyTime: reply.CTime,
}
} else {
if stat, err = s.GetStatByID(ctx, oid, tp, reply.Root); err != nil || stat == nil {
return
}
}
if reply.IsRoot() {
switch value.Action {
case model.DatabusActionTop, model.DatabusActionDel, model.DatabusActionRptDel:
s.remReply(ctx, oid, tp, stat.RpID)
case model.DatabusActionUnTop, model.DatabusActionRecover, model.DatabusActionReply:
s.addReplySet(ctx, oid, tp, stat.RpID)
case model.DatabusActionLike, model.DatabusActionCancelLike, model.DatabusActionCancelHate, model.DatabusActionHate, model.DatabusActionReport:
s.updateStat(ctx, stat.RpID, stat)
default:
return
}
} else {
switch value.Action {
case model.DatabusActionReply, model.DatabusActionRecover:
stat.Reply++
case model.DatabusActionDel, model.DatabusActionRptDel:
if stat.Reply > 0 {
stat.Reply--
}
default:
return
}
s.updateStat(ctx, stat.RpID, stat)
}
s.upsertZSet(ctx, oid, tp)
}

View File

@@ -0,0 +1,100 @@
package service
import (
"context"
"encoding/json"
"go-common/app/job/main/reply-feed/model"
"go-common/library/log"
)
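// loadAlgorithm loads the slot/algorithm configuration from the DB, groups slots by
// experiment name, parses each algorithm's weight JSON, and swaps in the new
// algorithm list under algorithmsLock.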
func (s *Service) loadAlgorithm() (err error) {
ss, err := s.dao.SlotStats(context.Background())
if err != nil {
return
}
// aggregate slots by experiment name
ssMap := make(map[string]*model.SlotsStat)
for _, s := range ss {
if v, exists := ssMap[s.Name]; exists {
v.Slots = append(v.Slots, s.Slot)
} else {
ssMap[s.Name] = &model.SlotsStat{
Name: s.Name,
Slots: []int{s.Slot},
Algorithm: s.Algorithm,
Weight: s.Weight,
}
}
}
var algorithms []model.Algorithm
for name, ss := range ssMap {
if ss.Weight == "" {
continue
}
var (
algorithm model.Algorithm
w interface{}
)
if ss.Algorithm == model.WilsonLHRRAlgorithm || ss.Algorithm == model.WilsonLHRRFluidAlgorithm {
if err = json.Unmarshal([]byte(ss.Weight), &w); err != nil {
log.Error("json.Unmarshal() error(%v), name (%s), algorightm (%s), weight (%s)", err, name, ss.Algorithm, ss.Weight)
return
}
}
switch ss.Algorithm {
case model.WilsonLHRRAlgorithm:
weight := w.(map[string]interface{})
algorithm = model.NewWilsonLHRR(name, ss.Slots, &model.WilsonLHRRWeight{
Like: weight["like"].(float64),
Hate: weight["hate"].(float64),
Reply: weight["reply"].(float64),
Report: weight["report"].(float64),
})
case model.WilsonLHRRFluidAlgorithm:
weight := w.(map[string]interface{})
algorithm = model.NewWilsonLHRRFluid(name, ss.Slots, &model.WilsonLHRRFluidWeight{
Like: weight["like"].(float64),
Hate: weight["hate"].(float64),
Reply: weight["reply"].(float64),
Report: weight["report"].(float64),
Slope: weight["slope"].(float64),
})
case model.OriginAlgorithm:
algorithm = model.NewOrigin(name, ss.Slots)
case model.LikeDescAlgorithm:
algorithm = model.NewLikeDesc(name, ss.Slots)
case model.DefaultAlgorithm:
continue
default:
log.Warn("invalid algorithm")
continue
}
if algorithm != nil {
algorithms = append(algorithms, algorithm)
}
}
s.algorithmsLock.Lock()
s.algorithms = algorithms
s.algorithmsLock.Unlock()
return
}
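// loadSlots loads the slot-to-experiment mapping and refreshes the name/slot fields
// of the in-memory statistics.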
func (s *Service) loadSlots() (err error) {
ctx := context.Background()
slotsMap, err := s.dao.SlotsMapping(ctx)
if err != nil {
return
}
s.statisticsLock.Lock()
for name, mapping := range slotsMap {
for _, slot := range mapping.Slots {
s.statisticsStats[slot].Name = name
s.statisticsStats[slot].Slot = slot
}
log.Warn("name stat (name: %s, slots: %v)", name, mapping.Slots)
}
log.Warn("statistics stat (%v)", s.statisticsStats)
s.statisticsLock.Unlock()
return
}

View File

@@ -0,0 +1,91 @@
package service
import (
"context"
"go-common/app/job/main/reply-feed/model"
"go-common/library/log"
)
// setReplySetBatch rebuilds the redis reply set for a subject in batch.
func (s *Service) setReplySetBatch(ctx context.Context, oid int64, tp int) (err error) {
var (
stats []*model.ReplyStat
rpIDs []int64
)
// query reply IDs that qualify as hot-reply candidates from the DB
if rpIDs, err = s.dao.RpIDs(ctx, oid, tp); err != nil || len(rpIDs) <= 0 {
return
}
// fetch reply stats from memcache, falling back to the DB
if stats, err = s.GetStatsByID(ctx, oid, tp, rpIDs); err != nil {
return
}
for _, stat := range stats {
stat := stat
s.statQ.Do(ctx, func(ctx context.Context) {
s.dao.SetReplyStatMc(ctx, stat)
})
}
return s.dao.SetReplySetRds(ctx, oid, tp, rpIDs)
}
// addReplySet adds one rpID to the redis reply set, rebuilding the whole set when the cache has expired.
func (s *Service) addReplySet(ctx context.Context, oid int64, tp int, rpID int64) (err error) {
ok, err := s.dao.ExpireReplySetRds(ctx, oid, tp)
if err != nil {
return
}
if ok {
if err = s.dao.AddReplySetRds(ctx, oid, tp, rpID); err != nil {
return
}
} else {
if err = s.setReplySetBatch(ctx, oid, tp); err != nil {
return
}
}
return
}
func (s *Service) remSet(ctx context.Context, oid, rpID int64, tp int) (err error) {
if err = s.dao.RemReplySetRds(ctx, oid, rpID, tp); err != nil {
log.Error("Remove rpID from set error (%v)", err)
}
return
}
// func (s *Service) delSet(ctx context.Context, oid int64, tp int) (err error) {
// if err = s.dao.DelReplySetRds(ctx, oid, tp); err != nil {
// log.Error("delete reply set(oid: %d, type: %d)", oid, tp)
// }
// return
// }
// func (s *Service) delReply(ctx context.Context, oid int64, tp int) {
// var err error
// if err = s.delSet(ctx, oid, tp); err != nil {
// s.replyListQ.Do(ctx, func(ctx context.Context) {
// s.delSet(ctx, oid, tp)
// })
// }
// if err = s.delZSet(ctx, oid, tp); err != nil {
// s.replyListQ.Do(ctx, func(ctx context.Context) {
// s.delZSet(ctx, oid, tp)
// })
// }
// }
func (s *Service) remReply(ctx context.Context, oid int64, tp int, rpID int64) {
var err error
if err = s.remSet(ctx, oid, rpID, tp); err != nil {
s.replyListQ.Do(ctx, func(ctx context.Context) {
s.remSet(ctx, oid, rpID, tp)
})
}
if err = s.remZSet(ctx, oid, tp, rpID); err != nil {
s.replyListQ.Do(ctx, func(ctx context.Context) {
s.remZSet(ctx, oid, tp, rpID)
})
}
}

View File

@@ -0,0 +1,138 @@
package service
import (
"context"
"sync"
"time"
"go-common/app/job/main/reply-feed/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
// func (s *Service) delZSet(ctx context.Context, oid int64, tp int) (err error) {
// var names []string
// s.algorithmsLock.RLock()
// for _, algorithm := range s.algorithms {
// names = append(names, algorithm.Name())
// }
// s.algorithmsLock.RUnlock()
// if err = s.dao.DelReplyZSetRds(ctx, names, oid, tp); err != nil {
// log.Error("Del ZSet from redis oid(%d) type(%d) error(%v)", oid, tp, err)
// }
// return
// }
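// remZSet removes one rpID from the sorted set of every registered algorithm.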
func (s *Service) remZSet(ctx context.Context, oid int64, tp int, rpID int64) (err error) {
var (
names []string
)
s.algorithmsLock.RLock()
for _, algorithm := range s.algorithms {
names = append(names, algorithm.Name())
}
s.algorithmsLock.RUnlock()
for _, name := range names {
if err = s.dao.RemReplyZSetRds(ctx, name, oid, tp, rpID); err != nil {
log.Error("Remove reply (name: %s, oid: %d, type: %d, rpID: %d) from ZSet failed.", name, oid, tp, rpID)
return
}
}
return
}
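// upsertZSet recomputes the sorted sets for one subject. A per-subject timer in redis
// acts as a cooldown so the expensive recalculation runs at most once per refresh
// interval (s.c.RefreshTime).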
func (s *Service) upsertZSet(ctx context.Context, oid int64, tp int) {
var (
rpIDs []int64
rs []*model.ReplyStat
err error
ts int64
)
// read the refresh timer
if ts, err = s.dao.CheckerTsRds(ctx, oid, tp); err != nil && err != redis.ErrNil {
// on error do not refresh; redis.ErrNil (timer not yet cached) still refreshes
return
} else if time.Now().Unix()-ts < s.c.RefreshTime {
// still within the cooldown window, skip the refresh
return
}
// read rpIDs from the reply set
ok, err := s.dao.ExpireReplySetRds(ctx, oid, tp)
if err != nil {
return
}
if ok {
// set is cached: read it from redis
if rpIDs, err = s.dao.ReplySetRds(ctx, oid, tp); err != nil {
return
}
} else {
// not cached: read from the DB
if rpIDs, err = s.dao.RpIDs(ctx, oid, tp); err != nil {
return
}
// rebuild the cache asynchronously
s.taskQ.Do(ctx, func(ctx context.Context) {
s.setReplySetBatch(ctx, oid, tp)
})
}
// fetch reply stats from memcache
if rs, err = s.GetStatsByID(ctx, oid, tp, rpIDs); err != nil {
return
}
// recalculate scores
rsMap, err := s.recalculateScore(ctx, rs)
if err != nil {
return
}
for name, rs := range rsMap {
name, rs := name, rs
s.replyListQ.Do(ctx, func(ctx context.Context) {
s.dao.SetReplyZSetRds(ctx, name, oid, tp, rs)
})
}
// after updating, reset the refresh timer
if err = s.dao.SetCheckerTsRds(ctx, oid, tp); err != nil {
log.Error("set refresh checker error (%v)", err)
}
}
// recalculateScore recalculates reply-list scores for every experiment (algorithm) group.
func (s *Service) recalculateScore(ctx context.Context, stats []*model.ReplyStat) (rsMap map[string][]*model.ReplyScore, err error) {
rsMap = make(map[string][]*model.ReplyScore)
s.algorithmsLock.RLock()
defer s.algorithmsLock.RUnlock()
for _, algorithm := range s.algorithms {
wg := sync.WaitGroup{}
rs := make([]*model.ReplyScore, len(stats))
for i := range stats {
j := i
wg.Add(1)
s.calculator.JobQueue <- func() {
rs[j] = algorithm.Score(stats[j])
wg.Done()
}
}
wg.Wait()
rsMap[algorithm.Name()] = rs
}
return
}
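// isHot reports whether rpID currently ranks among the top entries of the algorithm's
// sorted set (range 0-5) and has at least 3 likes.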
func (s *Service) isHot(ctx context.Context, name string, oid, rpID int64, tp int) (isHot bool, err error) {
rpIDs, err := s.dao.RangeReplyZSetRds(ctx, name, oid, tp, 0, 5)
if err != nil || len(rpIDs) <= 0 {
return
}
rs, err := s.GetStatsByID(ctx, oid, tp, rpIDs)
if err != nil {
return
}
for _, r := range rs {
if r.RpID == rpID && r.Like >= 3 {
isHot = true
return
}
}
return
}

View File

@@ -0,0 +1,111 @@
package service
import (
"context"
"sync"
"time"
"go-common/app/job/main/reply-feed/conf"
"go-common/app/job/main/reply-feed/dao"
"go-common/app/job/main/reply-feed/model"
"go-common/library/log"
"go-common/library/net/netutil"
"go-common/library/queue/databus"
"go-common/library/sync/pipeline/fanout"
"github.com/ivpusic/grpool"
"github.com/robfig/cron"
)
// Service is the reply-feed job service.
type Service struct {
c *conf.Config
dao *dao.Dao
// scheduled jobs
cron *cron.Cron
// backoff
bc netutil.BackoffConfig
statsConsumer *databus.Databus
// eventConsumer *databus.Databus
taskQ *fanout.Fanout
uvQ *fanout.Fanout
statQ *fanout.Fanout
replyListQ *fanout.Fanout
waiter sync.WaitGroup
// goroutine pool dedicated to computing hot-reply scores
calculator *grpool.Pool
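// per-slot in-memory counters, merged and persisted hourly by persistStatistics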
statisticsStats [model.SlotsNum]model.StatisticsStat
algorithmsLock sync.RWMutex
statisticsLock sync.RWMutex
algorithms []model.Algorithm
}
// New creates and initializes the reply-feed job Service.
func New(c *conf.Config) (s *Service) {
s = &Service{
c: c,
dao: dao.New(c),
cron: cron.New(),
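// backoff used when retrying UpsertStatistics in persistStatistics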
bc: netutil.BackoffConfig{
MaxDelay: 1 * time.Second,
BaseDelay: 100 * time.Millisecond,
Factor: 1.6,
Jitter: 0.2,
},
statsConsumer: databus.New(c.Databus.Stats),
// eventConsumer: databus.New(c.Databus.Event),
// fanout workers that handle asynchronous write tasks
taskQ: fanout.New("task"),
uvQ: fanout.New("uv-task", fanout.Worker(4), fanout.Buffer(2048)),
statQ: fanout.New("memcache", fanout.Worker(4), fanout.Buffer(2048)),
replyListQ: fanout.New("redis", fanout.Worker(4), fanout.Buffer(2048)),
calculator: grpool.NewPool(4, 2048),
}
var err error
if err = s.loadAlgorithm(); err != nil {
panic(err)
}
if err = s.loadSlots(); err != nil {
panic(err)
}
go s.loadproc()
// consume databus messages
s.waiter.Add(1)
go s.statsproc()
// s.waiter.Add(1)
// go s.eventproc()
// write statistics to the DB once at the top of every hour
s.cron.AddFunc("@hourly", func() {
s.persistStatistics()
})
s.cron.Start()
return s
}
func (s *Service) loadproc() {
for {
time.Sleep(time.Minute)
s.loadAlgorithm()
s.loadSlots()
}
}
// Ping Service
func (s *Service) Ping(c context.Context) (err error) {
return s.dao.Ping(c)
}
// Close Service
func (s *Service) Close() {
s.statsConsumer.Close()
// s.eventConsumer.Close()
log.Warn("consumer closed")
s.waiter.Wait()
s.persistStatistics()
s.dao.Close()
}

View File

@@ -0,0 +1,117 @@
package service
import (
"context"
"go-common/app/job/main/reply-feed/model"
"go-common/library/ecode"
"go-common/library/log"
)
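// updateStat asynchronously writes the reply stat back to memcache.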
func (s *Service) updateStat(ctx context.Context, rpID int64, stat *model.ReplyStat) {
s.statQ.Do(ctx, func(ctx context.Context) {
s.dao.SetReplyStatMc(ctx, stat)
})
}
// GetStatByID gets a single reply stat from cache, falling back to the DB on a miss.
func (s *Service) GetStatByID(ctx context.Context, oid int64, tp int, rpID int64) (stat *model.ReplyStat, err error) {
stats, err := s.GetStatsByID(ctx, oid, tp, []int64{rpID})
if err != nil {
return
}
if len(stats) > 0 {
stat = stats[0]
} else {
err = ecode.ReplyNotExist
log.Error("reply not exists rpID %d", rpID)
}
return
}
// GetStatsByID gets multiple reply stats from cache, falling back to the DB for misses.
func (s *Service) GetStatsByID(ctx context.Context, oid int64, tp int, rpIDs []int64) (rs []*model.ReplyStat, err error) {
var (
rsMap map[int64]*model.ReplyStat
missIDs []int64
missed map[int64]*model.ReplyStat
)
if rsMap, missIDs, err = s.dao.ReplyStatsMc(ctx, rpIDs); err != nil {
return
}
for _, stat := range rsMap {
rs = append(rs, stat)
}
if len(missIDs) > 0 {
// fetch misses from the DB and backfill memcache
if missed, err = s.getStatsByIDDB(ctx, oid, tp, missIDs); err != nil {
rs = nil
return
}
for _, stat := range missed {
stat := stat
rs = append(rs, stat)
s.statQ.Do(ctx, func(ctx context.Context) {
s.dao.SetReplyStatMc(ctx, stat)
})
}
}
return
}
// getStatsByIDDB loads reply stats from the DB; strict consistency is not needed here, so it queries across tables and aggregates the results.
func (s *Service) getStatsByIDDB(ctx context.Context, oid int64, tp int, rpIDs []int64) (rs map[int64]*model.ReplyStat, err error) {
if len(rpIDs) == 0 {
return
}
replyMap, err := s.dao.ReplyLHRCStatsByID(ctx, oid, rpIDs)
if err != nil {
return
}
reportMap, err := s.dao.ReportStatsByID(ctx, oid, rpIDs)
if err != nil {
return
}
ctime, err := s.dao.SubjectStats(ctx, oid, tp)
if err != nil {
return
}
for rpID := range replyMap {
r, ok := reportMap[rpID]
if ok && r != nil {
replyMap[rpID].Report = r.Report
}
replyMap[rpID].SubjectTime = ctime
}
rs = replyMap
return
}
// GetStatsDB loads hot-reply stats from the DB; strict consistency is not needed here, so it queries across tables and aggregates the results.
func (s *Service) GetStatsDB(ctx context.Context, oid int64, tp int) (rs []*model.ReplyStat, err error) {
replyMap, err := s.dao.ReplyLHRCStats(ctx, oid, tp)
if err != nil {
return
}
var RpIDs []int64
for rpID := range replyMap {
RpIDs = append(RpIDs, rpID)
}
reportMap, err := s.dao.ReportStatsByID(ctx, oid, RpIDs)
if err != nil {
return
}
ctime, err := s.dao.SubjectStats(ctx, oid, tp)
if err != nil {
return
}
for rpID := range replyMap {
r, ok := reportMap[rpID]
if ok && r != nil {
replyMap[rpID].Report = r.Report
}
replyMap[rpID].SubjectTime = ctime
rs = append(rs, replyMap[rpID])
}
return
}

View File

@@ -0,0 +1,162 @@
package service
import (
"context"
"time"
"go-common/app/job/main/reply-feed/model"
"go-common/library/log"
)
func hourNow() int {
return time.Now().Hour()
}
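// lastHour returns the previous hour on a 24-hour clock, wrapping 0 back to 23.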
func lastHour() int {
hour := hourNow()
if hour == 0 {
return 23
}
return hour - 1
}
// addUV records a unique-visitor counter for the action, both in total and, when the reply is hot, in the hot bucket.
func (s *Service) addUV(ctx context.Context, value *model.StatsMsg, isHot bool) {
var action string
switch value.Action {
case model.DatabusActionLike:
action = model.StatisticActionLike
case model.DatabusActionHate:
action = model.StatisticActionHate
case model.DatabusActionReport:
action = model.StatisticActionReport
case model.DatabusActionReply:
if value.Reply.IsRoot() {
action = model.StatisticActionRootReply
} else {
action = model.StatisticActionChildReply
}
}
if action == "" || value.Mid == 0 {
return
}
s.uvQ.Do(ctx, func(ctx context.Context) {
if isHot {
s.dao.AddUV(ctx, action, hourNow(), int(value.Sharding()), value.Mid, model.StatisticKindHot)
}
s.dao.AddUV(ctx, action, hourNow(), int(value.Sharding()), value.Mid, model.StatisticKindTotal)
})
}
// uvStatistics aggregates the previous hour's UV counters for the given slots into stat.
func (s *Service) uvStatistics(ctx context.Context, slots []int, stat *model.StatisticsStat) {
var (
keys []string
lastHour = lastHour()
x, y, z = len(model.StatisticKinds), len(model.StatisticActions), len(slots)
idxMap = make([][][]int, x)
idx int
)
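// build a redis key for every (kind, action, slot) combination and remember each
// key's index so the flat counts slice can be mapped back below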
for i, kind := range model.StatisticKinds {
idxMap[i] = make([][]int, y)
for j, action := range model.StatisticActions {
idxMap[i][j] = make([]int, z)
for k, slot := range slots {
keys = append(keys, s.dao.KeyUV(action, lastHour, slot, kind))
idxMap[i][j][k] = idx
idx++
}
}
}
counts, err := s.dao.CountUV(ctx, keys)
if err != nil || len(counts) != len(keys) {
return
}
for i, kind := range model.StatisticKinds {
for j, action := range model.StatisticActions {
for k := range slots {
count := counts[idxMap[i][j][k]]
switch {
case kind == model.StatisticKindTotal:
switch action {
case model.StatisticActionRootReply:
stat.TotalRootUV += count
case model.StatisticActionChildReply:
stat.TotalChildUV += count
case model.StatisticActionLike:
stat.TotalLikeUV += count
case model.StatisticActionHate:
stat.TotalHateUV += count
case model.StatisticActionReport:
stat.TotalReportUV += count
}
case kind == model.StatisticKindHot:
switch action {
case model.StatisticActionChildReply:
stat.HotChildUV += count
case model.StatisticActionLike:
stat.HotLikeUV += count
case model.StatisticActionHate:
stat.HotHateUV += count
case model.StatisticActionReport:
stat.HotReportUV += count
}
}
}
}
}
}
// persistStatistics merges the per-slot counters by experiment name, adds UV counts, upserts them into the DB with retries, and resets the counters.
func (s *Service) persistStatistics() {
ctx := context.Background()
statisticsMap := make(map[string]*model.StatisticsStat)
nameMapping := make(map[string][]int)
s.statisticsLock.RLock()
for slot, stat := range s.statisticsStats {
stat := stat // copy the loop variable so &stat below does not alias the iterator
nameMapping[stat.Name] = append(nameMapping[stat.Name], slot)
if merged, ok := statisticsMap[stat.Name]; ok {
statisticsMap[stat.Name] = merged.Merge(&stat)
} else {
statisticsMap[stat.Name] = &stat
}
}
s.statisticsLock.RUnlock()
now := time.Now()
year, month, day := now.Date()
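// encode the date as YYYYMMDD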
date := year*10000 + int(month)*100 + day
hour := now.Hour()
for name, stat := range statisticsMap {
s.uvStatistics(ctx, nameMapping[name], stat)
err := s.dao.UpsertStatistics(ctx, name, date, hour, stat)
var (
retryTimes = 0
maxRetryTimes = 5
)
for err != nil && retryTimes < maxRetryTimes {
time.Sleep(s.bc.Backoff(retryTimes))
err = s.dao.UpsertStatistics(ctx, name, date, hour, stat)
retryTimes++
}
if err != nil {
log.Error("upsert statistics failed after %d retries: %v", retryTimes, err)
}
}
for i := range s.statisticsStats {
reset(&s.statisticsStats[i])
}
log.Warn("reset statistics at %v", now)
}
func reset(stat *model.StatisticsStat) {
stat.HotChildReply = 0
stat.HotHate = 0
stat.HotLike = 0
stat.HotReport = 0
stat.TotalChildReply = 0
stat.TotalRootReply = 0
stat.TotalReport = 0
stat.TotalLike = 0
stat.TotalHate = 0
}