Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,76 @@
# Bazel build definitions for the aegis job service package.
# Targets tagged "automanaged" are maintained by code-generation tooling;
# prefer regenerating over hand-editing the srcs/deps lists.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Main library: all service sources for go-common/app/job/main/aegis/service.
go_library(
    name = "go_default_library",
    srcs = [
        "cache.go",
        "databus.go",
        "handler.go",
        "handler_resource.go",
        "handler_task.go",
        "monitor.go",
        "report.go",
        "service.go",
        "task_job.go",
        "task_log.go",
        "weight.go",
    ],
    importpath = "go-common/app/job/main/aegis/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/aegis/conf:go_default_library",
        "//app/job/main/aegis/dao:go_default_library",
        "//app/job/main/aegis/dao/email:go_default_library",
        "//app/job/main/aegis/dao/monitor:go_default_library",
        "//app/job/main/aegis/model:go_default_library",
        "//app/job/main/aegis/model/monitor:go_default_library",
        "//app/service/main/account/api:go_default_library",
        "//app/service/main/up/api/v1:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/queue/databus/databusutil:go_default_library",
        "//library/queue/databus/report:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

# Sources of this package only (private helper for the tree-wide all-srcs).
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source aggregation target consumed by the repository root.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# Unit tests embedded into the library (white-box tests).
go_test(
    name = "go_default_test",
    srcs = [
        "monitor_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    tags = ["automanaged"],
    deps = [
        "//app/job/main/aegis/conf:go_default_library",
        "//app/job/main/aegis/model/monitor:go_default_library",
        "//app/service/main/account/api:go_default_library",
        "//library/log:go_default_library",
        "//vendor/github.com/golang/mock/gomock:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

View File

@@ -0,0 +1,219 @@
package service
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/xstr"
)
// initCache primes the in-memory caches at startup: it loads the active
// task configs and the consumer set, then seeds oldactiveBizFlow from the
// freshly built set so the first syncWeightWatch diff is empty.
func (s *Service) initCache() {
	s.newactiveBizFlow = make(map[string]struct{})
	s.syncConfigCache(context.Background())
	s.syncConsumerCache(context.Background())
	s.oldactiveBizFlow = s.newactiveBizFlow
}
// cacheProc is a long-running refresh loop: every ~3 minutes it re-queues
// stuck tasks, reloads configs, reconciles weight watchers and reloads the
// consumer cache.
// NOTE(review): the loop has no stop/cancel signal — it runs for the whole
// process lifetime.
func (s *Service) cacheProc() {
	for {
		s.syncTaskCache()
		time.Sleep(3 * time.Minute)
		s.syncConfigCache(context.Background())
		s.syncWeightWatch(context.Background())
		s.syncConsumerCache(context.Background())
	}
}
// syncTaskCache sweeps the task table for rows that appear stuck and
// re-queues them: dispatched tasks idle >10m go back to their owner's
// personal queue, delayed tasks idle >30m back to the delay queue, and
// initial tasks idle >60m back to the public queue. Each query pages up
// to 1000 rows by last id, with a 1s pause between pages.
func (s *Service) syncTaskCache() {
	var (
		tasks  []*model.Task
		lastid = int64(0)
		err    error
	)
	// 1. Stalled tasks: dispatched but unchanged for 10 minutes.
	lastid = 0
	for {
		mtime := time.Now().Add(-10 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateDispatch, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 停滞任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
			s.dao.PushPersonalTask(context.Background(), task.BusinessID, task.FlowID, task.UID, task.ID)
		}
		time.Sleep(time.Second)
	}
	// 2. Delayed tasks: unchanged for 30 minutes.
	lastid = 0
	for {
		mtime := time.Now().Add(-30 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateDelay, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 延迟任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
			s.dao.PushDelayTask(context.Background(), task.BusinessID, task.FlowID, task.UID, task.ID)
		}
		time.Sleep(time.Second)
	}
	// 3. Realtime (initial) tasks: unchanged for 1 hour; pushed back to
	// the public queue as a batch per page.
	lastid = 0
	for {
		mtime := time.Now().Add(-60 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateInit, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 实时任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
		}
		s.dao.PushPublicTask(context.Background(), tasks...)
		time.Sleep(time.Second)
	}
}
// syncConfigCache rebuilds the assignment / range-weight / equal-weight
// config caches from the DB and records the set of active biz-flow pairs.
// The previous active set is saved into oldactiveBizFlow first so that
// syncWeightWatch can diff old vs new. Invalid config rows are logged and
// skipped rather than aborting the whole rebuild; the caches are swapped
// in wholesale at the end.
func (s *Service) syncConfigCache(c context.Context) (err error) {
	s.oldactiveBizFlow = s.newactiveBizFlow
	configs, err := s.dao.TaskActiveConfigs(c)
	if err != nil {
		return
	}
	// Fresh maps are built locally and assigned at the end, so readers
	// never observe a partially built cache.
	rangeWCCache := make(map[int64]map[string]*model.RangeWeightConfig)
	equalWCCache := make(map[string][]*model.EqualWeightConfig)
	assignCache := make(map[string][]*model.AssignConfig)
	activeBizFlow := make(map[string]struct{})
	for _, item := range configs {
		// Cache key is "businessID-flowID".
		key := fmt.Sprintf("%d-%d", item.BusinessID, item.FlowID)
		activeBizFlow[key] = struct{}{}
		switch item.ConfType {
		case model.TaskConfigAssign:
			// Assignment config JSON carries comma-separated id lists.
			assign := new(struct {
				Mids string `json:"mids"`
				Uids string `json:"uids"`
			})
			if err = json.Unmarshal([]byte(item.ConfJSON), assign); err != nil {
				log.Error("json.Unmarshal error(%v)", err)
				continue
			}
			ac := &model.AssignConfig{}
			if item.UID > 0 {
				ac.Admin = item.UID
			} else {
				// 399 is used as the fallback system/admin uid.
				ac.Admin = 399
			}
			assign.Mids = strings.TrimSpace(assign.Mids)
			assign.Uids = strings.TrimSpace(assign.Uids)
			if ac.Mids, err = xstr.SplitInts(assign.Mids); err != nil {
				log.Error("xstr.SplitInts error(%v)", err)
				continue
			}
			if ac.Uids, err = xstr.SplitInts(assign.Uids); err != nil {
				log.Error("xstr.SplitInts error(%v)", err)
				continue
			}
			if _, ok := assignCache[key]; ok {
				assignCache[key] = append(assignCache[key], ac)
			} else {
				assignCache[key] = []*model.AssignConfig{ac}
			}
		case model.TaskConfigRangeWeight:
			wcitem := &model.RangeWeightConfig{}
			if err = json.Unmarshal([]byte(item.ConfJSON), wcitem); err != nil {
				log.Error("json.Unmarshal error(%v)", err)
				continue
			}
			// Range-weight configs are grouped per business id, then by name.
			if _, ok := rangeWCCache[item.BusinessID]; ok {
				rangeWCCache[item.BusinessID][wcitem.Name] = wcitem
			} else {
				rangeWCCache[item.BusinessID] = map[string]*model.RangeWeightConfig{
					wcitem.Name: wcitem,
				}
			}
		case model.TaskConfigEqualWeight:
			ewcitem := &model.EqualWeightConfig{}
			if err = json.Unmarshal([]byte(item.ConfJSON), ewcitem); err != nil {
				log.Error("json.Unmarshal error(%v)", err)
				continue
			}
			ewcitem.Uname = item.Uname
			ewcitem.Description = item.Description
			ewcitem.IDs = strings.TrimSpace(ewcitem.IDs)
			if _, ok := equalWCCache[key]; ok {
				equalWCCache[key] = append(equalWCCache[key], ewcitem)
			} else {
				equalWCCache[key] = []*model.EqualWeightConfig{ewcitem}
			}
		}
	}
	// Swap the rebuilt caches in.
	s.rangeWeightCfg = rangeWCCache
	s.equalWeightCfg = equalWCCache
	s.assignConfig = assignCache
	s.newactiveBizFlow = activeBizFlow
	return
}
// syncWeightWatch reconciles weight managers with the active biz-flow set:
// managers for flows that disappeared are flagged closed and dropped from
// wmHash, and a new WeightManager is started for each newly active flow.
func (s *Service) syncWeightWatch(c context.Context) {
	// Flows present before but no longer active: shut their manager down.
	for key := range s.oldactiveBizFlow {
		if _, ok := s.newactiveBizFlow[key]; !ok {
			if wm, ok := s.wmHash[key]; ok {
				wm.close = true
				log.Info("关闭权重计算器 bizid(%d) flowid(%d)", wm.businessID, wm.flowID)
				delete(s.wmHash, key)
			}
		}
	}
	// Newly active flows: spin up a manager keyed by "bizid-flowid".
	for key := range s.newactiveBizFlow {
		if _, ok := s.oldactiveBizFlow[key]; !ok {
			bizid, _ := parseKey(key)
			s.wmHash[key] = NewWeightManager(s, s.getWeightOpt(bizid), key)
		}
	}
}
// getWeightOpt returns the weight options configured for the given
// business id, or nil when no matching entry exists.
func (s *Service) getWeightOpt(bizid int) *model.WeightOPT {
	target := int64(bizid)
	for _, opt := range s.c.BizCfg.WeightOpt {
		if opt.BusinessID == target {
			return opt
		}
	}
	return nil
}
// syncConsumerCache reloads the active consumer set from the DB and swaps
// it in under the consumer-cache write lock (readers use ccMux.RLock, see
// hitActiveUids). On query failure the existing cache is kept.
func (s *Service) syncConsumerCache(c context.Context) (err error) {
	s.ccMux.Lock()
	defer s.ccMux.Unlock()
	consumerCache, err := s.dao.TaskActiveConsumer(c)
	if err != nil {
		return
	}
	s.consumerCache = consumerCache
	return
}
// getWeightCache returns the cached weight configs for a flow.
// Note the deliberate asymmetry: range-weight configs are cached per
// business id only, while equal-weight configs are keyed by the combined
// "businessid-flowid" string (matching how syncConfigCache builds them).
func (s *Service) getWeightCache(c context.Context, businessid, flowid int64) (rwc map[string]*model.RangeWeightConfig, ewc []*model.EqualWeightConfig) {
	key := fmt.Sprintf("%d-%d", businessid, flowid)
	rwc = s.rangeWeightCfg[businessid]
	ewc = s.equalWeightCfg[key]
	return
}

View File

@@ -0,0 +1,322 @@
package service
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"go-common/app/job/main/aegis/model"
moniMdl "go-common/app/job/main/aegis/model/monitor"
"go-common/library/log"
"go-common/library/queue/databus"
)
// _taskTable is the binlog table name whose rows are treated as task events.
var (
	_taskTable = "task"
)
// taskconsumeproc consumes task binlog messages and internal report
// requests until the binlog channel closes; it signals the service wait
// group on exit.
// NOTE(review): the select carries a default branch that sleeps 1s, so
// the loop polls instead of blocking — channel receives can be delayed by
// up to one second when both channels are momentarily empty.
func (s *Service) taskconsumeproc() {
	defer func() {
		log.Warn("taskconsumeproc exited.")
		s.wg.Done()
	}()
	var (
		binLogMsgs = s.binLogDataBus.Messages()
	)
	for {
		select {
		case msg, ok := <-binLogMsgs:
			if !ok {
				// Closed upstream channel terminates the consumer.
				log.Warn("binLogDataBus has been closed.")
				return
			}
			log.Info("binLogDataBus key(%s) offset(%d) message(%s)",
				msg.Key, msg.Offset, msg.Value)
			s.handleBinLog(msg)
		case rpi := <-s.chanReport:
			s.reportResource(context.Background(), rpi.BizID, rpi.FlowID, rpi.RID, rpi.UID)
		default:
			time.Sleep(time.Second)
		}
	}
}
// archiveConsumeProc consumes archive/archive_video binlog messages and
// routes them to the archive/video monitor handlers; it signals the wait
// group on exit.
// NOTE(review): msg.Commit() is called before the message is processed,
// so delivery is at-most-once — a crash mid-handling loses the message.
func (s *Service) archiveConsumeProc() {
	defer func() {
		log.Warn("archiveConsumeProc exited.")
		s.wg.Done()
	}()
	var (
		msgs = s.archiveDataBus.Messages()
	)
	for {
		var (
			msg *databus.Message
			ok  bool
			err error
		)
		if msg, ok = <-msgs; !ok {
			log.Error("s.archiveDataBus.Messages() closed.")
			return
		}
		msg.Commit()
		m := &model.BinLog{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		// Dispatch by source table; other tables are ignored.
		if m.Table == "archive" {
			s.handleArchiveBinlog(m)
		} else if m.Table == "archive_video" {
			s.handleVideoBinlog(m)
		}
	}
}
// handleArchiveBinlog decodes an archive-table binlog row into old/new
// snapshots and forwards them to the archive monitor.
//
// Fix: the old snapshot was previously unmarshaled from m.New too, so the
// monitor always saw identical old/new images; it now reads m.Old (which
// may be empty for inserts). The argument order also now matches
// monitorArchive's (act, oa, na) signature — the old call passed (na, oa).
func (s *Service) handleArchiveBinlog(m *model.BinLog) {
	var (
		err error
	)
	na := &moniMdl.BinlogArchive{}
	oa := &moniMdl.BinlogArchive{}
	if err = json.Unmarshal(m.New, na); err != nil {
		log.Error("json.Unmarshal(%s,%+v) error(%v)", m.New, na, err)
		return
	}
	// Inserts carry no previous image; only decode m.Old when present.
	if len(m.Old) > 0 {
		if err = json.Unmarshal(m.Old, oa); err != nil {
			log.Error("json.Unmarshal(%s,%+v) error(%v)", m.Old, oa, err)
			return
		}
	}
	s.monitorArchive(m.Action, oa, na)
}
// handleVideoBinlog decodes an archive_video binlog row into old/new
// snapshots and forwards them to the video monitor.
//
// Fix: the old snapshot was previously unmarshaled from m.New too; it now
// reads m.Old (decoded only when present — inserts have no old image),
// and the call matches monitorVideo's (act, ov, nv) parameter order.
func (s *Service) handleVideoBinlog(m *model.BinLog) {
	var (
		err error
	)
	nv := &moniMdl.BinlogVideo{}
	ov := &moniMdl.BinlogVideo{}
	if err = json.Unmarshal(m.New, nv); err != nil {
		log.Error("json.Unmarshal(%s,%+v) error(%v)", m.New, nv, err)
		return
	}
	if len(m.Old) > 0 {
		if err = json.Unmarshal(m.Old, ov); err != nil {
			log.Error("json.Unmarshal(%s,%+v) error(%v)", m.Old, ov, err)
			return
		}
	}
	s.monitorVideo(m.Action, ov, nv)
}
// handleBinLog routes a raw databus binlog message. Task-table rows are
// decoded into old/new Task snapshots and dispatched to handleBinLogMsg;
// the message is always committed, even on decode failure.
//
// Fixes: the locals previously shadowed the builtin `new`, and the error
// log for a failed decode of bmsg.Old printed bmsg.New instead.
func (s *Service) handleBinLog(msg *databus.Message) {
	defer msg.Commit()
	bmsg := new(model.BinLog)
	if err := json.Unmarshal(msg.Value, bmsg); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", string(msg.Value), err)
		return
	}
	if bmsg.Table == _taskTable {
		oldTask := new(model.Task)
		newTask := new(model.Task)
		if err := json.Unmarshal(bmsg.New, newTask); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", string(bmsg.New), err)
			return
		}
		// Only update rows carry a previous image.
		if bmsg.Action == "update" {
			if err := json.Unmarshal(bmsg.Old, oldTask); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", string(bmsg.Old), err)
				return
			}
		}
		s.handleBinLogMsg(context.Background(), bmsg.Action, oldTask, newTask)
	}
	// use specify goroutine to merge messages
	log.Info("handleBinlog table:%s key:%s partition:%d offset:%d", bmsg.Table, msg.Key, msg.Partition, msg.Offset)
}
// Short aliases for task states and the task-dispatch log type, used by
// the binlog state machine below.
const (
	INT = model.TaskStateInit     // initial / unclaimed
	DSP = model.TaskStateDispatch // dispatched to an auditor
	DEY = model.TaskStateDelay    // delayed by an auditor
	SUB = model.TaskStateSubmit   // submitted from the task list
	RSB = model.TaskStateRscSb    // submitted from the resource list
	CLO = model.TaskStateClosed   // closed
	LTD = model.LogTypeTaskDispatch
)
// handleRelease processes a task released back from an auditor: it is
// removed from the old owner's personal queue, pushed onto the public
// queue, logged, and the owner's release counter is bumped.
func (s *Service) handleRelease(c context.Context, old, new *model.Task) {
	s.dao.RemovePersonalTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	s.dao.PushPublicTask(c, new)
	s.sendTaskLog(c, new, LTD, "release", new.UID, "")
	s.dao.IncresByField(c, old.BusinessID, old.FlowID, old.UID, model.Release, 1)
}
// handleDisptach (sic) logs a task claim and bumps the claimer's dispatch
// counter.
// No cache sync is done here: doing it from the binlog path would race
// with the dispatch path and scramble ordering.
func (s *Service) handleDisptach(c context.Context, old, new *model.Task) {
	s.sendTaskLog(c, new, LTD, "dispatch", new.UID, "")
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.Dispatch, 1)
}
// handleDelay logs a task delay and bumps the owner's delay counter.
// No cache sync is done here: doing it from the binlog path would race
// with the delay path and scramble ordering.
func (s *Service) handleDelay(c context.Context, old, new *model.Task) {
	s.sendTaskLog(c, new, LTD, "delay", new.UID, "")
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.Delay, 1)
}
/*
Statistics edge cases prone to drift:
1. a task claimed by auditor a but submitted by b from the resource list
2. a task delayed by a but submitted by b from the resource or delay list
*/
// handleSubmit processes a submitted/closed task: it is removed from
// whichever queue matches its previous state, a task log matching the new
// state is written, and the submission is reported for statistics.
func (s *Service) handleSubmit(c context.Context, old, new *model.Task) {
	switch old.State {
	case INT: // submitted directly while unclaimed (from the resource list)
		s.dao.RemovePublicTask(c, old.BusinessID, old.FlowID, old.ID)
	case DSP: // submitted after being claimed (possibly via resource list)
		s.dao.RemovePersonalTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	case DEY: // submitted from the delay list (possibly via resource list)
		s.dao.RemoveDelayTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	default: // unexpected previous state
		log.Error("handleSubmit UNEXPECTED old(%+v) new(%v)", old, new)
	}
	switch new.State {
	case SUB:
		s.sendTaskLog(c, new, LTD, "tasksubmit", new.UID, "")
	case RSB:
		s.sendTaskLog(c, new, LTD, "rscsubmit", new.UID, "")
	case CLO:
		s.sendTaskLog(c, new, LTD, "close", new.UID, "")
	}
	s.reportSubmit(c, old, new)
}
// handleCreate processes a newly inserted task: it is pushed onto the
// public queue, logged under the system uid 399 ("aegis-job"), and
// reported for statistics.
func (s *Service) handleCreate(c context.Context, new *model.Task) {
	s.dao.PushPublicTask(c, new)
	s.sendTaskLog(c, new, LTD, "create", 399, "aegis-job")
	s.reportTaskCreate(c, new)
}
// handleBinLogMsg is the task binlog state machine. The task cache is
// refreshed unconditionally; inserts are treated as creations, and
// updates are dispatched by (old.State -> new.State) transition:
//
//	DSP -> INT            release
//	INT -> DSP            claim
//	DSP -> DEY            delay
//	 *  -> SUB/RSB/CLO    submit/close
//
// Unexpected transitions are logged; an AdminID change (reassignment) is
// deliberately a no-op here.
func (s *Service) handleBinLogMsg(c context.Context, act string, old, new *model.Task) {
	log.Info("handleTaskBinlog act(%s) old(%+v) new(%+v)", act, old, new)
	s.dao.SetTask(c, new)
	if act == "insert" {
		s.handleCreate(c, new)
	}
	if act == "update" {
		switch {
		case old.State != new.State: // state transition
			switch new.State {
			case INT: // back to initial
				switch old.State {
				case DSP: // released by its owner
					s.handleRelease(c, old, new)
				default: // anything else is unexpected; re-queue defensively
					s.dao.PushPublicTask(c, new)
					log.Error("handleTaskBinlog UNEXPECTED INT old(%+v) new(%+v)", old, new)
				}
			case DSP: // claimed
				switch old.State {
				case INT:
					s.handleDisptach(c, old, new)
				default:
					log.Error("handleTaskBinlog UNEXPECTED DSP old(%+v) new(%+v)", old, new)
				}
			case DEY: // delayed
				switch old.State {
				case DSP:
					s.handleDelay(c, old, new)
				default:
					log.Error("handleTaskBinlog UNEXPECTED DEY old(%+v) new(%+v)", old, new)
				}
			case SUB, RSB, CLO: // submitted or closed
				s.handleSubmit(c, old, new)
			}
		case old.AdminID != new.AdminID: // reassignment: intentionally ignored
		default:
			log.Info("其他变更 old(%+v)->new(%+v)", old, new)
		}
	}
}
// setAssign tries to route the task to a configured auditor: it collects
// admin -> candidate uids from the assignment configs, keeps the first
// admin with at least one active candidate, and picks one candidate
// (uniformly at random when several are active). It reports whether the
// task was assigned; on success task.AdminID and task.UID are set.
func (s *Service) setAssign(c context.Context, task *model.Task) bool {
	log.Info("指派判断 setAssign(%+v)", task)
	byAdmin := s.hitAssignUids(c, task)
	log.Info("指派判断 hitAssignUids(%v)", byAdmin)
	if len(byAdmin) == 0 {
		return false
	}
	log.Info("task(%d) 命中指派配置 (%v)", task.ID, byAdmin)
	var candidates []int64
	for admin, uids := range byAdmin {
		task.AdminID = admin
		candidates = s.hitActiveUids(c, task, uids)
		if len(candidates) != 0 {
			break
		}
	}
	log.Info("task(%d) 指派在线 (%v)", task.ID, candidates)
	n := len(candidates)
	switch {
	case n == 0:
		return false
	case n == 1:
		task.UID = candidates[0]
	default:
		// several active candidates: pick one at random
		task.UID = candidates[rand.Intn(n)]
	}
	log.Info("task(%d) admin(%d) 指派成功 (%d)", task.ID, task.AdminID, task.UID)
	return true
}
// hitAssignUids returns admin -> candidate auditor uids for every
// assignment config of the task's biz-flow whose mid list contains the
// task's MID. The result is always non-nil (possibly empty).
//
// Fix: the previous version stored item.Uids directly in the result map
// on first hit, aliasing the cached config slice; a later append for the
// same admin could then write into the shared config's backing array.
// Appending to the (possibly nil) map entry always builds a private slice.
func (s *Service) hitAssignUids(c context.Context, task *model.Task) (uids map[int64][]int64) {
	key := fmt.Sprintf("%d-%d", task.BusinessID, task.FlowID)
	uids = make(map[int64][]int64)
	assignC, ok := s.assignConfig[key]
	if !ok {
		return
	}
	for _, item := range assignC {
		log.Info("指派判断 task(%+v) item(%+v)", task, item)
		for _, mid := range item.Mids {
			if mid == task.MID {
				uids[item.Admin] = append(uids[item.Admin], item.Uids...)
			}
		}
	}
	return
}
// hitActiveUids filters uids down to those that are registered consumers
// for the task's biz-flow and whose consumer switch is currently on.
// The consumer cache is read under the shared lock.
func (s *Service) hitActiveUids(c context.Context, task *model.Task, uids []int64) (hitid []int64) {
	s.ccMux.RLock()
	defer s.ccMux.RUnlock()
	key := fmt.Sprintf("%d-%d", task.BusinessID, task.FlowID)
	uidCache, ok := s.consumerCache[key]
	if !ok {
		return
	}
	for _, uid := range uids {
		if _, cached := uidCache[uid]; !cached {
			continue
		}
		if on, _ := s.dao.IsConsumerOn(c, int(task.BusinessID), int(task.FlowID), uid); on {
			hitid = append(hitid, uid)
		}
	}
	return
}

View File

@@ -0,0 +1,373 @@
package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"sync"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/queue/databus"
pkgerr "github.com/pkg/errors"
)
// RscHandler validates and persists resource messages: CheckMessage
// decodes/validates a raw payload into an option struct, and
// HandleMessage writes the checked value.
type RscHandler interface {
	CheckMessage(json.RawMessage) (interface{}, error)
	HandleMessage(context.Context, interface{}) error
}
// TaskHandler validates and persists task-creation messages: CheckMessage
// turns a databus message into a *model.Task, and HandleMessage writes it.
type TaskHandler interface {
	CheckMessage(*databus.Message) (interface{}, error)
	HandleMessage(context.Context, interface{}) error
}
// Compile-time interface satisfaction checks.
var (
	_ TaskHandler = baseTaskHandler{}
	_ TaskHandler = dynamicTaskHandler{}
	_ RscHandler  = baseResourceAddHandler{}
	_ RscHandler  = mangaResourceAddHandler{}
	_ RscHandler  = baseResourceUpdateHandler{}
	_ RscHandler  = baseResourceCancelHandler{}
)

// Handler singletons, created once in initHandler via `once`.
var (
	basehandleTask      *baseTaskHandler
	basehandleRscAdd    *baseResourceAddHandler
	basehandleRscUpdate *baseResourceUpdateHandler
	basehandleRscCancel *baseResourceCancelHandler
	dynamicHandleTask   *dynamicTaskHandler
	mangaHandelRscAdd   *mangaResourceAddHandler
	once                sync.Once
)

// Sentinel errors returned by message checking.
var (
	ErrTaskDuplicate       = errors.New("重复任务")
	ErrTaskFlowInvalid     = errors.New("流程失效")
	ErrTaskResourceInvalid = errors.New("资源失效")
	ErrInvalidMsg          = errors.New("无效消息")
	ErrHandlerMiss         = errors.New("handler NotFound")
)

// Handler registry key prefixes, combined with a business id.
var (
	_prefixTask      = "task_"
	_prefixRscAdd    = "add_"
	_prefixRscUpdate = "update_"
	_prefixRscCancel = "cancel_"
)

// Known business ids with dedicated handlers.
var (
	_bizidDynamic = 1
	_bizidManga   = 2
)
// registerRscHandler registers a resource handler under key ("<action>_<bizid>").
func (s *Service) registerRscHandler(key string, handler RscHandler) {
	s.rschandle[key] = handler
}

// registerTaskHandler registers a task handler under key ("task_<bizid>").
func (s *Service) registerTaskHandler(key string, handler TaskHandler) {
	s.taskhandle[key] = handler
}
// findTaskHandler resolves the task handler registered under key, falling
// back to the default dynamic handler when none is registered.
func (s *Service) findTaskHandler(key string) TaskHandler {
	handler, ok := s.taskhandle[key]
	if !ok {
		log.Warn("key(%s)没找到任务的处理器根据类型使用默认handler", key)
		return s.getdynamicTaskHandler()
	}
	return handler
}
// findRscHandler resolves the resource handler registered under key; when
// none is registered it falls back to the base handler matching the
// action prefix, or nil for unknown actions.
func (s *Service) findRscHandler(key string) RscHandler {
	if handler, ok := s.rschandle[key]; ok {
		return handler
	}
	log.Warn("key(%s)没找到业务的处理器根据类型使用默认handler", key)
	if strings.HasPrefix(key, _prefixRscAdd) {
		return s.getbaseResourceAddHandler()
	}
	if strings.HasPrefix(key, _prefixRscUpdate) {
		return s.getbaseResourceUpdateHandler()
	}
	if strings.HasPrefix(key, _prefixRscCancel) {
		return s.getbaseResourceCancelHandler()
	}
	return nil
}
// TODO: registrations are hard-coded for now; they could later be built
// from config via reflection on handler type names.
// initHandler builds the handler registries for the dynamic and manga
// businesses. The singleton handlers are created exactly once.
// NOTE(review): because of once.Do, the singletons stay bound to the
// first *Service this runs for — presumably only one Service instance is
// ever created; confirm before reusing in tests.
func initHandler(s *Service) {
	var (
		dynamicTask      = fmt.Sprintf("%s%d", _prefixTask, _bizidDynamic)
		dynamicRscAdd    = fmt.Sprintf("%s%d", _prefixRscAdd, _bizidDynamic)
		dynamicRscUpdate = fmt.Sprintf("%s%d", _prefixRscUpdate, _bizidDynamic)
		dynamicRscCancel = fmt.Sprintf("%s%d", _prefixRscCancel, _bizidDynamic)
		managaTask       = fmt.Sprintf("%s%d", _prefixTask, _bizidManga)
		managaRscAdd     = fmt.Sprintf("%s%d", _prefixRscAdd, _bizidManga)
		managaRscUpdate  = fmt.Sprintf("%s%d", _prefixRscUpdate, _bizidManga)
		managaRscCancel  = fmt.Sprintf("%s%d", _prefixRscCancel, _bizidManga)
	)
	s.rschandle = make(map[string]RscHandler)
	s.taskhandle = make(map[string]TaskHandler)
	once.Do(func() {
		basehandleTask = &baseTaskHandler{Service: s}
		basehandleRscAdd = &baseResourceAddHandler{Service: s}
		basehandleRscUpdate = &baseResourceUpdateHandler{Service: s}
		basehandleRscCancel = &baseResourceCancelHandler{Service: s}
		dynamicHandleTask = &dynamicTaskHandler{baseTaskHandler: baseTaskHandler{Service: s}}
		mangaHandelRscAdd = &mangaResourceAddHandler{baseResourceAddHandler: baseResourceAddHandler{Service: s}}
	})
	// Dynamic business uses the base resource handlers; manga overrides add.
	s.registerRscHandler(dynamicRscAdd, s.getbaseResourceAddHandler())
	s.registerRscHandler(dynamicRscUpdate, s.getbaseResourceUpdateHandler())
	s.registerRscHandler(dynamicRscCancel, s.getbaseResourceCancelHandler())
	s.registerRscHandler(managaRscAdd, s.getmangaResourceAddHandler())
	s.registerRscHandler(managaRscUpdate, s.getbaseResourceUpdateHandler())
	s.registerRscHandler(managaRscCancel, s.getbaseResourceCancelHandler())
	s.registerTaskHandler(managaTask, s.getbaseTaskHandler())
	s.registerTaskHandler(dynamicTask, s.getdynamicTaskHandler())
}
// getbaseTaskHandler returns the shared base task handler singleton.
func (s *Service) getbaseTaskHandler() *baseTaskHandler {
	return basehandleTask
}

// getbaseResourceAddHandler returns the shared base resource-add handler.
func (s *Service) getbaseResourceAddHandler() *baseResourceAddHandler {
	return basehandleRscAdd
}

// getbaseResourceUpdateHandler returns the shared base resource-update handler.
func (s *Service) getbaseResourceUpdateHandler() *baseResourceUpdateHandler {
	return basehandleRscUpdate
}

// getbaseResourceCancelHandler returns the shared base resource-cancel handler.
func (s *Service) getbaseResourceCancelHandler() *baseResourceCancelHandler {
	return basehandleRscCancel
}

// getdynamicTaskHandler returns the dynamic-business task handler.
func (s *Service) getdynamicTaskHandler() *dynamicTaskHandler {
	return dynamicHandleTask
}

// getmangaResourceAddHandler returns the manga resource-add handler.
func (s *Service) getmangaResourceAddHandler() *mangaResourceAddHandler {
	return mangaHandelRscAdd
}
// checkTaskMsg parses and validates a task-creation message and converts
// it into a model.Task. It rejects messages with missing mandatory fields,
// duplicate tasks (one already exists for flow+rid) and invalid flows; for
// legacy messages without a BizID the business id is looked up from the
// resource.
//
// Fix: the body previously mixed context.Background() and context.TODO();
// a single context is now used for all dao calls.
/*
TODO
根据DispatchLimit动态设置分发数量
*/
func (s *Service) checkTaskMsg(msg *databus.Message) (*model.Task, error) {
	c := context.Background()
	taskMsg := new(model.CreateTaskMsg)
	if err := json.Unmarshal(msg.Value, taskMsg); err != nil {
		log.Error("checkTaskMsg key(%s) value(%s)", msg.Key, string(msg.Value))
		return nil, err
	}
	// Mandatory fields.
	if taskMsg.DispatchLimit == 0 || taskMsg.FlowID == 0 || taskMsg.RID == 0 {
		log.Error("checkTaskMsg key(%s) value(%s)", msg.Key, string(msg.Value))
		return nil, ErrTaskResourceInvalid
	}
	if s.dao.CheckTask(c, taskMsg.FlowID, taskMsg.RID) > 0 {
		return nil, ErrTaskDuplicate
	}
	ok, err := s.dao.CheckFlow(c, taskMsg.RID, taskMsg.FlowID)
	if !ok || err != nil {
		return nil, ErrTaskFlowInvalid
	}
	// Backward compatibility: old task messages carry no bizid, so derive
	// it from the resource record.
	if taskMsg.BizID == 0 {
		res, err := s.dao.Resource(c, taskMsg.RID)
		if err != nil || res == nil {
			return nil, ErrTaskResourceInvalid
		}
		taskMsg.BizID = res.BusinessID
	}
	return &model.Task{
		BusinessID: taskMsg.BizID,
		FlowID:     taskMsg.FlowID,
		RID:        taskMsg.RID,
	}, nil
}
// writeTaskToDB persists a checked task via the dao.
func (s *Service) writeTaskToDB(c context.Context, task *model.Task) error {
	return s.dao.CreateTask(c, task)
}
// checkRscAddMsg decodes and validates a resource-add payload; business
// id and oid are mandatory.
func (s *Service) checkRscAddMsg(msg json.RawMessage) (*model.AddOption, error) {
	opt := new(model.AddOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.OID) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}
// writeRscAdd persists a resource-add operation via the dao.
// TODO: retry based on the returned error code.
func (s *Service) writeRscAdd(c context.Context, opt *model.AddOption) error {
	return s.dao.RscAdd(c, opt)
}
// checkRscUpdateMsg decodes and validates a resource-update payload;
// business id, oid and a non-empty update set are mandatory.
func (s *Service) checkRscUpdateMsg(msg json.RawMessage) (*model.UpdateOption, error) {
	opt := new(model.UpdateOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.OID) == 0 || len(opt.Update) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}
// writeRscUpdate persists a resource-update operation via the dao.
func (s *Service) writeRscUpdate(c context.Context, opt *model.UpdateOption) error {
	return s.dao.RscUpdate(c, opt)
}
// checkRscCancelMsg decodes and validates a resource-cancel payload;
// business id and a non-empty oid list are mandatory.
func (s *Service) checkRscCancelMsg(msg json.RawMessage) (*model.CancelOption, error) {
	opt := new(model.CancelOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.Oids) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}
// writeRscCancel persists a resource-cancel operation via the dao.
func (s *Service) writeRscCancel(c context.Context, opt *model.CancelOption) error {
	return s.dao.RscCancel(c, opt)
}
// newrsc is the databusutil "new" callback for resource messages: it
// decodes the envelope, resolves the handler for "<action>_<bizid>" and
// runs the handler's CheckMessage on the inner payload.
//
// Fix: the unmarshal-failure log previously omitted the actual error.
func (s *Service) newrsc(msg *databus.Message) (interface{}, error) {
	log.Info("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) ", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
	rscmsg := new(model.RscMsg)
	if err := json.Unmarshal(msg.Value, rscmsg); err != nil {
		log.Error("databusgroup json.Unmarshal for msg(%+v) error(%v)", string(msg.Value), err)
		return nil, ErrInvalidMsg
	}
	key := fmt.Sprintf("%s_%d", rscmsg.Action, rscmsg.BizID)
	handler := s.findRscHandler(key)
	if handler == nil {
		log.Error("databusgroup can not find handler for msg key(%+v)", key)
		return nil, ErrHandlerMiss
	}
	data, err := handler.CheckMessage(rscmsg.Raw)
	if err != nil {
		log.Error("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) CheckMessage(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value), pkgerr.WithStack(err))
	}
	return data, err
}
// splitrsc shards a checked resource message by its business id; unknown
// payload types map to shard 0.
func (s *Service) splitrsc(msg *databus.Message, data interface{}) int {
	switch opt := data.(type) {
	case *model.AddOption:
		return int(opt.BusinessID)
	case *model.UpdateOption:
		return int(opt.BusinessID)
	case *model.CancelOption:
		return int(opt.BusinessID)
	}
	return 0
}
// dorsc is the databusutil "do" callback for resource messages: for each
// checked payload it rebuilds the handler key from the payload type and
// business id, then runs the handler's HandleMessage. Unknown payload
// types, missing handlers and handler errors are logged and skipped.
func (s *Service) dorsc(bmsgs []interface{}) {
	for _, msg := range bmsgs {
		log.Info("databusgroup do msg(%+v)", msg)
		var key string
		switch t := msg.(type) {
		case *model.AddOption:
			key = fmt.Sprintf("%s%d", _prefixRscAdd, t.BusinessID)
		case *model.UpdateOption:
			key = fmt.Sprintf("%s%d", _prefixRscUpdate, t.BusinessID)
		case *model.CancelOption:
			key = fmt.Sprintf("%s%d", _prefixRscCancel, t.BusinessID)
		default:
			log.Error("databusgroup unknow msg(%+v)", msg)
			continue
		}
		handler := s.findRscHandler(key)
		if handler == nil {
			log.Error("databusgroup msg(%+v) handler NotFound", msg)
			continue
		}
		if err := handler.HandleMessage(context.Background(), msg); err != nil {
			log.Error("databusgroup msg(%+v) handler err(%v)", msg, pkgerr.WithStack(err))
			continue
		}
	}
}
// newtask is the databusutil "new" callback for task messages: it decodes
// the envelope, resolves the handler for "task_<bizid>" and runs the
// handler's CheckMessage. Duplicate-task errors are expected and logged
// at warn level only.
//
// Fix: the unmarshal-failure log previously omitted the actual error.
func (s *Service) newtask(msg *databus.Message) (interface{}, error) {
	log.Info("databusgroup newtask msg key(%+v) partition(%d) offset(%d) value(%s) ", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
	taskmsg := new(model.CreateTaskMsg)
	if err := json.Unmarshal(msg.Value, taskmsg); err != nil {
		log.Error("databusgroup newtask json.Unmarshal for msg(%+v) error(%v)", string(msg.Value), err)
		return nil, ErrInvalidMsg
	}
	key := fmt.Sprintf("%s%d", _prefixTask, taskmsg.BizID)
	handler := s.findTaskHandler(key)
	if handler == nil {
		log.Error("databusgroup can not find handler for msg key(%+v)", key)
		return nil, ErrHandlerMiss
	}
	data, err := handler.CheckMessage(msg)
	if err != nil {
		errmsg := fmt.Sprintf("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) CheckMessage(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value), pkgerr.WithStack(err))
		if err == ErrTaskDuplicate {
			log.Warn(errmsg)
		} else {
			log.Error(errmsg)
		}
	}
	return data, err
}
// splittask shards a checked task message by its business id; unknown
// payload types map to shard 0.
func (s *Service) splittask(msg *databus.Message, data interface{}) int {
	task, ok := data.(*model.Task)
	if !ok {
		return 0
	}
	return int(task.BusinessID)
}
// dotask is the databusutil "do" callback for task messages: for each
// checked *model.Task it resolves the "task_<bizid>" handler and runs
// HandleMessage. Unknown payload types, missing handlers and handler
// errors are logged and skipped.
func (s *Service) dotask(bmsgs []interface{}) {
	for _, msg := range bmsgs {
		log.Info("databusgroup dotask msg(%+v)", msg)
		var key string
		if t, ok := msg.(*model.Task); ok {
			key = fmt.Sprintf("%s%d", _prefixTask, t.BusinessID)
		} else {
			log.Error("databusgroup dotask unknow msg(%+v)", msg)
			continue
		}
		handler := s.findTaskHandler(key)
		if handler == nil {
			log.Error("databusgroup dotask msg(%+v) handler NotFound", msg)
			continue
		}
		if err := handler.HandleMessage(context.Background(), msg); err != nil {
			log.Error("databusgroup dotask msg(%+v) handler err(%v)", msg, pkgerr.WithStack(err))
			continue
		}
	}
}

View File

@@ -0,0 +1,76 @@
package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"go-common/app/job/main/aegis/model"
)
// Sentinel errors for resource handlers.
var (
	ErrMangaNoIndex = errors.New("漫画无图")
)

// baseResourceAddHandler handles resource-add messages with the default
// check/persist logic.
type baseResourceAddHandler struct {
	*Service
}

// mangaResourceAddHandler extends the base add handler with a manga
// specific metadata check (image index must be present).
type mangaResourceAddHandler struct {
	baseResourceAddHandler
}

// baseResourceUpdateHandler handles resource-update messages.
type baseResourceUpdateHandler struct {
	*Service
}

// baseResourceCancelHandler handles resource-cancel messages.
type baseResourceCancelHandler struct {
	*Service
}
// CheckMessage validates a resource-add payload using the service's base
// checks.
func (h baseResourceAddHandler) CheckMessage(msg json.RawMessage) (addObj interface{}, err error) {
	return h.checkRscAddMsg(msg)
}

// HandleMessage persists a checked resource-add option.
func (h baseResourceAddHandler) HandleMessage(c context.Context, addObj interface{}) error {
	return h.writeRscAdd(c, addObj.(*model.AddOption))
}
// Manga-specific validation: the metadata must carry a non-empty "index"
// entry (the image list).
// CheckMessage runs the base resource-add checks and then verifies the
// manga metadata.
//
// Fix: on a metadata parse failure the named return previously leaked the
// partially-built addObj alongside the error; nil is now returned.
func (h mangaResourceAddHandler) CheckMessage(msg json.RawMessage) (addObj interface{}, err error) {
	if addObj, err = h.baseResourceAddHandler.CheckMessage(msg); err != nil {
		return nil, err
	}
	addopt := addObj.(*model.AddOption)
	metas := make(map[string]interface{})
	if err = json.Unmarshal([]byte(addopt.MetaData), &metas); err != nil {
		return nil, err
	}
	if index, ok := metas["index"]; !ok || len(fmt.Sprint(index)) == 0 {
		return nil, ErrMangaNoIndex
	}
	return
}

// HandleMessage delegates persistence to the base add handler.
func (h mangaResourceAddHandler) HandleMessage(c context.Context, addObj interface{}) error {
	return h.baseResourceAddHandler.HandleMessage(c, addObj.(*model.AddOption))
}
// CheckMessage validates a resource-update payload.
func (h baseResourceUpdateHandler) CheckMessage(msg json.RawMessage) (updateObj interface{}, err error) {
	return h.checkRscUpdateMsg(msg)
}

// HandleMessage persists a checked resource-update option.
func (h baseResourceUpdateHandler) HandleMessage(c context.Context, updateObj interface{}) error {
	return h.writeRscUpdate(c, updateObj.(*model.UpdateOption))
}

// CheckMessage validates a resource-cancel payload.
func (h baseResourceCancelHandler) CheckMessage(msg json.RawMessage) (cancelObj interface{}, err error) {
	return h.checkRscCancelMsg(msg)
}

// HandleMessage persists a checked resource-cancel option.
func (h baseResourceCancelHandler) HandleMessage(c context.Context, cancelObj interface{}) error {
	return h.writeRscCancel(c, cancelObj.(*model.CancelOption))
}

View File

@@ -0,0 +1,54 @@
package service
import (
"context"
"go-common/app/job/main/aegis/model"
"go-common/library/queue/databus"
"go-common/library/xstr"
)
// baseTaskHandler handles task-creation messages with the default
// check/persist logic.
type baseTaskHandler struct {
	*Service
}

// dynamicTaskHandler extends the base task handler by enriching tasks
// with uploader (mid) related fields.
type dynamicTaskHandler struct {
	baseTaskHandler
}
// CheckMessage validates a task-creation message using the service's base
// checks.
func (h baseTaskHandler) CheckMessage(msg *databus.Message) (taskObj interface{}, err error) {
	return h.checkTaskMsg(msg)
}

// HandleMessage persists a checked task.
func (h baseTaskHandler) HandleMessage(c context.Context, taskObj interface{}) error {
	return h.writeTaskToDB(c, taskObj.(*model.Task))
}
// CheckMessage runs the base task checks, then enriches the task with
// uploader (mid) related fields: special group ids and fan count.
func (h dynamicTaskHandler) CheckMessage(msg *databus.Message) (taskObj interface{}, err error) {
	ctx := context.Background()
	if taskObj, err = h.baseTaskHandler.CheckMessage(msg); err != nil {
		return
	}
	task := taskObj.(*model.Task)
	res, err := h.dao.Resource(ctx, task.RID)
	if err != nil || res == nil {
		return nil, ErrTaskResourceInvalid
	}
	task.MID = res.MID
	if task.MID > 0 {
		// Best-effort enrichment: lookup errors leave the fields zeroed.
		gids, _ := h.dao.UpSpecial(ctx, task.MID)
		task.Group = xstr.JoinInts(gids)
		task.Fans, _ = h.dao.FansCount(ctx, task.MID)
	}
	return
}
// HandleMessage delegates persistence to the base task handler.
func (h dynamicTaskHandler) HandleMessage(c context.Context, obj interface{}) error {
	return h.baseTaskHandler.HandleMessage(c, obj.(*model.Task))
}

View File

@@ -0,0 +1,639 @@
package service
import (
"context"
"errors"
"fmt"
moniMdl "go-common/app/job/main/aegis/model/monitor"
accApi "go-common/app/service/main/account/api"
upApi "go-common/app/service/main/up/api/v1"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/xstr"
"math"
"reflect"
"strconv"
"strings"
"time"
)
// monitorArchive applies the archive monitoring rules to a binlog row.
// oa (the old image) may be nil and must be nil-checked before use.
// PGC archives are ignored; the archive addit is attached best-effort
// (a NothingFound miss is tolerated), then the generic rule engine runs.
//
// Fix: the nil guard on na now runs before na.Attr is dereferenced —
// previously a nil new image would panic on the PGC attribute check
// before reaching the guard.
func (s *Service) monitorArchive(act string, oa, na *moniMdl.BinlogArchive) (errs []error) {
	var (
		c      = context.TODO()
		logs   []string
		err    error
		errs2  []error
		nAddit *moniMdl.ArchiveAddit
	)
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorArchive() unknown panic(%v)", x)
		} else if len(errs) > 0 {
			log.Error("s.monitorArchive(\n act: %s \n oa: %+v \n na: %+v) \n logStr:\n %v \n error:%v", act, oa, na, logStr, errs)
		} else {
			log.Info("s.monitorArchive(\n act: %s \n oa: %+v \n na: %+v) \n logStr:\n %v", act, oa, na, logStr)
		}
	}()
	if na == nil {
		err = errors.New("new msg nil")
		errs = append(errs, err)
		logs = append(logs, "databus数据异常new msg nil")
		return
	}
	// Skip PGC archives (bit ArchiveBitPGC set in the attribute mask).
	if (na.Attr>>moniMdl.ArchiveBitPGC)&int64(1) == 1 {
		logs = append(logs, "忽略PGC稿件")
		return
	}
	na.IsSpecTID = moniMdl.SpecialTypeIDs[na.TypeID]
	if nAddit, err = s.moniDao.ArchiveAttr(c, na.ID); err != nil {
		logs = append(logs, fmt.Sprintf("warn:稿件Addit获取失败aid:%d error:%v", na.ID, err))
		if err != ecode.NothingFound {
			errs = append(errs, err)
			return
		}
		// A missing addit is not fatal; continue without it.
		err = nil
	} else {
		na.Addit = nAddit
	}
	errs2 = s.monitorHandle(moniMdl.BusArc, *na, na.ID)
	errs = append(errs, errs2...)
	return
}
// monitorUpDelArc checks whether a deleted archive belongs to a monitored
// uploader: either (RuleHighUpDelArc) an uploader in the "high ally"
// special group, or (RuleFamUpDelArc) an uploader with >= 500k followers.
// It accepts the archive as either a value or pointer, only considers
// deleted (ArchiveStateDel) original (ArchiveOriginal) archives, records
// the deletion in redis best-effort, and returns whether the rule matched
// plus a step-by-step log trail.
func (s *Service) monitorUpDelArc(id int64, obj interface{}) (satisfy bool, logs []string, err error) {
	var (
		c       = context.TODO()
		pReply  *accApi.ProfileStatReply
		upReply *upApi.HighAllyUpsReply
		a       *moniMdl.BinlogArchive
	)
	logs = append(logs, "s.monitorUpDelArc() begin")
	if obj == nil {
		logs = append(logs, "\t obj是nil")
		err = errors.New("obj is nil")
		return
	}
	// Accept both pointer and value forms of the archive.
	switch obj.(type) {
	case *moniMdl.BinlogArchive:
		a = obj.(*moniMdl.BinlogArchive)
	case moniMdl.BinlogArchive:
		ac := obj.(moniMdl.BinlogArchive)
		a = &ac
	default:
		logs = append(logs, fmt.Sprintf("\t 未知类型:%+v", obj))
		err = errors.New("unknown interface type")
		return
	}
	logs = append(logs, fmt.Sprintf("\t archive:%+v", a))
	if a == nil {
		err = errors.New("archive is nil")
		return
	}
	if a.State != moniMdl.ArchiveStateDel {
		logs = append(logs, "\t 非删除,忽略")
		return
	}
	if a.Copyright != moniMdl.ArchiveOriginal {
		logs = append(logs, "\t 非自制,忽略")
		return
	}
	// Record the deletion; failure here is logged but non-fatal.
	if err = s.moniDao.AddToDelArc(c, a); err != nil {
		logs = append(logs, fmt.Sprintf("\t 添加删除信息到redis失败。error:%v", err))
		err = nil
	}
	if id == moniMdl.RuleHighUpDelArc {
		// High-ally uploader check.
		if upReply, err = s.up.GetHighAllyUps(c, &upApi.HighAllyUpsReq{Mids: []int64{a.MID}}); err != nil {
			logs = append(logs, fmt.Sprintf("\t 获取UP主高能信息失败。error:%v", err))
			log.Error("\t s.monitorUpDelArc() s.up.GetHighAllyUps() error:%v", err)
		}
		logs = append(logs, fmt.Sprintf("\t 用户信息:%+v", upReply))
		if upReply != nil {
			if _, ok := upReply.Lists[a.MID]; ok {
				logs = append(logs, "\t UP主属于高能联盟")
				satisfy = true
				return
			}
		}
	} else if id == moniMdl.RuleFamUpDelArc {
		// Famous-uploader check: follower count threshold of 500k.
		if pReply, err = s.acc.ProfileWithStat3(c, &accApi.MidReq{Mid: a.MID}); err != nil {
			logs = append(logs, fmt.Sprintf("\t 获取UP主信息失败。error:%v", err))
			log.Error("\t s.monitorUpDelArc() s.acc.ProfileWithStat3() error:%v", err)
		}
		logs = append(logs, fmt.Sprintf("\t 用户信息:%+v", pReply))
		if pReply != nil && pReply.Follower >= 500000 {
			logs = append(logs, "\t UP主属于大UP主")
			satisfy = true
			return
		}
	}
	return
}
// monitorVideo feeds a video binlog event into the generic monitor pipeline,
// keyed by the video id.
func (s *Service) monitorVideo(act string, ov, nv *moniMdl.BinlogVideo) (errs []error) {
	return s.monitorHandle(moniMdl.BusVideo, nv, nv.ID)
}
// monitorHandle evaluates every monitor rule of business bid against the new
// message nObj and moves oid into or out of the per-rule redis monitor sets
// accordingly. It returns the errors accumulated along the way; all steps are
// logged in one batch from the deferred handler (which also absorbs panics,
// e.g. from reflection on unexpected shapes).
func (s *Service) monitorHandle(bid int64, nObj interface{}, oid int64) (errs []error) {
	var (
		c            = context.TODO()
		rules        []*moniMdl.Rule
		logs, logs2  []string
		err          error
		errs2        []error
		oKeys, nKeys []string
	)
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorHandle() unknown panic(%v)", x)
		} else if len(errs) > 0 {
			log.Error("s.monitorHandle(\n na: %+v) \n logStr:\n %v \n error:%v", nObj, logStr, errs)
		} else {
			log.Info("s.monitorHandle(\n na: %+v) \n logStr:\n %v", nObj, logStr)
		}
	}()
	if nObj == nil {
		err = errors.New("new msg nil")
		errs = append(errs, err)
		logs = append(logs, "databus数据异常new msg nil")
		return
	}
	if rules, err = s.moniDao.RulesByBid(c, bid); err != nil {
		logs = append(logs, "获取监控配置失败")
		return
	}
	if len(rules) == 0 {
		logs = append(logs, "监控配置不存在")
		return
	}
	for _, rule := range rules {
		var allSatisfy = true
		// Special case: the "UP deletes many archives" rules have their own matcher.
		if rule.ID == moniMdl.RuleHighUpDelArc || rule.ID == moniMdl.RuleFamUpDelArc {
			if allSatisfy, logs2, err = s.monitorUpDelArc(rule.ID, nObj); err != nil {
				errs = append(errs, err)
			}
			logs = append(logs, logs2...)
		} else {
			for field, cdt := range rule.RuleConf.MoniCdt {
				var (
					val     int64
					satisfy bool
				)
				// NOTE(review): when the field lookup fails, val stays 0 and is
				// still compared below — presumably intentional; confirm.
				if val, err = s.reflectIntVal(nObj, field, 0); err != nil {
					errs = append(errs, err)
					logs = append(logs, fmt.Sprintf("没有找到字段%s", field))
				}
				// A malformed comparison expression counts as "not satisfied".
				if satisfy, err = s.monitorCompSatisfy(cdt.Comp, val); err != nil {
					allSatisfy = false
					break
				}
				if !satisfy {
					allSatisfy = false
					break
				}
			}
		}
		if allSatisfy { // all conditions hold: move the object into the monitor set
			nKeys = append(nKeys, fmt.Sprintf(moniMdl.RedisPrefix, rule.ID))
		} else { // some condition failed: move it out of the monitor set
			oKeys = append(oKeys, fmt.Sprintf(moniMdl.RedisPrefix, rule.ID))
		}
	}
	logs = append(logs, fmt.Sprintf("%d移出keys%v", oid, oKeys))
	logs = append(logs, fmt.Sprintf("%d移入keys%v", oid, nKeys))
	logs2, errs2 = s.monitorSave(oKeys, nKeys, oid)
	logs = append(logs, logs2...)
	if len(errs2) != 0 {
		errs = append(errs, errs2...)
	}
	return
}
// monitorSave applies the computed set-membership changes for oid: removal
// from the oKeys sets, insertion into the nKeys sets, and expiry cleanup on
// both. Errors from each step are collected rather than aborting, so a partial
// failure still applies the remaining operations.
func (s *Service) monitorSave(oKeys, nKeys []string, oid int64) (logs []string, errs []error) {
	var (
		c     = context.TODO()
		logs2 []string
		err   error
	)
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorSave() unknown panic(%v)", x)
		} else if len(errs) != 0 {
			log.Error("s.monitorSave(\n oKeys: %v \n nKeys: %v \n oid: %d) \n logStr:\n %v \n error:%v", oKeys, nKeys, oid, logStr, errs)
		} else {
			log.Info("s.monitorSave(\n oKeys: %v \n nKeys: %v \n oid: %d) \n logStr:\n %v", oKeys, nKeys, oid, logStr)
		}
	}()
	// remove oid from the old keys
	logs2, err = s.moniDao.RemFromSet(c, oKeys, oid)
	logs = append(logs, logs2...)
	if err != nil {
		errs = append(errs, err)
	}
	// clear expired members of the old keys
	logs2, err = s.moniDao.ClearExpireSet(c, oKeys)
	logs = append(logs, logs2...)
	if err != nil {
		errs = append(errs, err)
	}
	// add oid to the new keys
	logs2, err = s.moniDao.AddToSet(c, nKeys, oid)
	logs = append(logs, logs2...)
	if err != nil {
		errs = append(errs, err)
	}
	// clear expired members of the new keys
	logs2, err = s.moniDao.ClearExpireSet(c, nKeys)
	logs = append(logs, logs2...)
	if err != nil {
		errs = append(errs, err)
	}
	return
}
// monitorNotify is a long-running loop (one pass every 30 minutes, 1 minute
// backoff on config errors) that evaluates every valid monitor rule and sends
// an alarm when its notify conditions are met. It never returns; the deferred
// warning only fires if the loop somehow exits.
func (s *Service) monitorNotify() {
	defer func() {
		log.Warn("monitorNotify exited.")
	}()
	var (
		c        = context.TODO()
		err      error
		rules    []*moniMdl.Rule
		stats    *moniMdl.Stats
		min, max int64
	)
	for {
		log.Info("s.monitorNotify() begin")
		if rules, err = s.moniDao.ValidRules(c); err != nil {
			log.Error("s.monitorNotify() rules:%+v error:%v", rules, err)
			time.Sleep(1 * time.Minute)
			continue
		}
		for _, rule := range rules {
			if rule.ID == moniMdl.RuleHighUpDelArc || rule.ID == moniMdl.RuleFamUpDelArc {
				// archive-deletion monitoring is handled by a dedicated goroutine
				// (one spawned per pass; wg tracks them for shutdown)
				s.wg.Add(1)
				go s.moniUpDelArcNotify(rule)
				continue
			}
			if min, max, err = s.monitorNotifyTime(rule.RuleConf); err != nil {
				log.Error("s.monitorNotify() s.monitorNotifyTime() rule:%+v error:%v", rule, err)
				continue
			}
			if stats, err = s.moniDao.MoniRuleStats(c, rule.ID, min, max); err != nil {
				log.Error("s.monitorNotify() s.moniDao.MoniRuleStats(%d,%d,%d) error:%v", rule.ID, min, max, err)
				continue
			}
			notify := s.moniSatisfyNotify(rule.RuleConf, stats)
			if notify {
				title := fmt.Sprintf("%s监控(aegis)", rule.RuleConf.Name)
				body := fmt.Sprintf("当前滞留时间为%s超过阀值滞留量为%d整体量为%d \n报警时间%s", secondsFormat(stats.MaxTime), stats.MoniCount, stats.TotalCount, time.Now().Format("2006-01-02 15:04:05"))
				url := ""
				// deep link into the manager console page matching the business
				switch rule.BID {
				case moniMdl.BusVideo:
					url = fmt.Sprintf("http://manager.bilibili.co/#!/video/list?monitor_list=%d_%d_%d", rule.Type, rule.BID, rule.ID)
				case moniMdl.BusArc:
					url = fmt.Sprintf("http://manager.bilibili.co/#!/archive_utils/all?monitor_list=%d_%d_%d", rule.Type, rule.BID, rule.ID)
				}
				body += fmt.Sprintf("\n跳转链接<a href=\"%s\">点击跳转</a> %s", url, url)
				if err = s.monitorSendNotify(c, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, body); err != nil {
					log.Error("s.monitorNotify() s.monitorSendNotify(%d,%v,%s,%s) error:%v", rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, body, err)
				}
			}
		}
		time.Sleep(30 * time.Minute)
	}
}
// moniSatisfyNotify reports whether the aggregated stats of a rule trip its
// notify thresholds. Only "time" (max staleness) and "count" (backlog size)
// conditions exist today; every configured condition must hold (logical AND).
func (s *Service) moniSatisfyNotify(conf *moniMdl.RuleConf, stats *moniMdl.Stats) (notify bool) {
	notify = true
	// BUGFIX: the existence check previously inspected conf.MoniCdt["time"]
	// while the threshold/comparator were read from conf.NotifyCdt["time"],
	// so a rule with a monitor "time" condition but no notify "time" condition
	// would dereference a missing NotifyCdt entry. Check NotifyCdt, mirroring
	// the "count" branch below.
	if _, ok := conf.NotifyCdt["time"]; ok {
		threshold := conf.NotifyCdt["time"].Value
		comp := conf.NotifyCdt["time"].Comp
		switch comp {
		case moniMdl.CompGT:
			if int64(stats.MaxTime) < threshold {
				notify = false
			}
		case moniMdl.CompLT:
			if int64(stats.MaxTime) > threshold {
				notify = false
			}
		}
	}
	if _, ok := conf.NotifyCdt["count"]; ok {
		threshold := conf.NotifyCdt["count"].Value
		comp := conf.NotifyCdt["count"].Comp
		switch comp {
		case moniMdl.CompGT:
			if int64(stats.MoniCount) < threshold {
				notify = false
			}
		case moniMdl.CompLT:
			if int64(stats.MoniCount) > threshold {
				notify = false
			}
		}
	}
	return
}
// moniUpDelArcNotify handles alarming for the UP-deletes-archives rules: it
// loads the deleted-archive records accumulated in the rule's time window,
// groups them per UP (mid) and, for every UP whose deletion count reaches the
// rule's "count" threshold, emails an HTML report listing the deleted archives.
// Must be started with s.wg.Add(1); it signals completion via the deferred Done.
func (s *Service) moniUpDelArcNotify(rule *moniMdl.Rule) (err error) {
	var (
		c         = context.TODO()
		min, max  int64
		oidMap    map[int64]int
		oids, mids []int64
		infos     map[int64]*moniMdl.DelArcInfo
		delMap    map[int64][]*moniMdl.DelArcInfo
		accStats  map[int64]*accApi.ProfileStatReply
		threshold int
	)
	defer func() {
		s.wg.Done()
	}()
	// the rule must define a "count" notify condition (deletions per UP)
	if _, ok := rule.RuleConf.NotifyCdt["count"]; !ok {
		err = errors.New("notify count config error")
		log.Error("s.moniUpDelArcNotify(%+v) 没有count监控配置", rule)
		return
	}
	threshold = int(rule.RuleConf.NotifyCdt["count"].Value)
	delMap = make(map[int64][]*moniMdl.DelArcInfo)
	min, max, err = s.monitorNotifyTime(rule.RuleConf)
	if err != nil {
		log.Error("s.monitorNotifyTime() rule:%+v error:%v", rule, err)
		return
	}
	if oidMap, err = s.moniDao.MoniRuleOids(c, rule.ID, min, max); err != nil {
		log.Error("s.moniDao.MoniRuleOids() rule:%+v error:%v", rule, err)
		return
	}
	for oid := range oidMap {
		oids = append(oids, oid)
	}
	if infos, err = s.moniDao.ArcDelInfos(c, oids); err != nil {
		log.Error("s.moniUpDelArcNotify() s.moniDao.ArcDelInfos(%v) error(%v)", oids, err)
		return
	}
	// group deletion records by UP
	for _, info := range infos {
		delMap[info.MID] = append(delMap[info.MID], info)
		mids = append(mids, info.MID)
	}
	// profile lookup is best-effort: on failure fall through with an empty map
	// and use the placeholder profile below
	if accStats, err = s.multiAccounts(c, mids); err != nil {
		log.Error("s.moniUpDelArcNotify() s.multiAccounts(%v) error(%v)", mids, err)
		accStats = make(map[int64]*accApi.ProfileStatReply)
	}
	for mid, ins := range delMap {
		if _, ok := accStats[mid]; !ok {
			log.Error("s.monitorNotify() account ")
			// placeholder profile so the report can still be rendered
			accStats[mid] = &accApi.ProfileStatReply{
				Profile: &accApi.Profile{
					Name: "nil",
					Mid:  mid,
				},
			}
		}
		if len(ins) >= threshold {
			var (
				title, content string
			)
			// title/header are built once (on the first iteration), then one
			// table row is appended per deleted archive
			for _, v := range ins {
				if title == "" {
					title = fmt.Sprintf("【异常删稿报警】“%s” 24内已删除%d个自制稿件 ", accStats[mid].Profile.Name, len(ins))
				}
				if content == "" {
					content = fmt.Sprintf("监控规则:%d——%s——%s<br />报警时间:%s<br /><br />", rule.ID, rule.Name, rule.RuleConf.Name, time.Now().Format("2006-01-02 15:04:05"))
					content += fmt.Sprintf("<b>UP主昵称:%smid: %d当前粉丝数:%d 24内已删除:%d</b><br /><br />", accStats[mid].Profile.Name, accStats[mid].Profile.Mid, accStats[mid].Follower, len(ins))
					content += "<table border=\"1\" style=\"border-collapse: collapse;\"><tr><th>标题</th><th>av号</th><th>删除时间</th></tr>"
				}
				content += fmt.Sprintf("<tr><td style=\"padding: 5px 10px;\"> %s </td><td style=\"padding: 5px 10px;\"> %d </td><td style=\"padding: 5px 10px;\"> %s </td></tr>", v.Title, v.AID, v.Time)
			}
			content += "</table>"
			if err = s.monitorSendNotify(c, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, content); err != nil {
				log.Error("s.moniUpDelArcNotify(%d) s.monitorSendNotify(%d,%v,%s,%s) error:%v", rule.ID, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, content, err)
			}
		}
	}
	return
}
// monitorNotifyTime converts a rule's "time" notify condition into the
// [tFrom, tTo] unix-timestamp score range used when querying the monitor sets:
// CompGT selects entries older than the threshold, CompLT younger ones.
func (s *Service) monitorNotifyTime(conf *moniMdl.RuleConf) (tFrom, tTo int64, err error) {
	cdt, ok := conf.NotifyCdt["time"]
	if !ok {
		err = errors.New("配置的 NotifyCdt 中不存在 time")
		return
	}
	now := time.Now().Unix()
	switch cdt.Comp {
	case moniMdl.CompGT:
		// everything that has waited longer than the threshold
		return 0, now - cdt.Value, nil
	case moniMdl.CompLT:
		// everything younger than the threshold
		return now - cdt.Value, now, nil
	default:
		err = errors.New("配置的 NotifyCdt 中 comparison 不合法: " + cdt.Comp)
		return
	}
}
// reflectIntVal resolves an int64 value from obj by field name, supporting
// dotted multi-level paths such as "Addit.MissionID". Pointers are dereferenced
// along the way; dep caps the recursion depth at 10 to bound pathological input.
func (s *Service) reflectIntVal(obj interface{}, field string, dep int) (val int64, err error) {
	if dep > 10 {
		err = fmt.Errorf("too deep:%d", dep)
		return
	}
	if obj == nil {
		err = errors.New("s.reflectIntVal() obj is invalid memory address or nil pointer dereference")
		return
	}
	rv := reflect.ValueOf(obj)
	if rv.Kind() == reflect.Ptr {
		// BUGFIX: the nil-interface check above does not catch a typed nil
		// pointer ((*T)(nil)), which would panic on Elem().Interface().
		if rv.IsNil() {
			err = errors.New("s.reflectIntVal() obj is invalid memory address or nil pointer dereference")
			return
		}
		return s.reflectIntVal(rv.Elem().Interface(), field, dep+1)
	}
	if strings.Contains(field, ".") {
		// descend one level per call; the old for-loop always returned on its
		// first iteration, so SplitN expresses the real behavior directly
		fs := strings.SplitN(field, ".", 2)
		fv := rv.FieldByName(fs[0])
		if !fv.IsValid() {
			err = fmt.Errorf("s.reflectIntVal() field not found. field:%s obj: %+v", field, obj)
			return
		}
		// BUGFIX: Value.IsNil panics for non-nilable kinds (e.g. an embedded
		// struct value); only pointer/interface intermediates get the nil
		// check and Elem dereference, plain values descend directly.
		switch fv.Kind() {
		case reflect.Ptr, reflect.Interface:
			if fv.IsNil() {
				err = fmt.Errorf("s.reflectIntVal() field is nil. field:%s obj: %+v", field, obj)
				return
			}
			return s.reflectIntVal(fv.Elem().Interface(), fs[1], dep+1)
		default:
			return s.reflectIntVal(fv.Interface(), fs[1], dep+1)
		}
	}
	fv := rv.FieldByName(field)
	if !fv.IsValid() {
		err = fmt.Errorf("s.reflectIntVal() field not found. field:%s obj: %+v", field, obj)
		return
	}
	val = fv.Int()
	return
}
// monitorCompSatisfy evaluates a comparison expression string (e.g. ">=10",
// "!in(1,2)") against val. The branch order below is significant: "!=", ">=",
// "<=" must be matched before the bare "=", and "!in" before "in"; ">"/"<"
// come last so they don't shadow ">="/"<=". Unknown expressions yield an error.
func (s *Service) monitorCompSatisfy(com string, val int64) (is bool, err error) {
	var (
		v    int64
		vals []int64
	)
	// currently supports !=, >=, <=, =, !in(...), in(...), >, <
	if strings.Contains(com, "!=") {
		//"!=10"
		if v, err = strconv.ParseInt(strings.Replace(com, "!=", "", -1), 10, 64); err != nil {
			return
		}
		is = v != val
	} else if strings.Contains(com, ">=") {
		//">=10"
		if v, err = strconv.ParseInt(strings.Replace(com, ">=", "", -1), 10, 64); err != nil {
			return
		}
		is = val >= v
	} else if strings.Contains(com, "<=") {
		//"<=10"
		if v, err = strconv.ParseInt(strings.Replace(com, "<=", "", -1), 10, 64); err != nil {
			return
		}
		is = val <= v
	} else if strings.Contains(com, "=") {
		//"=10"
		if v, err = strconv.ParseInt(strings.Replace(com, "=", "", -1), 10, 64); err != nil {
			return
		}
		is = v == val
	} else if strings.Contains(com, "!in") {
		//"!in(1,2,3)" — satisfied when val is NOT in the list
		com = strings.Replace(com, "!in(", "", -1)
		com = strings.Replace(com, ")", "", -1)
		if vals, err = xstr.SplitInts(com); err != nil {
			return
		}
		is = true
		for _, v := range vals {
			if val == v {
				is = false
				break
			}
		}
	} else if strings.Contains(com, "in") {
		//"in(1,2,3)"
		com = strings.Replace(com, "in(", "", -1)
		com = strings.Replace(com, ")", "", -1)
		if vals, err = xstr.SplitInts(com); err != nil {
			return
		}
		for _, v := range vals {
			if val == v {
				is = true
				break
			}
		}
	} else if strings.Contains(com, ">") {
		//">10"
		if v, err = strconv.ParseInt(strings.Replace(com, ">", "", -1), 10, 64); err != nil {
			return
		}
		is = val > v
	} else if strings.Contains(com, "<") {
		//"<10"
		if v, err = strconv.ParseInt(strings.Replace(com, "<", "", -1), 10, 64); err != nil {
			return
		}
		is = val < v
	} else {
		err = errors.New("unknown comparison")
	}
	return
}
// monitorSendNotify dispatches a monitor alarm through the configured channel.
// Email is the only implemented way today; anything else is rejected.
func (s *Service) monitorSendNotify(c context.Context, way int8, members []string, title, content string) (err error) {
	if way != moniMdl.NotifyTypeEmail {
		err = errors.New("unknown notify way")
		log.Error("s.monitorSendNotify(%d,%v,%s,%s) unknown notify way", way, members, title, content)
		return
	}
	log.Info("s.monitorSendNotify() begin. way:%d members:%v title:%s content:%s", way, members, title, content)
	if err = s.email.MonitorEmailAsync(c, members, title, content); err != nil {
		log.Error("s.email.SendMonitorNotify(%v,%s,%s) error:%v", members, title, content, err)
	}
	return
}
// multiAccounts fetches profile+stat for each distinct mid, one RPC per mid.
// Mids that fail are skipped (and logged) instead of aborting the batch, so a
// partial result map may be returned together with the last error seen.
func (s *Service) multiAccounts(c context.Context, mids []int64) (res map[int64]*accApi.ProfileStatReply, err error) {
	res = make(map[int64]*accApi.ProfileStatReply)
	if len(mids) == 0 {
		return
	}
	seen := make(map[int64]bool)
	for _, mid := range mids {
		if seen[mid] {
			continue
		}
		seen[mid] = true
		var reply *accApi.ProfileStatReply
		if reply, err = s.acc.ProfileWithStat3(c, &accApi.MidReq{Mid: mid}); err != nil {
			log.Error("s.multiAccounts() s.acc.ProfileWithStat3(%d) error(%v)", mid, err)
			continue
		}
		res[mid] = reply
	}
	return
}
// monitorEmailProc drains the monitor email queue every 200ms.
// NOTE(review): the loop never exits, so the deferred wg.Done() is effectively
// unreachable — the WaitGroup will block forever on shutdown; confirm intent.
func (s *Service) monitorEmailProc() {
	defer s.wg.Done()
	for {
		s.email.MonitorEmailProc()
		time.Sleep(200 * time.Millisecond)
	}
}
// secondsFormat renders a second count as "HH:MM:SS"; negative input yields
// the placeholder "--:--:--".
func secondsFormat(sec int) (str string) {
	switch {
	case sec < 0:
		return "--:--:--"
	case sec == 0:
		return "00:00:00"
	}
	hours := math.Floor(float64(sec) / 3600)
	minutes := math.Floor((float64(sec) - hours*3600) / 60)
	seconds := sec % 60
	return fmt.Sprintf("%02d:%02d:%02d", int64(hours), int64(minutes), seconds)
}

View File

@@ -0,0 +1,134 @@
package service
import (
"context"
"github.com/golang/mock/gomock"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/job/main/aegis/model/monitor"
accApi "go-common/app/service/main/account/api"
"testing"
)
// WithMock wraps f so it runs with a fresh gomock controller whose
// expectations are verified (Finish) when the returned func exits.
func WithMock(t *testing.T, f func(mock *gomock.Controller)) func() {
	return func() {
		mockCtrl := gomock.NewController(t)
		defer mockCtrl.Finish()
		f(mockCtrl)
	}
}
// TestService_monitorArchive drives monitorArchive with a deleted-state
// archive and expects errors to be reported.
// NOTE(review): the Convey label says "monitorUpDelArc" — looks like a
// copy-paste leftover.
func TestService_monitorArchive(t *testing.T) {
	var (
		na = &monitor.BinlogArchive{
			ID:     10111555,
			State:  -100,
			Round:  10,
			MID:    666,
			TypeID: 2422,
		}
	)
	Convey("monitorUpDelArc", t, func(ctx C) {
		errs := s.monitorArchive("update", nil, na)
		So(errs, ShouldNotBeEmpty)
	})
}
// TestService_monitorUpDelArc checks that monitorUpDelArc always produces
// trace logs for a deletion event (rule id 1).
func TestService_monitorUpDelArc(t *testing.T) {
	var (
		na = &monitor.BinlogArchive{
			ID:     10111555,
			State:  -100,
			Round:  10,
			MID:    666,
			TypeID: 24,
		}
		logs []string
	)
	Convey("monitorUpDelArc", t, func(ctx C) {
		_, logs, _ = s.monitorUpDelArc(1, na)
		So(logs, ShouldNotBeEmpty)
	})
}
// TestService_monitorVideo runs a video binlog event through the monitor
// pipeline and expects no errors.
func TestService_monitorVideo(t *testing.T) {
	var (
		na = &monitor.BinlogVideo{
			ID:     10134809,
			Status: 0,
		}
	)
	Convey("monitorVideo", t, func(ctx C) {
		errs := s.monitorVideo("update", nil, na)
		So(errs, ShouldBeEmpty)
	})
}
// TestService_reflectIntVal covers the three reflectIntVal cases: a nested
// dotted path, a missing field (error), and a top-level field.
func TestService_reflectIntVal(t *testing.T) {
	var (
		a = &monitor.BinlogArchive{
			ID:     123,
			State:  0,
			Round:  10,
			MID:    666,
			TypeID: 22,
			Addit: &monitor.ArchiveAddit{
				MissionID: 999,
			},
		}
	)
	Convey("reflectIntVal", t, func(ctx C) {
		_, err := s.reflectIntVal(a, "Addit.MissionID", 0)
		So(err, ShouldBeNil)
		_, err = s.reflectIntVal(a, "Addit111", 0)
		So(err, ShouldNotBeNil)
		_, err = s.reflectIntVal(a, "ID", 0)
		So(err, ShouldBeNil)
	})
}
// TestService_monitorCompSatisfy spot-checks the comparison expression
// evaluator: >=, >, =, and in(...) both hit and miss.
func TestService_monitorCompSatisfy(t *testing.T) {
	Convey("monitorCompSatisfy >=", t, func(ctx C) {
		is, err := s.monitorCompSatisfy(">=10", 11)
		So(err, ShouldBeNil)
		So(is, ShouldBeTrue)
		is, err = s.monitorCompSatisfy(">10", 10)
		So(err, ShouldBeNil)
		So(is, ShouldBeFalse)
		is, err = s.monitorCompSatisfy("=10", 10)
		So(err, ShouldBeNil)
		So(is, ShouldBeTrue)
		is, err = s.monitorCompSatisfy("in(10,20,30)", 10)
		So(err, ShouldBeNil)
		So(is, ShouldBeTrue)
		is, err = s.monitorCompSatisfy("in(10,20,30)", 40)
		So(err, ShouldBeNil)
		So(is, ShouldBeFalse)
	})
}
// TestService_monitorSave exercises the redis set move with throwaway keys
// and expects no errors (requires a reachable redis from the test config).
func TestService_monitorSave(t *testing.T) {
	Convey("monitorSave", t, func(ctx C) {
		_, errs := s.monitorSave([]string{"monitor_test_1"}, []string{"monitor_test_2"}, 123)
		So(errs, ShouldBeEmpty)
	})
}
// TestService_multiAccounts swaps the account client for a gomock stub and
// verifies multiAccounts issues exactly one ProfileWithStat3 call per mid.
// NOTE(review): this mutates the shared global s.acc for later tests.
func TestService_multiAccounts(t *testing.T) {
	var c = context.Background()
	Convey("multiAccounts", t, WithMock(t, func(mockCtrl *gomock.Controller) {
		mock := accApi.NewMockAccountClient(mockCtrl)
		s.acc = mock
		mockReq := &accApi.MidReq{
			Mid: 123,
		}
		mock.EXPECT().ProfileWithStat3(gomock.Any(), mockReq).Return(&accApi.ProfileStatReply{}, nil)
		_, err := s.multiAccounts(c, []int64{123})
		So(err, ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,77 @@
package service
import (
"context"
"fmt"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
)
// reportSubmit records stats when a task transitions from old to new on
// submit: the task finish counter, the per-state/per-operator submit counter,
// and the accumulated handling time.
func (s *Service) reportSubmit(c context.Context, old, new *model.Task) {
	s.reportTaskFinish(c, new)
	stfield := fmt.Sprintf(model.Submit, new.State, old.UID)
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, stfield, 1)
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.UseTime, new.Utime)
	// Resource-level stats (pass/reject etc.) only count task-list operations;
	// they are reported asynchronously so cache syncing is not slowed down.
	if old.UID == new.UID && new.State == model.TaskStateSubmit {
		select {
		case s.chanReport <- &model.RIR{
			BizID:  new.BusinessID,
			FlowID: new.FlowID,
			UID:    new.UID,
			RID:    new.RID,
		}:
		case <-time.NewTimer(time.Millisecond * 10).C:
			// drop the report rather than block the caller for more than 10ms
			log.Error("reportSubmit chanfull")
		}
	}
}
// reportResource looks up the current state of resource rid and bumps the
// matching per-state counter for the given business/flow/operator.
func (s *Service) reportResource(c context.Context, bizid, flowid, rid, uid int64) {
	state, err := s.dao.RscState(c, rid)
	if err != nil {
		log.Error("reportResource RscState(%d) error(%v)", rid, err)
		return
	}
	s.dao.IncresByField(c, bizid, flowid, uid, fmt.Sprintf(model.RscState, state), 1)
}
// syncReport drains the accumulated report counters from the cache and
// persists one Report row per key; keys that fail to parse are skipped.
func (s *Service) syncReport(c context.Context) {
	datas, err := s.dao.FlushReport(c)
	if err != nil {
		log.Error("FlushReport error(%v)", err)
		return
	}
	for key, val := range datas {
		tp, bizid, flowid, uid, perr := model.ParseKey(key)
		if perr != nil {
			log.Error("syncReport ParseKey(%s)", key)
			continue
		}
		s.dao.Report(c, &model.Report{
			BusinessID: int64(bizid),
			FlowID:     int64(flowid),
			UID:        int64(uid),
			TYPE:       tp,
			Content:    val,
		})
	}
}
// reportTaskCreate counts a task entering the flow (the "in" side of the
// in/out throughput stats).
func (s *Service) reportTaskCreate(c context.Context, new *model.Task) {
	s.dao.IncresTaskInOut(c, new.BusinessID, new.FlowID, "in")
}
// reportTaskFinish counts a task leaving the flow (the "out" side of the
// in/out throughput stats).
func (s *Service) reportTaskFinish(c context.Context, new *model.Task) {
	s.dao.IncresTaskInOut(c, new.BusinessID, new.FlowID, "out")
}

View File

@@ -0,0 +1,147 @@
package service
import (
"context"
"strings"
"sync"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/dao"
"go-common/app/job/main/aegis/dao/email"
"go-common/app/job/main/aegis/dao/monitor"
"go-common/app/job/main/aegis/model"
accApi "go-common/app/service/main/account/api"
upApi "go-common/app/service/main/up/api/v1"
"go-common/library/queue/databus"
"go-common/library/queue/databus/databusutil"
)
// Service struct bundles every dependency of the aegis job: config, gRPC
// clients, DAOs, databus subscriptions, in-memory caches, weight calculators
// and background worker bookkeeping.
type Service struct {
	c       *conf.Config
	acc     accApi.AccountClient
	up      upApi.UpClient
	dao     *dao.Dao
	moniDao *monitor.Dao
	email   *email.Dao
	// databus subscriptions
	binLogDataBus    *databus.Databus
	archiveDataBus   *databus.Databus
	aegisRscDataBus  *databus.Databus
	aegisTaskDataBus *databus.Databus
	// channel carrying async resource-report items (see reportSubmit)
	chanReport chan *model.RIR
	// cache
	Cache
	// weight calculators, one per active "bizid-flowid" key
	wmHash     map[string]*WeightManager
	rschandle  map[string]RscHandler
	taskhandle map[string]TaskHandler
	wg         sync.WaitGroup
	// databus consumer groups
	resourceGroup *databusutil.Group
	taskGroup     *databusutil.Group
}
// New init builds the job service: DAOs, databus subscriptions, consumer
// groups and all background goroutines. It panics if a gRPC client cannot be
// constructed (non-debug mode only).
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:                c,
		dao:              dao.New(c),
		moniDao:          monitor.New(c),
		email:            email.New(c),
		binLogDataBus:    databus.New(c.DataBus.BinLogSub),
		chanReport:       make(chan *model.RIR, 1024),
		archiveDataBus:   databus.New(c.DataBus.ArchiveSub),
		aegisRscDataBus:  databus.New(c.DataBus.ResourceSub),
		aegisTaskDataBus: databus.New(c.DataBus.TaskSub),
	}
	// In debug mode the gRPC clients stay nil — presumably debug runs avoid
	// the code paths that use them; confirm before relying on it.
	if !s.c.Debug {
		var err error
		if s.acc, err = accApi.NewClient(c.GRPC.Acc); err != nil {
			panic(err)
		}
		if s.up, err = upApi.NewClient(c.GRPC.Up); err != nil {
			panic(err)
		}
	}
	// handlers and caches must exist before the consumer groups start feeding
	initHandler(s)
	s.initCache()
	s.startWeightManager()
	s.resourceGroup = databusutil.NewGroup(c.Databusutil.Resource, s.aegisRscDataBus.Messages())
	s.resourceGroup.New = s.newrsc
	s.resourceGroup.Split = s.splitrsc
	s.resourceGroup.Do = s.dorsc
	s.resourceGroup.Start()
	s.taskGroup = databusutil.NewGroup(c.Databusutil.Task, s.aegisTaskDataBus.Messages())
	s.taskGroup.New = s.newtask
	s.taskGroup.Split = s.splittask
	s.taskGroup.Do = s.dotask
	s.taskGroup.Start()
	// background workers; only the wg-tracked ones are waited on in Close
	go s.cacheProc()
	go s.taskProc()
	go s.monitorNotify()
	s.wg.Add(1)
	go s.taskconsumeproc()
	s.wg.Add(1)
	go s.archiveConsumeProc()
	s.wg.Add(1)
	go s.monitorEmailProc()
	return s
}
// Cache groups the in-memory snapshots the job works from (refreshed by the
// cache goroutine).
type Cache struct {
	upCache        map[int64]map[int64]struct{}
	rangeWeightCfg map[int64]map[string]*model.RangeWeightConfig
	equalWeightCfg map[string][]*model.EqualWeightConfig
	assignConfig   map[string][]*model.AssignConfig
	// consumerCache maps "bizid-flowid" -> set of online consumer uids;
	// guarded by ccMux.
	consumerCache map[string]map[int64]struct{}
	ccMux         sync.RWMutex
	// previous / current snapshot of active business-flow keys
	oldactiveBizFlow map[string]struct{}
	newactiveBizFlow map[string]struct{}
}
// DebugCache exposes selected internal caches for debugging; keys is a
// comma-separated list of cache names to include.
// NOTE(review): the maps are read (and returned by reference) without taking
// ccMux — racy if called while workers mutate them; debug-only, but confirm.
func (s *Service) DebugCache(keys string) map[string]interface{} {
	dc := map[string]interface{}{
		"upCache":          s.upCache,
		"rangeWeightCfg":   s.rangeWeightCfg,
		"equalWeightCfg":   s.equalWeightCfg,
		"assignConfig":     s.assignConfig,
		"consumerCache":    s.consumerCache,
		"oldactiveBizFlow": s.oldactiveBizFlow,
		"newactiveBizFlow": s.newactiveBizFlow,
	}
	res := make(map[string]interface{})
	if len(keys) > 0 {
		for _, key := range strings.Split(keys, ",") {
			res[key] = dc[key]
		}
	}
	return res
}
// Ping Service reports liveness by pinging the underlying DAO.
func (s *Service) Ping(c context.Context) (err error) {
	return s.dao.Ping(c)
}
// Close Service shuts the job down in order: stop consuming databus, close
// the consumer groups, wait for wg-tracked workers, then close the DAOs.
func (s *Service) Close() {
	s.binLogDataBus.Close()
	s.archiveDataBus.Close()
	s.aegisRscDataBus.Close()
	s.aegisTaskDataBus.Close()
	s.resourceGroup.Close()
	s.taskGroup.Close()
	s.wg.Wait()
	s.dao.Close()
	s.moniDao.Close()
}

View File

@@ -0,0 +1,31 @@
package service
import (
"context"
"flag"
"testing"
"go-common/app/job/main/aegis/conf"
"go-common/library/log"
)
var (
s *Service
)
// initConf loads the test configuration and initializes logging; panics on
// failure so the whole test binary aborts early.
func initConf() {
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Log)
}
// init points the config flag at the local toml and builds the shared test
// service instance used by all tests in this package.
// NOTE(review): flag.Set's error is ignored.
func init() {
	flag.Set("conf", "../cmd/aegis-job.toml")
	initConf()
	s = New(conf.Conf)
}
// Test_syncReport is a smoke test: syncReport must run without panicking
// against the test backends (no assertions).
func Test_syncReport(t *testing.T) {
	s.syncReport(context.Background())
}

View File

@@ -0,0 +1,60 @@
package service
import (
"context"
"strconv"
"strings"
"time"
"go-common/app/job/main/aegis/model"
)
// taskProc is the periodic task housekeeping loop: release tasks held longer
// than 10 minutes, kick out expired consumers, then (after the 10-minute
// sleep) flush report counters and purge old finished tasks. Never returns.
func (s *Service) taskProc() {
	for {
		// release tasks that timed out
		s.dao.TaskRelease(context.Background(), time.Now().Add(-10*time.Minute))
		// kick out consumers whose session expired
		s.checkKickOut(context.Background())
		time.Sleep(10 * time.Minute)
		s.syncReport(context.Background())
		s.taskClear()
	}
}
// checkKickOut walks the consumer cache and, for every consumer whose session
// is no longer on, removes it from the cache and releases its tasks.
func (s *Service) checkKickOut(c context.Context) {
	// BUGFIX: this function delete()s from s.consumerCache entries below, so
	// it needs the write lock — the previous RLock allowed a data race with
	// concurrent readers/writers of the map.
	s.ccMux.Lock()
	defer s.ccMux.Unlock()
	for bizfwid, uidm := range s.consumerCache {
		// parse "bizid-flowid" once per key instead of once per uid
		pos := strings.Index(bizfwid, "-")
		bizid, _ := strconv.Atoi(bizfwid[:pos])
		flowid, _ := strconv.Atoi(bizfwid[pos+1:])
		for uid := range uidm {
			if on, err := s.dao.IsConsumerOn(c, bizid, flowid, uid); err == nil && !on {
				// deleting the key currently being ranged over is safe in Go
				delete(s.consumerCache[bizfwid], uid)
				s.KickOut(c, int64(bizid), int64(flowid), uid)
			}
		}
	}
}
// KickOut removes an expired consumer from a business flow, writes an audit
// log entry, and releases every task that consumer was holding.
func (s *Service) KickOut(c context.Context, bizid, flowid, uid int64) {
	// 1. kick the user out (params are already int64 — the old int64() casts
	// were redundant)
	s.dao.KickOutConsumer(c, bizid, flowid, uid)
	s.sendTaskLog(c, &model.Task{BusinessID: bizid, FlowID: flowid}, model.LogTypeTaskConsumer, "kickout", uid, "")
	// 2. release the user's tasks
	s.dao.ReleaseByConsumer(c, bizid, flowid, uid)
}
// taskClear purges tasks finished more than three days ago, deleting in
// batches of 1000 with a one-second pause between batches to limit DB load.
func (s *Service) taskClear() {
	cutoff := time.Now().Add(-3 * 24 * time.Hour)
	for {
		affected, err := s.dao.TaskClear(context.Background(), cutoff, 1000)
		if err != nil || affected == 0 {
			return
		}
		time.Sleep(time.Second)
	}
}

View File

@@ -0,0 +1,49 @@
package service
import (
"context"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/queue/databus/report"
)
// sendTaskLog sends a task audit entry to the log/report service. tp is the
// log type, action the verb, uid/uname identify the operator; the full task
// is embedded in the content payload and business/flow/state go into Index.
func (s *Service) sendTaskLog(c context.Context, task *model.Task, tp int, action string, uid int64, uname string) (err error) {
	logData := &report.ManagerInfo{
		UID:      uid,
		Uname:    uname,
		Business: model.LogBusinessTask,
		Type:     tp,
		Oid:      task.ID,
		Action:   action,
		Ctime:    time.Now(),
		Index:    []interface{}{task.BusinessID, task.FlowID, task.State},
		Content: map[string]interface{}{
			"task": task,
		},
	}
	err = report.Manager(logData)
	log.Info("sendTaskLog logData(%+v) errmsg(%v)", logData, err)
	return
}
// sendWeightLog records a weight-recalculation trace for a task under the
// fixed "aegis-job" system operator (uid 399).
func (s *Service) sendWeightLog(c context.Context, task *model.Task, wl *model.WeightLog) (err error) {
	logData := &report.ManagerInfo{
		UID:      399,
		Uname:    "aegis-job",
		Business: model.LogBusinessTask,
		Type:     model.LogTYpeTaskWeight,
		Oid:      task.ID,
		Action:   "weight",
		Ctime:    time.Now(),
		Index:    []interface{}{task.BusinessID, task.FlowID, task.State},
		Content: map[string]interface{}{
			"weightlog": wl,
		},
	}
	err = report.Manager(logData)
	log.Info("sendWeightLog logData(%+v) errmsg(%v)", logData, err)
	return
}

View File

@@ -0,0 +1,362 @@
package service
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
)
// WeightManager weight manager: periodically recomputes task weights for one
// business/flow pair and feeds the results back to redis/DB via channels.
type WeightManager struct {
	s                  *Service
	businessID, flowID int64
	// toplen: size of the "top weights" window persisted to DB;
	// batchlen: redis scan page size
	toplen, batchlen int64
	// recalculation period in minutes (also the unit of cyclic weights)
	minute int64
	// cache of the previous top-weight window (used to flush drop-outs)
	topweightList []*model.WeightItem
	// channels feeding the single watcher goroutine
	redisWeightList chan *model.WeightItem
	dbWeightList    chan *model.WeightItem
	asignList       chan *model.Task
	//dbstartSig, dbstopSig chan struct{}
	redisFinish chan struct{}
	//closeChan chan struct{}
	// close stops both goroutines at their next loop check
	close bool
}
// _defaultopt supplies fallback sizes and the recalculation period for weight
// managers whose business config omits (or zeroes) a field.
var _defaultopt = &model.WeightOPT{
	TopListLen:   1000,
	BatchListLen: 1000,
	RedisListLen: 10000,
	DbListLen:    2000,
	AssignLen:    100,
	Minute:       3,
}
// NewWeightManager new: builds a weight manager for one business/flow,
// normalizes the options against _defaultopt, and starts its two goroutines
// (weightProc producer, weightWatcher consumer).
// NOTE(review): opt is normalized in place, mutating the caller's struct —
// confirm callers do not reuse the same opt.
func NewWeightManager(s *Service, opt *model.WeightOPT, key string) (wm *WeightManager) {
	if opt == nil {
		opt = _defaultopt
	} else {
		// replace non-positive fields with the defaults
		if opt.TopListLen <= 0 {
			opt.TopListLen = _defaultopt.TopListLen
		}
		if opt.BatchListLen <= 0 {
			opt.BatchListLen = _defaultopt.BatchListLen
		}
		if opt.RedisListLen <= 0 {
			opt.RedisListLen = _defaultopt.RedisListLen
		}
		if opt.DbListLen <= 0 {
			opt.DbListLen = _defaultopt.DbListLen
		}
		if opt.AssignLen <= 0 {
			opt.AssignLen = _defaultopt.AssignLen
		}
		if opt.Minute <= 0 {
			opt.Minute = _defaultopt.Minute
		}
	}
	// a non-empty "bizid-flowid" key overrides the ids carried in opt
	if len(key) > 0 {
		bizid, flowid := parseKey(key)
		opt.BusinessID = int64(bizid)
		opt.FlowID = int64(flowid)
	}
	wm = &WeightManager{
		s:               s,
		businessID:      opt.BusinessID,
		flowID:          opt.FlowID,
		toplen:          opt.TopListLen,
		batchlen:        opt.BatchListLen,
		minute:          opt.Minute,
		redisWeightList: make(chan *model.WeightItem, opt.RedisListLen),
		dbWeightList:    make(chan *model.WeightItem, opt.DbListLen),
		asignList:       make(chan *model.Task, opt.AssignLen),
		redisFinish:     make(chan struct{}),
	}
	go wm.weightProc()
	go wm.weightWatcher()
	log.Info("启动权重计算器 bizid(%d) flowid(%d) opt(%+v)", wm.businessID, wm.flowID, opt)
	return
}
// parseKey splits a "bizid-flowid" cache key on its first '-' into the two
// integer components; malformed numeric parts parse as zero.
func parseKey(key string) (bizid, flowid int) {
	sep := strings.Index(key, "-")
	bizid, _ = strconv.Atoi(key[:sep])
	flowid, _ = strconv.Atoi(key[sep+1:])
	return
}
// startWeightManager spawns one weight manager per currently-active
// business/flow pair (keys come from the active-flow cache snapshot).
func (s *Service) startWeightManager() {
	// enumerate every business line that currently needs weight calculation
	s.wmHash = make(map[string]*WeightManager)
	for key := range s.newactiveBizFlow {
		bizid, _ := parseKey(key)
		s.wmHash[key] = NewWeightManager(s, s.getWeightOpt(bizid), key)
	}
}
// weightProc drives a weight recalculation pass every w.minute minutes until
// the manager is closed; the DB path is only a fallback for redis failures
// (currently a no-op stub).
func (w *WeightManager) weightProc() {
	for !w.close {
		if err := w.weightRedisProcess(); err != nil {
			w.weightDBProcess()
		}
		time.Sleep(time.Duration(w.minute) * time.Minute)
	}
}
// weightWatcher is the manager's single consumer goroutine: it serializes
// redis weight writes, DB weight writes, end-of-pass flushes and assignment
// checks coming from the producer side.
func (w *WeightManager) weightWatcher() {
	for !w.close {
		select {
		case <-w.redisFinish: // pass finished: persist the top batch to the DB
			log.Info("redisFinish(%d-%d:%d)", w.businessID, w.flowID, w.toplen)
			w.handleRedisFinish(context.Background())
		case wi := <-w.redisWeightList:
			w.handleRedisWeightList(context.Background(), wi)
		case wi := <-w.dbWeightList:
			w.handleDBWeightList(context.Background(), wi)
		case task := <-w.asignList:
			w.handleAssign(context.Background(), task)
		}
	}
}
// weightRedisProcess performs one full recalculation pass over the redis
// union set: page through all weight items, recompute each weight, write it
// back, then signal redisFinish and drop the temporary union set.
func (w *WeightManager) weightRedisProcess() (err error) {
	var c = context.Background()
	if err = w.s.dao.CreateUnionSet(c, w.businessID, w.flowID); err != nil {
		return
	}
	var (
		start = int64(0)
		stop  = w.batchlen
	)
	for {
		// NOTE(review): consecutive windows [0,batchlen], [batchlen,2*batchlen]
		// share a boundary index if RangeUinonSet is inclusive — confirm the
		// DAO's range semantics (a shared element would just be recomputed twice).
		wis, err := w.s.dao.RangeUinonSet(c, w.businessID, w.flowID, start, stop)
		if err != nil {
			return err
		}
		log.Info("weightRedisProcess length(%d) start(%d) stop(%d)", len(wis), start, stop)
		start += w.batchlen
		stop += w.batchlen
		if len(wis) == 0 {
			break
		}
		for _, wi := range wis {
			// skip==true means the backing task could not be loaded
			if w.caculateWeight(c, wi) {
				log.Warn("weightRedisProcess 任务未找到 wi(%+v)", wi)
				continue
			}
			w.s.dao.SetWeight(c, w.businessID, w.flowID, wi.ID, wi.Weight)
		}
		// throttle between pages to limit redis pressure
		time.Sleep(time.Second)
	}
	w.redisFinish <- struct{}{}
	w.s.dao.DeleteUinonSet(c, w.businessID, w.flowID)
	return nil
}
// caculateWeight recomputes the weight of one item in place (range-based plus
// equal-match components) and emits a weight audit log. skip=true means the
// backing task could not be loaded and the item should be ignored.
// NOTE(review): the name keeps the historical "caculate" typo; renaming would
// touch every call site in this file.
func (w *WeightManager) caculateWeight(c context.Context, wi *model.WeightItem) (skip bool) {
	task, err := w.s.dao.GetTask(c, wi.ID)
	if err != nil {
		return true
	}
	// unowned tasks are queued for assignment checking as a side effect
	w.reAssign(c, task)
	// wm: whole minutes the task has been waiting since creation
	wm := int64(time.Since(task.Ctime.Time()).Minutes())
	wl := &model.WeightLog{
		UPtime:   time.Now().Format("2006-01-02 15:04:05"),
		Mid:      task.MID,
		Fans:     task.Fans,
		Group:    task.Group,
		WaitTime: model.WaitTime(task.Ctime.Time()),
	}
	var wtRange, wtEqual int64
	wci, ewc := w.s.getWeightCache(c, task.BusinessID, task.FlowID)
	if wci != nil {
		wtRange = w.rangeCaculate(c, wci, task, wm, wl)
	}
	if ewc != nil {
		wtEqual = w.equalCaculate(c, ewc, task, wm, wl)
	}
	wi.Weight = wtRange + wtEqual
	wl.Weight = wi.Weight
	w.s.sendWeightLog(c, task, wl)
	return
}
// rangeCaculate computes the range-based weight components for a task: tiered
// wait-time weight (accumulating all lower tiers), fan-count weight and group
// weight, each scaled by full recalculation periods (wt / w.minute). The
// per-component values are also recorded on wl for auditing.
func (w *WeightManager) rangeCaculate(c context.Context, wci map[string]*model.RangeWeightConfig, task *model.Task, wt int64, wl *model.WeightLog) (weight int64) {
	var wtWeight, fanWeight, groupWeight int64
	if cfg, ok := wci["waittime"]; ok {
		// wait time: the weight of every lower tier is accumulated as well
		if wtlen := len(cfg.Range); wtlen > 0 {
			for i := wtlen - 1; i >= 0; i-- {
				if wt >= cfg.Range[i].Threshold { // matched this tier
					wtWeight += cfg.Range[i].Weight * ((wt - cfg.Range[i].Threshold) / w.minute)
					// add the full span of tiers 0 .. i-1
					for j := 0; j <= i-1; j++ {
						wtWeight += cfg.Range[j].Weight * ((cfg.Range[j+1].Threshold - cfg.Range[j].Threshold) / w.minute)
					}
					break
				}
			}
		}
	}
	if cfg, ok := wci["fans"]; ok {
		// fans: only the highest matched tier counts
		if fanLen := len(cfg.Range); fanLen > 0 {
			for i := fanLen - 1; i >= 0; i-- {
				if task.Fans >= cfg.Range[i].Threshold {
					fanWeight = cfg.Range[i].Weight * (wt / w.minute)
					break
				}
			}
		}
	}
	if cfg, ok := wci["group"]; ok {
		// group: Threshold doubles as a group id here; the last matching
		// entry wins (no break)
		if len(cfg.Range) > 0 {
			for _, item := range cfg.Range {
				if strings.Contains(","+task.Group+",", fmt.Sprintf(",%d,", item.Threshold)) {
					groupWeight = item.Weight * (wt / w.minute)
				}
			}
		}
	}
	weight = wtWeight + fanWeight + groupWeight
	wl.WaitWeight = wtWeight
	wl.FansWeight = fanWeight
	wl.GroupWeight = groupWeight
	return
}
// equalCaculate computes the exact-match weight component: config items whose
// id list contains the task's mid ("mid" items) or task id ("taskid"/"task_id"
// items) contribute their weight, either once or per elapsed period when the
// item is cyclic. Matches are appended to wl.ConfigItems for auditing.
func (w *WeightManager) equalCaculate(c context.Context, ewc []*model.EqualWeightConfig, task *model.Task, wt int64, wl *model.WeightLog) (weight int64) {
	// The mid and taskid branches used to be two near-identical copies; the
	// only difference is which id is looked up, so fold them into one path.
	for _, item := range ewc {
		var hit bool
		switch item.Name {
		case "mid":
			hit = strings.Contains(","+item.IDs+",", fmt.Sprintf(",%d,", task.MID))
		case "taskid", "task_id":
			hit = strings.Contains(","+item.IDs+",", fmt.Sprintf(",%d,", task.ID))
		}
		if !hit {
			continue
		}
		if item.Type == model.WeightTypeCycle {
			// cyclic: weight accrues once per elapsed recalculation period
			weight += item.Weight * (wt / w.minute)
		} else {
			weight += item.Weight
		}
		log.Info("equalCaculate task(%+v) hit (%+v)", task, item)
		wl.ConfigItems = append(wl.ConfigItems, &model.ConfigItem{
			Name:  item.Name,
			Desc:  item.Description,
			Uname: item.Uname,
		})
	}
	wl.EqualWeight = weight
	return
}
// reAssign queues a task that has no owner (UID == 0) for assignment checking.
// The send is bounded: if the channel stays full for 10ms the task is dropped
// with a warning (it will be seen again on the next weight pass).
func (w *WeightManager) reAssign(c context.Context, task *model.Task) {
	if task.UID == 0 {
		select {
		case w.asignList <- task:
			log.Info("指派判断 reAssign%+v", task)
		case <-time.NewTimer(10 * time.Millisecond).C:
			// BUGFIX: the warning used to report len(w.dbWeightList) — the
			// wrong channel; report the backlog of the channel that is full.
			log.Warn("chan asignList full,len:%d", len(w.asignList))
		}
	}
}
// weightDBProcess is the DB-only fallback recalculation path; not implemented
// yet, so a redis outage currently skips the pass entirely.
func (w *WeightManager) weightDBProcess() (err error) {
	// TODO implement the DB-only weight update strategy
	return nil
}
// handleAssign tries to assign an unowned task: if the assignment policy
// matches, persist the assignment and refresh the task cache.
// NOTE(review): the named err is never set (the inner err is shadowed), so
// this always returns nil.
func (w *WeightManager) handleAssign(c context.Context, task *model.Task) (err error) {
	if w.s.setAssign(c, task) {
		// only cache the task if exactly one row was updated (we won the assignment)
		if rows, err := w.s.dao.AssignTask(c, task); err == nil && rows == 1 {
			w.s.dao.SetTask(c, task)
		}
	}
	return
}
// handleRedisWeightList writes one recomputed weight back to redis.
func (w *WeightManager) handleRedisWeightList(c context.Context, wi *model.WeightItem) (err error) {
	return w.s.dao.SetWeight(c, w.businessID, w.flowID, wi.ID, wi.Weight)
}
// handleDBWeightList persists one weight to the database, returning the
// affected row count.
func (w *WeightManager) handleDBWeightList(c context.Context, wi *model.WeightItem) (rows int64, err error) {
	return w.s.dao.SetWeightDB(c, wi.ID, wi.Weight)
}
// handleRedisFinish runs at the end of a recalculation pass: it queues the new
// top-weight window for DB persistence, and also re-persists items that were
// in the previous window but dropped out (fetching their current weight first)
// so the DB never keeps a stale high weight. The new window is then cached.
func (w *WeightManager) handleRedisFinish(c context.Context) (err error) {
	log.Info("handleRedisFinish")
	wis, err := w.s.dao.TopWeights(c, w.businessID, w.flowID, w.toplen)
	if err != nil {
		return
	}
	tempMap := make(map[int64]struct{})
	for _, wi := range wis {
		log.Info("handleRedisFinish:(%+v)", wi)
		w.addToDBList(wi)
		tempMap[wi.ID] = struct{}{}
	}
	// flush items that fell out of the top window since the previous pass
	for _, wi := range w.topweightList {
		if _, ok := tempMap[wi.ID]; !ok {
			weight, err := w.s.dao.GetWeight(c, w.businessID, w.flowID, wi.ID)
			if err != nil {
				continue
			}
			wi.Weight = weight
			w.addToDBList(wi)
		}
	}
	w.topweightList = wis
	log.Info("handleRedisFinish:topweightList(%d)", len(wis))
	return
}
// addToDBList queues a weight item for DB persistence; if the channel stays
// full for 10ms the item is dropped with a warning (next pass retries it).
func (w *WeightManager) addToDBList(wi *model.WeightItem) {
	select {
	case w.dbWeightList <- wi:
		log.Info("addToDBList (%+v)", wi)
	case <-time.NewTimer(10 * time.Millisecond).C:
		log.Warn("chan dbWeightList full,len:%d", len(w.dbWeightList))
	}
}