Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

21
app/job/main/aegis/BUILD Normal file
View File

@@ -0,0 +1,21 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/aegis/cmd:all-srcs",
"//app/job/main/aegis/conf:all-srcs",
"//app/job/main/aegis/dao:all-srcs",
"//app/job/main/aegis/model:all-srcs",
"//app/job/main/aegis/server/http:all-srcs",
"//app/job/main/aegis/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,47 @@
# v1.1.2
1. 同步消费稿件databus
# v1.1.1
1. 去除不必要的错误日志
2. 控制发监控邮件失败时的频率
# v1.1.0
1. 监控平台迁移+优化
# v1.0.11
1. 使用orm避免死锁
# v1.0.10
1. 修复使用databusgroup导致重复任务
# v1.0.9
1. 去掉不必要的报错日志
# v1.0.8
1. 分开资源和任务的databus
# v1.0.7
1. 使用up-service grpc
# v1.0.6
1. 添加databus导入资源
# v1.0.5
1. 添加任务进出审核总报表
# v1.0.4
1. 已完成任务只保留3天免得堆积多了影响task表效率
2. hash tag改为 businessid-flowid
3. 创建任务前先判断资源状态避免资源在任务前被提交或cancel
# v1.0.3
1. 新增权重计算器配置
# v1.0.2
1. 修复空资源导致panic
# v1.0.1
1. 修复使用hash tag集群下权重不生效问题
# v1.0.0
1. 上线功能xxx

View File

@@ -0,0 +1,14 @@
# Owner
shencen
wangzhe01
# Author
shencen
chenxuefeng
chenxi01
# Reviewer
shencen
chenxuefeng
chenxi01

17
app/job/main/aegis/OWNERS Normal file
View File

@@ -0,0 +1,17 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- chenxi01
- chenxuefeng
- shencen
- wangzhe01
labels:
- job
- job/main/aegis
- main
options:
no_parent_owners: true
reviewers:
- chenxi01
- chenxuefeng
- shencen

View File

@@ -0,0 +1,12 @@
# aegis-job
# 项目简介
1.
# 编译环境
# 依赖包
# 编译执行

View File

@@ -0,0 +1 @@
# HTTP API文档

View File

@@ -0,0 +1,45 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = ["aegis-job.toml"],
importpath = "go-common/app/job/main/aegis/cmd",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/server/http:go_default_library",
"//app/job/main/aegis/service:go_default_library",
"//library/ecode/tip:go_default_library",
"//library/log:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus/report:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,185 @@
debug = true
[app]
key = "c1a1cb2d89c33794"
secret = "dda47eeca111e03e6845017505baea13"
[orm]
dsn = "test:test@tcp(127.0.0.1:3306)/bilibili_aegis?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 5
idle = 5
idleTimeout = "4h"
[mysql]
[mysql.fast]
addr = "calibur"
dsn = "test:test@tcp(127.0.0.1:3306)/bilibili_aegis?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8,utf8mb4"
readDSN = ["test:test@tcp(127.0.0.1:3306)/bilibili_aegis?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8,utf8mb4"]
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "100ms"
execTimeout = "100ms"
tranTimeout = "200ms"
[mysql.slow]
addr = "calibur"
dsn = "test:test@tcp(127.0.0.1:3306)/bilibili_aegis?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8,utf8mb4"
readDSN = ["test:test@tcp(127.0.0.1:3306)/bilibili_aegis?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8,utf8mb4"]
active = 20
idle = 10
idleTimeout ="4h"
queryTimeout = "200ms"
execTimeout = "200ms"
tranTimeout = "400ms"
[redis]
name = "aegis-admin"
proto = "tcp"
addr = "127.0.0.1:6379"
idle = 10
active = 10
dialTimeout = "100ms"
readTimeout = "100s"
writeTimeout = "100ms"
idleTimeout = "10s"
expire = "1m"
[memcache]
name = "aegis-admin"
proto = "tcp"
addr = "127.0.0.1:11211"
active = 50
idle = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
expire = "24h"
[rpc]
[rpc.rel]
[grpc]
[grpc.up]
[host]
videoup = "http://uat-archive.api.bilibili.co"
[databus]
[databus.binLogSub]
key = "4c76cbb7a985ac90"
secret = "43bb22ce34a6b13e7814f09cb8116522"
group = "AegisBinlog-MainArchive-S"
topic = "AegisBinlog-T"
action = "sub"
buffer = 2048
name = "aegis-job/databus"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.taskSub]
key = "4c76cbb7a985ac90"
secret = "43bb22ce34a6b13e7814f09cb8116522"
group = "Aegis-MainArchive-S"
topic = "Aegis-T"
action = "sub"
buffer = 128
name = "databus"
proto = "tcp"
addr = "172.18.33.50:6205"
active = 1
idle = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.resourceSub]
key = "4c76cbb7a985ac90"
secret = "43bb22ce34a6b13e7814f09cb8116522"
group = "AegisResource-MainArchive-S"
topic = "AegisResource-T"
action = "sub"
buffer = 128
name = "databus"
proto = "tcp"
addr = "172.18.33.50:6205"
active = 1
idle = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databus.archiveSub]
key = "4c76cbb7a985ac90"
secret = "43bb22ce34a6b13e7814f09cb8116522"
group = "Archive-MainArchive-Aegis-S"
topic = "Archive-T"
action = "sub"
buffer = 128
name = "databus"
proto = "tcp"
addr = "172.18.33.50:6205"
active = 1
idle = 1
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[databusutil]
[databusutil.task]
size = 100
chan = 1024
num = 10
ticker="1s"
[databusutil.resource]
size = 100
chan = 1024
num = 10
ticker="1s"
[http]
[http.fast]
key = "b8f239ca38a53308"
secret = "5460ef72fe13c10dfb53442b9111427e"
dial = "40ms"
timeout = "400ms"
keepAlive = "60s"
[http.fast.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[http.slow]
key = "b8f239ca38a53308"
secret = "5460ef72fe13c10dfb53442b9111427e"
dial = "200ms"
timeout = "1000ms"
keepAlive = "60s"
[http.slow.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[bizcfg]
[[bizcfg.weightopt]]
businessid=1
minute = 1
[[bizcfg.weightopt]]
businessid=2
minute = 3
[mail]
host = "smtp.exmail.qq.com"
port = 465
username = "manager@bilibili.com"
password = "a1654OQigEsyHz2I"

View File

@@ -0,0 +1,53 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"time"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/server/http"
"go-common/app/job/main/aegis/service"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
"go-common/library/net/trace"
"go-common/library/queue/databus/report"
)
// main boots the aegis-job process: it loads configuration, initializes
// logging, tracing, ecode and the report manager, starts the service and
// HTTP server, then blocks handling OS signals until told to exit.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Log)
	defer log.Close()
	log.Info("aegis-job start")
	trace.Init(conf.Conf.Tracer)
	defer trace.Close()
	// ecode codes are only refreshed from remote outside of debug mode.
	if !conf.Conf.Debug {
		ecode.Init(conf.Conf.Ecode)
	}
	// report init
	report.InitManager(nil)
	svc := service.New(conf.Conf)
	http.Init(svc, conf.Conf)
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		s := <-c
		log.Info("get a signal %s", s.String())
		switch s {
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			// Graceful shutdown: close the service, give in-flight work a
			// second to drain, then return (triggering the deferred closes).
			svc.Close()
			log.Info("aegis-job exit")
			time.Sleep(time.Second)
			return
		case syscall.SIGHUP:
			// SIGHUP is deliberately a no-op here.
		default:
			return
		}
	}
}

View File

@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/job/main/aegis/conf",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/aegis/model:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/conf:go_default_library",
"//library/database/orm:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode/tip:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/http/blademaster/middleware/verify:go_default_library",
"//library/net/rpc:go_default_library",
"//library/net/rpc/warden:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus:go_default_library",
"//library/queue/databus/databusutil:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,169 @@
package conf
import (
"errors"
"flag"
"go-common/app/job/main/aegis/model"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/conf"
"go-common/library/database/orm"
"go-common/library/database/sql"
ecode "go-common/library/ecode/tip"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/http/blademaster/middleware/verify"
"go-common/library/net/rpc"
"go-common/library/net/rpc/warden"
"go-common/library/net/trace"
"go-common/library/queue/databus"
"go-common/library/queue/databus/databusutil"
"github.com/BurntSushi/toml"
)
var (
	// confPath is the local config file path supplied via the -conf flag.
	confPath string
	// client is the config-center client used when no local path is given.
	client *conf.Client
	// Conf config
	Conf = &Config{}
)

// Config is the root configuration of aegis-job, decoded from TOML
// (either a local file or the remote config center).
type Config struct {
	Debug    bool
	Log      *log.Config
	BM       *bm.ServerConfig
	Verify   *verify.Config
	Tracer   *trace.Config
	Redis    *redis.Config
	Memcache *memcache.Config
	MySQL    *MySQL
	Ecode    *ecode.Config
	// ORM
	ORM *orm.Config
	// DataBus databus
	DataBus *DataBus
	// Databusutil
	Databusutil *Databusutil
	// RPC
	RPC *RPC
	//GRPC
	GRPC *GRPC
	// BizConfiger
	BizCfg BizConfiger
	HTTP   *HTTP
	Host   *Host
	// mail
	Mail *Mail
}

// MySQL holds the fast and slow MySQL connection configs.
type MySQL struct {
	Slow *sql.Config
	Fast *sql.Config
}

// Host lists upstream service hosts.
type Host struct {
	API     string
	Videoup string
}

// HTTP holds the fast and slow blademaster HTTP client configs.
type HTTP struct {
	Fast *bm.ClientConfig
	Slow *bm.ClientConfig
}

// BizConfiger carries per-business tuning, e.g. weight-calculator options.
type BizConfiger struct {
	WeightOpt []*model.WeightOPT
}

// RPC holds go-rpc client configs.
type RPC struct {
	Rel *rpc.ClientConfig
	Up  *rpc.ClientConfig
}

// GRPC holds warden gRPC client configs.
type GRPC struct {
	Up  *warden.ClientConfig
	Acc *warden.ClientConfig
}

// DataBus holds the databus subscriber configs consumed by the job.
type DataBus struct {
	BinLogSub   *databus.Config
	ResourceSub *databus.Config
	TaskSub     *databus.Config
	ArchiveSub  *databus.Config
}

// Mail is the SMTP configuration used for monitor emails.
type Mail struct {
	Host               string
	Port               int
	Username, Password string
}

// Databusutil holds the databus-group configs for task/resource consumers.
type Databusutil struct {
	Task     *databusutil.Config
	Resource *databusutil.Config
}

// init registers the -conf flag read by Init.
func init() {
	flag.StringVar(&confPath, "conf", "", "default config path")
}
// Init loads configuration: from the local file when -conf was supplied,
// otherwise from the remote config center.
func Init() error {
	if confPath == "" {
		return remote()
	}
	return local()
}
// local decodes the TOML file at confPath into the global Conf.
func local() (err error) {
	_, err = toml.DecodeFile(confPath, &Conf)
	return
}
// remote connects to the config center, performs an initial load, and
// spawns a watcher goroutine that reloads configuration on every change
// event pushed by the config center.
func remote() (err error) {
	if client, err = conf.New(); err != nil {
		return
	}
	if err = load(); err != nil {
		return
	}
	go func() {
		for range client.Event() {
			log.Info("config reload")
			// Use a goroutine-local error: the original logged the outer
			// named `err` (nil by this point) instead of the actual reload
			// failure, and also raced on it with the caller.
			if e := load(); e != nil {
				log.Error("config reload error (%v)", e)
			}
		}
	}()
	return
}
// load fetches the latest TOML snapshot from the config center, decodes it
// into a fresh Config, and copies it into the global Conf on success.
func load() (err error) {
	var (
		s       string
		ok      bool
		tmpConf *Config
	)
	if s, ok = client.Toml2(); !ok {
		return errors.New("load config center error")
	}
	if _, err = toml.Decode(s, &tmpConf); err != nil {
		return errors.New("could not decode config")
	}
	// NOTE(review): this is a struct copy, not an atomic pointer swap —
	// concurrent readers of Conf could observe a partially-updated value.
	*Conf = *tmpConf
	return
}

View File

@@ -0,0 +1,80 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"dao.go",
"task_db.go",
"task_http.go",
"task_mc.go",
"task_orm.go",
"task_redis.go",
"task_report.go",
"task_rpc.go",
],
importpath = "go-common/app/job/main/aegis/dao",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/model:go_default_library",
"//app/service/main/relation/model:go_default_library",
"//app/service/main/relation/rpc/client:go_default_library",
"//app/service/main/up/api/v1:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/database/orm:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//vendor/github.com/jinzhu/gorm:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/aegis/dao/email:all-srcs",
"//app/job/main/aegis/dao/monitor:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"dao_test.go",
"task_db_test.go",
"task_mc_test.go",
"task_orm_test.go",
"task_redis_test.go",
"task_rpc_test.go",
"task_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/model:go_default_library",
"//library/sync/errgroup:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)

View File

@@ -0,0 +1,72 @@
package dao
import (
"context"
"go-common/app/job/main/aegis/conf"
relrpc "go-common/app/service/main/relation/rpc/client"
uprpc "go-common/app/service/main/up/api/v1"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/database/orm"
xsql "go-common/library/database/sql"
bm "go-common/library/net/http/blademaster"
"github.com/jinzhu/gorm"
)
// Dao aggregates every storage and RPC client used by the aegis job:
// memcache, redis, fast/slow MySQL, gorm ORM, relation go-rpc, up gRPC
// and the fast blademaster HTTP client.
type Dao struct {
	c      *conf.Config
	mc     *memcache.Pool
	redis  *redis.Pool
	slowdb *xsql.DB
	fastdb *xsql.DB
	orm    *gorm.DB
	//gorpc
	relRPC *relrpc.Service
	//grpc
	upRPC    uprpc.UpClient
	httpFast *bm.Client
}

// New init mysql db
// New builds a Dao from config. RPC/gRPC clients are only created outside
// debug mode; it panics if the up gRPC client cannot be constructed.
func New(c *conf.Config) (dao *Dao) {
	dao = &Dao{
		c:        c,
		mc:       memcache.NewPool(c.Memcache),
		redis:    redis.NewPool(c.Redis),
		fastdb:   xsql.NewMySQL(c.MySQL.Fast),
		slowdb:   xsql.NewMySQL(c.MySQL.Slow),
		orm:      orm.NewMySQL(c.ORM),
		httpFast: bm.NewClient(c.HTTP.Fast),
	}
	// rpc
	if !c.Debug {
		dao.relRPC = relrpc.New(c.RPC.Rel)
		var err error
		if dao.upRPC, err = uprpc.NewClient(c.GRPC.Up); err != nil {
			panic(err)
		}
	}
	// Enable SQL statement logging on the ORM connection.
	dao.orm.LogMode(true)
	return
}

// Close close the resource.
func (d *Dao) Close() {
	d.mc.Close()
	d.redis.Close()
	d.slowdb.Close()
	d.fastdb.Close()
	d.orm.Close()
}

// Ping dao ping
// Ping only checks the fast MySQL connection.
func (d *Dao) Ping(c context.Context) error {
	return d.fastdb.Ping(c)
}

View File

@@ -0,0 +1,40 @@
package dao
import (
"flag"
"os"
"testing"
"time"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/model"
)
var (
	// d is the package-wide Dao under test.
	d *Dao
	// Fixture tasks sharing business/flow but with descending weights.
	task1 = &model.Task{ID: 1, BusinessID: 1, FlowID: 1, UID: 1, Weight: 3, Ctime: model.IntTime(time.Now().Unix())}
	task2 = &model.Task{ID: 2, BusinessID: 1, FlowID: 1, UID: 1, Weight: 2, Ctime: model.IntTime(time.Now().Unix())}
	task3 = &model.Task{ID: 3, BusinessID: 1, FlowID: 1, UID: 1, Weight: 1, Ctime: model.IntTime(time.Now().Unix())}
)

// TestMain wires test configuration: config-center flags when running in a
// deploy environment, otherwise the local TOML file, then builds the Dao.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		flag.Set("app_id", "main.archive.aegis-job")
		flag.Set("conf_token", "aed3cc21ca345ffc284c6036da32352b")
		flag.Set("tree_id", "61819")
		flag.Set("conf_version", "1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../cmd/aegis-job.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}

View File

@@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["dao.go"],
importpath = "go-common/app/job/main/aegis/dao/email",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/model:go_default_library",
"//library/cache/redis:go_default_library",
"//library/log:go_default_library",
"//vendor/gopkg.in/gomail.v2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["dao_test.go"],
embed = [":go_default_library"],
tags = ["automanaged"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)

View File

@@ -0,0 +1,98 @@
package email
import (
"context"
"encoding/json"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/model"
"go-common/library/cache/redis"
"go-common/library/log"
"gopkg.in/gomail.v2"
"time"
)
// Dao wraps the redis-backed email queue and the SMTP dialer used to
// deliver monitor emails.
type Dao struct {
	c     *conf.Config
	redis *redis.Pool
	email *gomail.Dialer
}

const (
	// MoniEmailKey is the redis list key serving as the monitor-email queue.
	MoniEmailKey = "monitor_stats_email"
)

// New is new redis dao.
func New(c *conf.Config) (d *Dao) {
	d = &Dao{
		c:     c,
		email: gomail.NewDialer(c.Mail.Host, c.Mail.Port, c.Mail.Username, c.Mail.Password),
		redis: redis.NewPool(c.Redis),
	}
	return d
}
// MonitorEmailAsync enqueues a monitor email (serialized as JSON) onto the
// redis list consumed by MonitorEmailProc; the actual SMTP send happens
// asynchronously in that consumer.
func (d *Dao) MonitorEmailAsync(c context.Context, members []string, title, content string) (err error) {
	var (
		conn = d.redis.Get(c)
		bs   []byte
	)
	defer conn.Close()
	temp := &model.MoniTemp{
		From:    d.c.Mail.Username,
		Members: members,
		Subject: title,
		Body:    content,
	}
	if bs, err = json.Marshal(temp); err != nil {
		log.Error("d.MonitorEmailAsync() json.Marshal(%+v) error(%v) key(%s)", temp, err, MoniEmailKey)
		return
	}
	if _, err = conn.Do("RPUSH", MoniEmailKey, bs); err != nil {
		log.Error("d.MonitorEmailAsync() conn.Do(RPUSH, %s, %s) error(%v)", MoniEmailKey, bs, err)
	}
	return
}
// MonitorEmailProc pops one queued email from redis and sends it via SMTP.
// When the queue is empty or the pop fails it sleeps 5s to throttle the
// caller's polling loop before returning.
func (d *Dao) MonitorEmailProc() (err error) {
	var (
		bs      []byte
		temp    *model.MoniTemp
		headers map[string][]string
	)
	headers = make(map[string][]string)
	bs, err = d.PopRedis(context.TODO(), MoniEmailKey)
	// bs == nil covers the empty-queue case (LPOP returned redis.ErrNil).
	if err != nil || bs == nil {
		log.Warn("d.MonitorEmailProc() warn:%v content:%s", err, bs)
		time.Sleep(5 * time.Second)
		return
	}
	if err = json.Unmarshal(bs, &temp); err != nil {
		log.Error("d.MonitorEmailProc() json.unmarshal error(%v) content(%s)", err, bs)
		return
	}
	msg := gomail.NewMessage()
	// From is taken from config, not the queued payload's From field.
	headers["From"] = []string{d.c.Mail.Username}
	headers["To"] = temp.Members
	headers["Subject"] = []string{temp.Subject}
	msg.SetHeaders(headers)
	msg.SetBody("text/html", temp.Body)
	if err = d.email.DialAndSend(msg); err != nil {
		log.Error("d.email.DialAndSend(%+v) error:%v", msg, err)
		return
	}
	return
}
// PopRedis lpop fail item from redis
// An empty list (redis.ErrNil) is not treated as an error: both bs and err
// come back nil in that case.
func (d *Dao) PopRedis(c context.Context, key string) (bs []byte, err error) {
	var conn = d.redis.Get(c)
	defer conn.Close()
	if bs, err = redis.Bytes(conn.Do("LPOP", key)); err != nil && err != redis.ErrNil {
		log.Error("d.PopRedis() redis.Bytes(conn.Do(LPOP, %s)) error(%v)", key, err)
	}
	return
}

View File

@@ -0,0 +1,60 @@
package email
import (
"context"
"flag"
"github.com/smartystreets/goconvey/convey"
"go-common/app/job/main/aegis/conf"
"os"
"testing"
)
var (
	// d is the package-wide email Dao under test.
	d *Dao
)

// TestMain selects config-center flags in a deploy environment, otherwise
// the local TOML file, then builds the Dao for the tests below.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		flag.Set("app_id", "main.archive.aegis-job")
		flag.Set("conf_token", "aed3cc21ca345ffc284c6036da32352b")
		flag.Set("tree_id", "61819")
		flag.Set("conf_version", "1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../../cmd/aegis-job.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}

// TestDao_MonitorEmailAsync verifies that enqueueing a monitor email succeeds.
func TestDao_MonitorEmailAsync(t *testing.T) {
	convey.Convey("MonitorEmailAsync", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.MonitorEmailAsync(c, []string{"abc@bilibili.com"}, "测试标题", "测试内容<a href=\"https://www.bilibili.com\">link</a>")
			ctx.Convey("Then err should be nil.tasks,lastid should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDao_MonitorEmailProc expects an error since no real SMTP server is
// reachable from the test environment.
func TestDao_MonitorEmailProc(t *testing.T) {
	convey.Convey("MonitorEmailProc", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.MonitorEmailProc()
			ctx.Convey("Then err should be nil.tasks,lastid should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"api.go",
"dao.go",
"db.go",
"redis.go",
],
importpath = "go-common/app/job/main/aegis/dao/monitor",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/model/monitor:go_default_library",
"//library/cache/redis:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = [
"api_test.go",
"dao_test.go",
"db_test.go",
"redis_test.go",
],
embed = [":go_default_library"],
tags = ["automanaged"],
deps = [
"//app/job/main/aegis/conf:go_default_library",
"//app/job/main/aegis/model/monitor:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
"//vendor/gopkg.in/h2non/gock.v1:go_default_library",
],
)

View File

@@ -0,0 +1,40 @@
package monitor
import (
"context"
"go-common/app/job/main/aegis/model/monitor"
"go-common/library/ecode"
"go-common/library/log"
"net/url"
"strconv"
)
const (
	// _arcAdditURL is the videoup endpoint serving archive addit data.
	_arcAdditURL = "/videoup/archive/addit"
)

// ArchiveAttr fetches the additional attributes of archive aid from the
// videoup HTTP API. A non-zero response code maps to its ecode; a zero code
// with nil data yields ecode.NothingFound.
func (d *Dao) ArchiveAttr(c context.Context, aid int64) (addit *monitor.ArchiveAddit, err error) {
	params := url.Values{}
	params.Set("aid", strconv.FormatInt(aid, 10))
	var res struct {
		Code int                   `json:"code"`
		Data *monitor.ArchiveAddit `json:"data"`
	}
	if err = d.http.Get(c, d.URLArcAddit, "", params, &res); err != nil {
		log.Error("d.ArchiveAttr(%s) error(%v)", d.URLArcAddit+"?"+params.Encode(), err)
		return
	}
	if res.Code != 0 {
		err = ecode.Int(res.Code)
		log.Error("d.ArchiveAttr(%s) Code=(%d)", d.URLArcAddit+"?"+params.Encode(), res.Code)
		return
	}
	if res.Data == nil {
		err = ecode.NothingFound
		log.Warn("d.ArchiveAttr(%s) Code=(%d) data nil", d.URLArcAddit+"?"+params.Encode(), res.Code)
		return
	}
	addit = res.Data
	return
}

View File

@@ -0,0 +1,26 @@
package monitor
import (
"context"
"gopkg.in/h2non/gock.v1"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestMonitorArchiveAttr mocks the videoup addit endpoint with gock and
// checks ArchiveAttr succeeds on a code-0 response.
func TestMonitorArchiveAttr(t *testing.T) {
	convey.Convey("ArchiveAttr", t, func(convCtx convey.C) {
		var (
			c   = context.Background()
			aid = int64(1212)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			defer gock.OffAll()
			httpMock("GET", d.URLArcAddit).Reply(200).JSON(`{"code":0,"data":{}}`)
			_, err := d.ArchiveAttr(c, aid)
			convCtx.Convey("Then err should be nil.addit should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,41 @@
package monitor
import (
"context"
"go-common/app/job/main/aegis/conf"
"go-common/library/cache/redis"
xsql "go-common/library/database/sql"
bm "go-common/library/net/http/blademaster"
)
// Dao bundles the clients the monitor package needs: redis, the fast MySQL
// connection, the fast HTTP client, and the resolved addit URL.
type Dao struct {
	c     *conf.Config
	redis *redis.Pool
	db    *xsql.DB
	http  *bm.Client
	// URLArcAddit is the fully-qualified videoup addit endpoint.
	URLArcAddit string
}

// New init mysql db
func New(c *conf.Config) (dao *Dao) {
	dao = &Dao{
		c:           c,
		redis:       redis.NewPool(c.Redis),
		db:          xsql.NewMySQL(c.MySQL.Fast),
		http:        bm.NewClient(c.HTTP.Fast),
		URLArcAddit: c.Host.Videoup + _arcAdditURL,
	}
	return
}

// Close close the resource.
func (d *Dao) Close() {
	d.redis.Close()
	d.db.Close()
}

// Ping dao ping
func (d *Dao) Ping(c context.Context) error {
	// TODO: if you need use mc,redis, please add
	return d.db.Ping(c)
}

View File

@@ -0,0 +1,43 @@
package monitor
import (
"flag"
"go-common/app/job/main/aegis/conf"
"gopkg.in/h2non/gock.v1"
"os"
"strings"
"testing"
)
var (
	// d is the package-wide monitor Dao under test.
	d *Dao
)

// TestMain selects config-center flags in a deploy environment, otherwise
// the local TOML file, then builds the Dao for the tests.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		flag.Set("app_id", "main.archive.aegis-job")
		flag.Set("conf_token", "aed3cc21ca345ffc284c6036da32352b")
		flag.Set("tree_id", "61819")
		flag.Set("conf_version", "1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../../cmd/aegis-job.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}

// httpMock registers a gock interceptor for method/url and routes the Dao's
// HTTP client through gock's transport.
func httpMock(method, url string) *gock.Request {
	r := gock.New(url)
	r.Method = strings.ToUpper(method)
	d.http.SetTransport(gock.DefaultTransport)
	return r
}

View File

@@ -0,0 +1,74 @@
package monitor
import (
"context"
"encoding/json"
"github.com/pkg/errors"
"go-common/app/job/main/aegis/model/monitor"
xsql "go-common/library/database/sql"
"go-common/library/log"
"time"
)
const (
	_rulesByBid    = "SELECT id,type,bid,name,state,stime,etime,rule,uid,ctime,mtime FROM monitor_rule WHERE bid = ? AND state = 1 AND stime < ? AND etime > ?"
	_allValidRules = "SELECT id,type,bid,name,state,stime,etime,rule,uid,ctime,mtime FROM monitor_rule WHERE state = 1 AND stime < ? AND etime > ?"
)

// queryRules runs a monitor_rule query and scans each row into a Rule,
// decoding the JSON `rule` column into RuleConf. Shared by RulesByBid and
// ValidRules, whose bodies were previously duplicated verbatim.
func (d *Dao) queryRules(c context.Context, query string, args ...interface{}) (rules []*monitor.Rule, err error) {
	var rows *xsql.Rows
	if rows, err = d.db.Query(c, query, args...); err != nil {
		log.Error("d.db.Query error(%v)", errors.WithStack(err))
		return
	}
	defer rows.Close()
	for rows.Next() {
		rule := &monitor.Rule{}
		var confStr string
		if err = rows.Scan(&rule.ID, &rule.Type, &rule.BID, &rule.Name, &rule.State, &rule.STime, &rule.ETime, &confStr, &rule.UID, &rule.CTime, &rule.MTime); err != nil {
			log.Error("rows.Scan error(%v)", err)
			return
		}
		conf := &monitor.RuleConf{}
		if err = json.Unmarshal([]byte(confStr), conf); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", confStr, err)
			return
		}
		rule.RuleConf = conf
		rules = append(rules, rule)
	}
	// Surface row-iteration errors (the original never checked rows.Err()).
	if err = rows.Err(); err != nil {
		log.Error("rows.Err error(%v)", err)
	}
	return
}

// RulesByBid 获取某业务的监控
// RulesByBid returns the active monitor rules for business bid whose time
// window contains the current moment.
func (d *Dao) RulesByBid(c context.Context, bid int64) (rules []*monitor.Rule, err error) {
	now := time.Now()
	return d.queryRules(c, _rulesByBid, bid, now, now)
}

// ValidRules 获取有效的监控
// ValidRules returns all active monitor rules whose time window contains
// the current moment, across every business.
func (d *Dao) ValidRules(c context.Context) (rules []*monitor.Rule, err error) {
	now := time.Now()
	return d.queryRules(c, _allValidRules, now, now)
}

View File

@@ -0,0 +1,37 @@
package monitor
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestMonitorRulesByBid checks RulesByBid runs without error for a bid.
func TestMonitorRulesByBid(t *testing.T) {
	convey.Convey("RulesByBid", t, func(convCtx convey.C) {
		var (
			c   = context.Background()
			bid = int64(2)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.RulesByBid(c, bid)
			convCtx.Convey("Then err should be nil.rules should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorValidRules checks ValidRules runs without error.
func TestMonitorValidRules(t *testing.T) {
	convey.Convey("ValidRules", t, func(convCtx convey.C) {
		var (
			c = context.Background()
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.ValidRules(c)
			convCtx.Convey("Then err should be nil.rules should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,204 @@
package monitor
import (
"context"
"encoding/json"
"fmt"
"go-common/app/job/main/aegis/model/monitor"
"go-common/library/cache/redis"
"go-common/library/log"
"strconv"
"time"
)
const (
	// _maxAge is the retention window for sorted-set members: 7 days in seconds.
	_maxAge = 604800
)

// AddToSet adds oid to each monitor sorted set in keys, scored with the
// current unix time; members already present (non-zero ZSCORE) are skipped.
// It returns human-readable operation logs for auditing; the set TTL is
// refreshed to _maxAge after every add.
func (d *Dao) AddToSet(c context.Context, keys []string, oid int64) (logs []string, err error) {
	if len(keys) == 0 {
		return
	}
	var (
		conn = d.redis.Get(c)
		now  = time.Now().Unix()
	)
	defer conn.Close()
	for _, key := range keys {
		// Skip members that already exist in the set (ZSCORE != 0).
		// NOTE(review): the ZSCORE error is deliberately ignored here.
		if v, _ := redis.Int(conn.Do("ZSCORE", key, oid)); v != 0 {
			logs = append(logs, fmt.Sprintf("AddToSet() conn.Do(ZSCORE, %s, %d) member exists success", key, oid))
			continue
		}
		if _, err = conn.Do("ZADD", key, now, oid); err != nil {
			log.Error("conn.Do(ZADD, %s, %d, %d) error(%v)", key, now, oid, err)
			logs = append(logs, fmt.Sprintf("AddToSet() conn.Do(ZADD, %s, %d, %d) error(%v)", key, now, oid, err))
		} else {
			logs = append(logs, fmt.Sprintf("AddToSet() conn.Do(ZADD, %s, %d, %d) success", key, now, oid))
		}
		if _, err = conn.Do("EXPIRE", key, _maxAge); err != nil {
			log.Error("conn.Do(EXPIRE, %s, %d) error(%v)", key, _maxAge, err)
			logs = append(logs, fmt.Sprintf("AddToSet() conn.Do(EXPIRE, %s, %d) error(%v)", key, _maxAge, err))
		} else {
			logs = append(logs, fmt.Sprintf("AddToSet() conn.Do(EXPIRE, %s, %d) success", key, _maxAge))
		}
	}
	return
}
// RemFromSet remove monitor stats
// RemFromSet removes oid from each monitor sorted set in keys, continuing
// past individual failures; the last error (if any) is returned alongside
// per-key operation logs.
func (d *Dao) RemFromSet(c context.Context, keys []string, oid int64) (logs []string, err error) {
	if len(keys) == 0 {
		return
	}
	var (
		conn = d.redis.Get(c)
	)
	defer conn.Close()
	for _, key := range keys {
		if _, er := conn.Do("ZREM", key, oid); er != nil {
			err = er
			log.Error("conn.Do(ZREM, %s, %d) error(%v)", key, oid, err)
			logs = append(logs, fmt.Sprintf("RemFromSet() conn.Do(ZREM, %s, %d) error(%v)", key, oid, err))
			continue
		}
		logs = append(logs, fmt.Sprintf("RemFromSet() conn.Do(ZREM, %s, %d) success", key, oid))
	}
	return
}

// ClearExpireSet clear expire stats
// ClearExpireSet drops members older than _maxAge (score in [0, now-_maxAge])
// from each sorted set in keys, continuing past individual failures.
func (d *Dao) ClearExpireSet(c context.Context, keys []string) (logs []string, err error) {
	if len(keys) == 0 {
		return
	}
	var (
		conn = d.redis.Get(c)
		now  = time.Now().Unix()
		min  int64
		max  = now - _maxAge
	)
	defer conn.Close()
	for _, key := range keys {
		if _, er := conn.Do("ZREMRANGEBYSCORE", key, min, max); er != nil {
			err = er
			log.Error("conn.Do(ZREMRANGEBYSCORE, %s, %d, %d) error(%v)", key, min, max, err)
			logs = append(logs, fmt.Sprintf("ClearExpireSet() key: %s min:%d max:%d error:%v", key, min, max, err))
			continue
		}
		logs = append(logs, fmt.Sprintf("ClearExpireSet() key: %s min:%d max:%d success", key, min, max))
	}
	return
}
// AddToDelArc stores a deleted archive's key fields (aid, mid, title, mtime)
// as JSON in the redis hash monitor.RedisDelArcInfo, keyed by archive ID.
func (d *Dao) AddToDelArc(c context.Context, a *monitor.BinlogArchive) (err error) {
	var (
		conn = d.redis.Get(c)
		bs   []byte
	)
	defer conn.Close()
	info := &monitor.DelArcInfo{
		AID:   a.ID,
		MID:   a.MID,
		Time:  a.MTime,
		Title: a.Title,
	}
	if bs, err = json.Marshal(info); err != nil {
		log.Error("json.Marshal(%+v) error:%v", info, err)
		return
	}
	if _, err = conn.Do("HSET", monitor.RedisDelArcInfo, a.ID, string(bs)); err != nil {
		log.Error("conn.Send(HSET,%s,%d,%s) error(%v)", monitor.RedisDelArcInfo, a.ID, bs, err)
		return
	}
	return
}
// ArcDelInfos fetches the stored info of deleted archives (written by
// AddToDelArc) from the redis hash for the given aids. Missing or
// undecodable entries are skipped; they do not fail the whole batch.
func (d *Dao) ArcDelInfos(c context.Context, aids []int64) (infos map[int64]*monitor.DelArcInfo, err error) {
	var (
		conn = d.redis.Get(c)
		strs []string
	)
	defer conn.Close()
	infos = make(map[int64]*monitor.DelArcInfo)
	if len(aids) == 0 {
		return
	}
	args := redis.Args{}
	args = args.Add(monitor.RedisDelArcInfo)
	for _, id := range aids {
		args = args.Add(id)
	}
	log.Info("s.monitorNotify() ArcDelInfos. aids(%v) args(%+v)", aids, args)
	if strs, err = redis.Strings(conn.Do("HMGET", args...)); err != nil {
		log.Error("conn.Send(HMGET,%v) error(%v)", args, err)
		return
	}
	log.Info("s.monitorNotify() ArcDelInfos. aids(%v) strs(%v)", aids, strs)
	for _, v := range strs {
		// HMGET yields an empty string for fields that are absent; skip them
		// instead of feeding "" to the JSON decoder.
		if v == "" {
			continue
		}
		info := &monitor.DelArcInfo{}
		// Decode with a local error: the original assigned to the named
		// return err and then `continue`d, so one bad entry made the whole
		// call return non-nil err despite successfully collecting the rest.
		if e := json.Unmarshal([]byte(v), info); e != nil {
			log.Error("json.Unmarshal(%s) error:%v", v, e)
			continue
		}
		infos[info.AID] = info
	}
	return
}
// MoniRuleStats computes counters for one monitor rule's sorted set:
// TotalCount = all members, MoniCount = members scored in [min,max],
// MaxTime = age in seconds of the oldest member.
func (d *Dao) MoniRuleStats(c context.Context, id int64, min, max int64) (stats *monitor.Stats, err error) {
	var (
		conn = d.redis.Get(c)
		key  = fmt.Sprintf(monitor.RedisPrefix, id)
		now  = time.Now().Unix()
	)
	stats = &monitor.Stats{}
	defer conn.Close()
	if stats.TotalCount, err = redis.Int(conn.Do("ZCOUNT", key, 0, now)); err != nil {
		log.Error("conn.Do(ZCOUNT,%s,0,%d) error(%v)", key, now, err)
		return
	}
	if stats.MoniCount, err = redis.Int(conn.Do("ZCOUNT", key, min, max)); err != nil {
		log.Error("conn.Do(ZCOUNT,%s,%d,%d) error(%v)", key, min, max, err)
		return
	}
	// oldest member of the set, i.e. the item waiting longest
	var oldest map[string]string
	// fixed: the ZRANGE error was previously ignored before iterating
	if oldest, err = redis.StringMap(conn.Do("ZRANGE", key, 0, 0, "WITHSCORES")); err != nil {
		log.Error("conn.Do(ZRANGE,%s,0,0,WITHSCORES) error(%v)", key, err)
		return
	}
	for _, t := range oldest {
		var i int
		if i, err = strconv.Atoi(t); err != nil {
			return
		}
		stats.MaxTime = int(now) - i
	}
	return
}
// MoniRuleOids returns the monitored object ids of rule id whose scores fall
// in [min,max], mapped to their scores.
func (d *Dao) MoniRuleOids(c context.Context, id int64, min, max int64) (oidMap map[int64]int, err error) {
	var (
		conn   = d.redis.Get(c)
		key    = fmt.Sprintf(monitor.RedisPrefix, id)
		intMap map[string]int
	)
	oidMap = make(map[int64]int)
	defer conn.Close()
	if intMap, err = redis.IntMap(conn.Do("ZRANGEBYSCORE", key, min, max, "WITHSCORES")); err != nil {
		log.Error("redis.IntMap(conn.Do(\"ZRANGEBYSCORE\", %s, %d, %d, \"WITHSCORES\")) error(%v)", key, min, max, err)
		return
	}
	for k, v := range intMap {
		oid, atoiErr := strconv.Atoi(k)
		if atoiErr != nil {
			// fixed: a malformed member previously inserted oid 0 into the map
			// and leaked the parse error into the named return
			log.Error("strconv.Atoi(%s) error(%v)", k, atoiErr)
			continue
		}
		oidMap[int64(oid)] = v
	}
	return
}

View File

@@ -0,0 +1,122 @@
package monitor
import (
"context"
"go-common/app/job/main/aegis/model/monitor"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestMonitorAddToSet smoke-tests AddToSet against the test redis.
// NOTE(review): relies on a package-level dao fixture `d` — presumably
// initialized in a TestMain elsewhere in this package; confirm.
func TestMonitorAddToSet(t *testing.T) {
	convey.Convey("AddToSet", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			keys = []string{"monitor_test"}
			oid  = int64(123)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.AddToSet(c, keys, oid)
			convCtx.Convey("Then err should be nil.logs should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorRemFromSet smoke-tests RemFromSet on the same key/oid pair.
func TestMonitorRemFromSet(t *testing.T) {
	convey.Convey("RemFromSet", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			keys = []string{"monitor_test"}
			oid  = int64(123)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.RemFromSet(c, keys, oid)
			convCtx.Convey("Then err should be nil.logs should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorClearExpireSet smoke-tests expiry trimming; only checks err.
func TestMonitorClearExpireSet(t *testing.T) {
	convey.Convey("ClearExpireSet", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			keys = []string{"monitor_test"}
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.ClearExpireSet(c, keys)
			convCtx.Convey("Then err should be nil.logs should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorAddToDelArc caches a minimal deleted-archive record (aid 123).
func TestMonitorAddToDelArc(t *testing.T) {
	convey.Convey("AddToDelArc", t, func(convCtx convey.C) {
		var (
			c = context.Background()
			a = &monitor.BinlogArchive{
				ID: 123,
			}
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			err := d.AddToDelArc(c, a)
			convCtx.Convey("Then err should be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorArcDelInfos reads back the record written by the test above;
// ordering between the two tests matters (file order is relied upon).
func TestMonitorArcDelInfos(t *testing.T) {
	convey.Convey("ArcDelInfos", t, func(convCtx convey.C) {
		var (
			c    = context.Background()
			aids = []int64{123}
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.ArcDelInfos(c, aids)
			convCtx.Convey("Then err should be nil.infos should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorMoniRuleStats smoke-tests stats computation for rule 1.
func TestMonitorMoniRuleStats(t *testing.T) {
	convey.Convey("MoniRuleStats", t, func(convCtx convey.C) {
		var (
			c   = context.Background()
			id  = int64(1)
			min = int64(0)
			max = int64(0)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.MoniRuleStats(c, id, min, max)
			convCtx.Convey("Then err should be nil.stats should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestMonitorMoniRuleOids smoke-tests oid listing for rule 1.
func TestMonitorMoniRuleOids(t *testing.T) {
	convey.Convey("MoniRuleOids", t, func(convCtx convey.C) {
		var (
			c   = context.Background()
			id  = int64(1)
			min = int64(0)
			max = int64(0)
		)
		convCtx.Convey("When everything goes positive", func(convCtx convey.C) {
			_, err := d.MoniRuleOids(c, id, min, max)
			convCtx.Convey("Then err should be nil.oidMap should not be nil.", func(convCtx convey.C) {
				convCtx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,79 @@
package dao
import (
"context"
"database/sql"
"time"
"go-common/app/job/main/aegis/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"github.com/pkg/errors"
)
// SQL statements for task dispatching and maintenance.
const (
	// _queryTaskSQL pages tasks by state and mtime, using id as a cursor.
	_queryTaskSQL = "SELECT id,business_id,flow_id,rid,admin_id,uid,state,weight,utime,gtime,mid,fans,`group`,reason,ctime,mtime FROM task WHERE state=? AND mtime<=? AND id>? ORDER BY id LIMIT ?"
	// _upSetWeightSQL rewrites one task's weight.
	_upSetWeightSQL = "UPDATE task SET weight=? WHERE id=?"
	// _assignTaskSQL claims a task for an admin/uid while it is still in the given state.
	_assignTaskSQL = "UPDATE task SET admin_id=?,uid=? WHERE id=? AND state=?"
	// _checkTaskSQL finds an unfinished task (state below the submit threshold) for a flow/resource pair.
	_checkTaskSQL = "SELECT id FROM task WHERE flow_id=? AND rid=? AND state<?"
)
// CheckTask returns the id of an unfinished task for (flowid, rid), or 0 when
// none exists — callers use it to avoid creating duplicate tasks.
func (d *Dao) CheckTask(c context.Context, flowid, rid int64) (id int64) {
	scanErr := d.fastdb.QueryRow(c, _checkTaskSQL, flowid, rid, model.TaskStateSubmit).Scan(&id)
	switch scanErr {
	case nil, sql.ErrNoRows:
		// a missing row simply means "no pending task"
	default:
		log.Error("d.db.QueryRow error(%v)", scanErr)
	}
	return
}
// AssignTask claims an init-state task for the admin/uid carried by task and
// reports how many rows were updated (0 means someone else got there first).
func (d *Dao) AssignTask(c context.Context, task *model.Task) (rows int64, err error) {
	res, execErr := d.fastdb.Exec(c, _assignTaskSQL, task.AdminID, task.UID, task.ID, model.TaskStateInit)
	if execErr != nil {
		err = execErr
		log.Error("d.db.Exec error(%v)", errors.WithStack(err))
		return
	}
	return res.RowsAffected()
}
// QueryTask pages tasks matching (state, mtime<=, id>) ordered by id.
// It returns the batch plus the last id scanned, for cursor-style paging.
func (d *Dao) QueryTask(c context.Context, state int8, mtime time.Time, id, limit int64) (tasks []*model.Task, lastid int64, err error) {
	var rows *xsql.Rows
	if rows, err = d.slowdb.Query(c, _queryTaskSQL, state, mtime, id, limit); err != nil {
		log.Error("db.Query error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		task := &model.Task{}
		if err = rows.Scan(&task.ID, &task.BusinessID, &task.FlowID, &task.RID, &task.AdminID, &task.UID, &task.State, &task.Weight,
			&task.Utime, &task.Gtime, &task.MID, &task.Fans, &task.Group, &task.Reason, &task.Ctime, &task.Mtime); err != nil {
			log.Error("rows.Scan error(%v)", err)
			return
		}
		tasks = append(tasks, task)
		lastid = task.ID
	}
	// fixed: surface iteration errors that rows.Next silently terminates on
	if err = rows.Err(); err != nil {
		log.Error("rows.Err error(%v)", err)
	}
	return
}
// SetWeightDB persists a task's weight and reports the number of rows updated.
// Bug fix: the original returned res.LastInsertId(), which is meaningless for
// an UPDATE statement — callers need RowsAffected to know whether the task
// actually existed.
func (d *Dao) SetWeightDB(c context.Context, taskid, weight int64) (rows int64, err error) {
	res, err := d.fastdb.Exec(c, _upSetWeightSQL, weight, taskid)
	if err != nil {
		log.Error("db.Exec error(%v)", err)
		return
	}
	return res.RowsAffected()
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"testing"
"time"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoAssignTask claims a pre-seeded fixture task (task1, defined in the
// package test setup — presumably dao_test.go) for admin/uid 1.
func TestDaoAssignTask(t *testing.T) {
	convey.Convey("AssignTask", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			task = task1
		)
		task.AdminID = 1
		task.UID = 1
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			rows, err := d.AssignTask(c, task)
			ctx.Convey("Then err should be nil.rows should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(rows, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoQueryTask pages init-state tasks modified before "now".
func TestDaoQueryTask(t *testing.T) {
	convey.Convey("QueryTask", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			state = int8(0)
			mtime = time.Now()
			id    = int64(0)
			limit = int64(1000)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			tasks, lastid, err := d.QueryTask(c, state, mtime, id, limit)
			ctx.Convey("Then err should be nil.tasks,lastid should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(lastid, convey.ShouldNotBeNil)
				ctx.So(tasks, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoSetWeightDB writes weight 10 onto task 1 and only checks err/rows.
func TestDaoSetWeightDB(t *testing.T) {
	convey.Convey("SetWeightDB", t, func(ctx convey.C) {
		var (
			c      = context.Background()
			taskid = int64(1)
			weight = int64(10)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			rows, err := d.SetWeightDB(c, taskid, weight)
			ctx.Convey("Then err should be nil.rows should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(rows, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,51 @@
package dao
import (
"context"
"net/url"
"go-common/app/job/main/aegis/model"
"go-common/library/ecode"
"go-common/library/log"
)
// Aegis admin API endpoints used by the resource proxy functions below.
const (
	_addURL = "/x/internal/aegis/add"
	_updateURL = "/x/internal/aegis/update"
	_cancelURL = "/x/internal/aegis/cancel"
)
// RscAdd registers a new resource with the aegis admin API.
func (d *Dao) RscAdd(c context.Context, opt *model.AddOption) error {
	return d.commonPost(c, d.c.Host.API+_addURL, opt.ToQueryURI())
}
// RscUpdate pushes resource changes to the aegis admin API.
func (d *Dao) RscUpdate(c context.Context, opt *model.UpdateOption) error {
	return d.commonPost(c, d.c.Host.API+_updateURL, opt.ToQueryURI())
}
// RscCancel cancels a resource through the aegis admin API.
func (d *Dao) RscCancel(c context.Context, opt *model.CancelOption) error {
	return d.commonPost(c, d.c.Host.API+_cancelURL, opt.ToQueryURI())
}
// commonPost issues a form POST to uri and maps a non-zero business code in
// the response to an ecode error. Transport failures are returned as-is.
func (d *Dao) commonPost(c context.Context, uri string, params url.Values) error {
	res := &model.BaseResponse{}
	if postErr := d.httpFast.Post(c, uri, "", params, res); postErr != nil {
		log.Error("d.httpFast.Post(%s) params(%s) error(%v)", uri, params.Encode(), postErr)
		return postErr
	}
	if res.Code == 0 {
		return nil
	}
	log.Error("d.httpFast.Post(%s) params(%s) res(%+v)", uri, params.Encode(), res)
	return ecode.Code(res.Code)
}

View File

@@ -0,0 +1,30 @@
package dao
import (
"context"
"fmt"
gmc "go-common/library/cache/memcache"
"go-common/library/log"
)
// IsConsumerOn reports whether the consumer on/off switch key exists in
// memcache. A missing key means "off" and is not an error.
func (d *Dao) IsConsumerOn(c context.Context, bizid, flowid int, uid int64) (isOn bool, err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	_, getErr := conn.Get(mcKey(bizid, flowid, uid))
	switch getErr {
	case nil:
		isOn = true
	case gmc.ErrNotFound:
		// absent key: switch is simply off
	default:
		err = getErr
		log.Error("IsConsumerOn error(%v)", err)
	}
	return
}
// mcKey builds the memcache key for a consumer switch: aegis<biz>_<flow>_<uid>.
func mcKey(bizid, flowid int, uid int64) string {
	return fmt.Sprint("aegis", bizid, "_", flowid, "_", uid)
}

View File

@@ -0,0 +1,28 @@
package dao
import (
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoIsConsumerOn is an intentionally empty placeholder — the memcache
// fixture is not wired up here. TODO(review): add a real case or delete.
func TestDaoIsConsumerOn(t *testing.T) {
	convey.Convey("IsConsumerOn", t, func(ctx convey.C) {
	})
}

// TestDaomcKey only asserts the key is non-nil; it does not pin the format.
func TestDaomcKey(t *testing.T) {
	convey.Convey("mcKey", t, func(ctx convey.C) {
		var (
			bizid  = int(0)
			flowid = int(0)
			uid    = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := mcKey(bizid, flowid, uid)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,135 @@
package dao
import (
"context"
"database/sql"
"fmt"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"github.com/jinzhu/gorm"
)
const (
	// _taskReleaseSQL returns stale dispatched tasks to the pool (resets owner and grab time).
	_taskReleaseSQL = "update task SET admin_id=0,state=0,uid=0,gtime=0 where state=? AND mtime<=?"
	// _taskClearSQL prunes finished tasks (state>=3) in bounded batches.
	_taskClearSQL = "DELETE FROM task WHERE mtime<=? AND state>=3 LIMIT ?"
)
// TaskActiveConfigs loads every task config whose state is "on".
func (d *Dao) TaskActiveConfigs(c context.Context) (configs []*model.TaskConfig, err error) {
	err = d.orm.Model(&model.TaskConfig{}).Where("state=?", model.ConfigStateOn).Find(&configs).Error
	if err != nil {
		log.Error("query error(%v)", err)
	}
	return
}
// TaskActiveConsumer loads all active consumers, grouped by "bizID-flowID"
// into sets of uids.
func (d *Dao) TaskActiveConsumer(c context.Context) (consumerCache map[string]map[int64]struct{}, err error) {
	rows, err := d.orm.Table("task_consumer").Select("business_id,flow_id,uid").Where("state=?", model.ConsumerStateOn).Rows()
	if err != nil {
		return
	}
	defer rows.Close()
	consumerCache = make(map[string]map[int64]struct{})
	for rows.Next() {
		var bizID, flowID, uid int64
		// fixed: a scan failure used to assign the named return err and then
		// continue, making the whole call report failure after a single bad row
		if scanErr := rows.Scan(&bizID, &flowID, &uid); scanErr != nil {
			log.Error("rows.Scan error(%v)", scanErr)
			continue
		}
		key := fmt.Sprintf("%d-%d", bizID, flowID)
		if _, ok := consumerCache[key]; !ok {
			consumerCache[key] = make(map[int64]struct{})
		}
		consumerCache[key][uid] = struct{}{}
	}
	// surface iteration errors swallowed by rows.Next
	err = rows.Err()
	return
}
// KickOutConsumer flips one consumer row to the "off" state so it stops
// receiving tasks.
func (d *Dao) KickOutConsumer(c context.Context, bizid, flowid, uid int64) (err error) {
	db := d.orm.Table("task_consumer").Where("business_id=? AND flow_id=? AND uid=?", bizid, flowid, uid)
	return db.Update("state", model.ConsumerStateOff).Error
}
// Resource loads one resource by id; a missing row yields (nil, nil).
func (d *Dao) Resource(c context.Context, rid int64) (res *model.Resource, err error) {
	res = &model.Resource{}
	err = d.orm.Where("id = ?", rid).First(res).Error
	if err == gorm.ErrRecordNotFound {
		res, err = nil, nil
	}
	return
}
// RscState reads the current state of a resource from resource_result.
func (d *Dao) RscState(c context.Context, rid int64) (state int64, err error) {
	row := d.orm.Table("resource_result").Select("state").Where("rid=?", rid).Row()
	err = row.Scan(&state)
	return
}
// TaskRelease returns dispatched tasks older than mtime to the pool
// (owner, state and grab time are all reset by _taskReleaseSQL).
func (d *Dao) TaskRelease(c context.Context, mtime time.Time) (err error) {
	err = d.orm.Exec(_taskReleaseSQL, model.TaskStateDispatch, mtime).Error
	return
}
// ReleaseByConsumer frees every task currently held by one consumer on one
// flow: dispatched tasks (state=1) and assigned-but-untaken ones
// (admin_id>0 with state=0).
func (d *Dao) ReleaseByConsumer(c context.Context, bizid, flowid, uid int64) (err error) {
	reset := map[string]interface{}{
		"uid":      0,
		"state":    0,
		"gtime":    0,
		"admin_id": 0,
	}
	return d.orm.Table("task").
		Where("business_id=? AND flow_id=? AND uid=? AND (state=1 or (admin_id>0 AND state=0))", bizid, flowid, uid).
		Update(reset).Error
}
// Report persists a single report row.
func (d *Dao) Report(c context.Context, rt *model.Report) (err error) {
	err = d.orm.Create(rt).Error
	return
}
// TaskClear deletes finished tasks (state>=3) modified before mtime, at most
// limit rows per call, so completed work is kept for roughly three days only.
func (d *Dao) TaskClear(c context.Context, mtime time.Time, limit int64) (rows int64, err error) {
	res := d.orm.Exec(_taskClearSQL, mtime, limit)
	return res.RowsAffected, res.Error
}
// CheckFlow reports whether resource rid is attached to flow flowid
// (a live net_flow_resource row, state != -1). sql.ErrNoRows means "not
// attached" and is not an error.
func (d *Dao) CheckFlow(c context.Context, rid, flowid int64) (ok bool, err error) {
	var id int64
	err = d.orm.Table("net_flow_resource").Select("id").
		Where("rid=? AND flow_id=? AND state!=-1", rid, flowid).Row().Scan(&id)
	if err != nil {
		if err == sql.ErrNoRows {
			err = nil
		} else {
			// fixed: err was missing from the argument list, so %v had nothing to print
			log.Error("CheckFlow(%d,%d) error(%v)", rid, flowid, err)
		}
		return
	}
	ok = id > 0
	return
}
// CreateTask inserts a task for (rid, flow_id) unless an unfinished one
// (state below the submit threshold) already exists; in that case the
// existing row's mid/fans/group are refreshed instead of creating a duplicate.
// NOTE(review): relies on gorm's Assign+FirstOrCreate semantics — the Assign
// map is applied whether the row is found or newly created.
func (d *Dao) CreateTask(c context.Context, task *model.Task) error {
	return d.orm.Table("task").Where("rid=? AND flow_id=? AND state<?", task.RID, task.FlowID, model.TaskStateSubmit).
		Assign(map[string]interface{}{
			"mid":   task.MID,
			"fans":  task.Fans,
			"group": task.Group,
		}).FirstOrCreate(task).Error
}

View File

@@ -0,0 +1,145 @@
package dao
import (
"context"
"testing"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/sync/errgroup"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoTaskActiveConfigs expects at least one active config in the test DB.
func TestDaoTaskActiveConfigs(t *testing.T) {
	convey.Convey("TaskActiveConfigs", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			configs, err := d.TaskActiveConfigs(c)
			ctx.Convey("Then err should be nil.configs should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(configs, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoTaskActiveConsumer expects at least one active consumer row.
func TestDaoTaskActiveConsumer(t *testing.T) {
	convey.Convey("TaskActiveConsumer", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			consumerCache, err := d.TaskActiveConsumer(c)
			ctx.Convey("Then err should be nil.consumerCache should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(consumerCache, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoResource only checks err; a missing row legitimately returns nil, nil.
func TestDaoResource(t *testing.T) {
	convey.Convey("Resource", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			rid = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.Resource(c, rid)
			ctx.Convey("Then err should be nil.res should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoTaskRelease releases tasks untouched for at least 10 seconds.
func TestDaoTaskRelease(t *testing.T) {
	convey.Convey("TaskRelease", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			mtime = time.Now().Add(-10 * time.Second)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.TaskRelease(c, mtime)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoReport inserts one throwaway report row.
func TestDaoReport(t *testing.T) {
	convey.Convey("Report", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			rt := &model.Report{
				BusinessID: 1,
				FlowID:     1,
				UID:        1,
				Content:    []byte("sguyiuo"),
			}
			err := d.Report(c, rt)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoTaskClear prunes finished tasks older than 3 days (batch of 1000).
func TestDaoTaskClear(t *testing.T) {
	convey.Convey("TaskClear", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.TaskClear(c, time.Now().Add(-3*24*time.Hour), 1000)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoCheckFlow only checks err; ok is intentionally ignored.
func TestDaoCheckFlow(t *testing.T) {
	convey.Convey("CheckFlow", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := d.CheckFlow(c, 1, 1)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoCreateTask hammers CreateTask from three goroutines over the same
// key space — a regression test for the deadlock that motivated the move to
// the orm-based upsert (see CHANGELOG v1.0.11).
func TestDaoCreateTask(t *testing.T) {
	f := func() error {
		for i := 1; i < 100; i++ {
			task := &model.Task{
				BusinessID: int64(i),
				RID:        int64(i),
				FlowID:     int64(i),
			}
			if err := d.CreateTask(context.Background(), task); err != nil {
				return err
			}
		}
		return nil
	}
	wg := errgroup.Group{}
	wg.Go(f)
	wg.Go(f)
	wg.Go(f)
	if err := wg.Wait(); err != nil {
		t.Fail()
	}
}

View File

@@ -0,0 +1,289 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"go-common/app/job/main/aegis/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
	// _hashexpire is the TTL, in seconds (24h), for cached task snapshots.
	_hashexpire = 24 * 60 * 60
)
// personalKey names the redis list holding tasks dispatched to one reviewer.
func personalKey(businessID, flowID int64, uid int64) string {
	return fmt.Sprint("personal_", businessID, "_", flowID, "_", uid)
}
// publicKey names the public task zset; the "{biz-flow}" hash tag pins all
// related keys to the same redis-cluster slot.
func publicKey(businessID, flowID int64) string {
	return fmt.Sprint("{", businessID, "-", flowID, "}public_", businessID, "_", flowID)
}
// publicBackKey names the snapshot copy of the public zset; it shares the
// "{biz-flow}" hash tag with publicKey so ZUNIONSTORE works in cluster mode.
func publicBackKey(businessID, flowID int64) string {
	return fmt.Sprint("{", businessID, "-", flowID, "}publicBackup_", businessID, "_", flowID)
}
// delayKey names the redis list of one reviewer's delayed tasks.
func delayKey(businessID, flowID int64, uid int64) string {
	return fmt.Sprint("delay_", businessID, "_", flowID, "_", uid)
}
// haskKey (sic — historical typo for "hashKey", kept because callers in this
// file use the name) builds the per-task snapshot key.
func haskKey(taskid int64) string {
	return fmt.Sprint("task_", taskid)
}
// zsetKey renders a task id as a zero-padded 11-digit member string
// (precision padding via %.11d) so member strings have uniform width.
func zsetKey(taskid int64) string {
	return fmt.Sprintf("%.11d", taskid)
}
// SetTask caches a JSON snapshot of task under task_<id> with a 24h TTL,
// pipelining SET and EXPIRE in one round trip.
func (d *Dao) SetTask(c context.Context, task *model.Task) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	var bs []byte
	key := haskKey(task.ID)
	if bs, err = json.Marshal(task); err != nil {
		log.Error("json.Marshal(%+v) error(%v)", task, err)
		return
	}
	// fixed log text: the command is SET, the old message said HSET
	if err = conn.Send("SET", key, bs); err != nil {
		log.Error("SET error(%v)", err)
		return
	}
	if err = conn.Send("EXPIRE", key, _hashexpire); err != nil {
		log.Error("EXPIRE error(%v)", err)
		return
	}
	if err = conn.Flush(); err != nil {
		log.Error("conn.Flush error(%v)", err)
		return
	}
	// drain one reply per queued command
	for i := 0; i < 2; i++ {
		if _, err = conn.Receive(); err != nil {
			log.Error("conn.Receive error(%v)", err)
			return
		}
	}
	return
}
// GetTask loads one task snapshot from redis by id.
func (d *Dao) GetTask(c context.Context, id int64) (task *model.Task, err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	key := haskKey(id)
	bs, getErr := redis.Bytes(conn.Do("GET", key))
	if getErr != nil {
		err = getErr
		log.Error("conn.Get(%s) error(%v)", key, err)
		return
	}
	task = new(model.Task)
	if err = json.Unmarshal(bs, task); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", string(bs), err)
	}
	return
}
// RemovePersonalTask drops a task from one reviewer's personal list
// (the task was delayed or completed).
func (d *Dao) RemovePersonalTask(c context.Context, businessID, flowID int64, uid, taskid int64) (err error) {
	return d.removeList(c, personalKey(businessID, flowID, uid), taskid)
}
// RemoveDelayTask drops a completed delayed task from the reviewer's delay list.
func (d *Dao) RemoveDelayTask(c context.Context, businessID, flowID int64, uid, taskid int64) (err error) {
	return d.removeList(c, delayKey(businessID, flowID, uid), taskid)
}
// removeList deletes every occurrence of id from the redis list at key
// (LREM with count 0).
func (d *Dao) removeList(c context.Context, key string, id int64) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	_, err = conn.Do("LREM", key, 0, id)
	if err != nil {
		log.Error("LREM error(%v)", errors.WithStack(err))
	}
	return
}
// PushPersonalTask appends a task id to the reviewer's personal pool.
func (d *Dao) PushPersonalTask(c context.Context, businessID, flowID int64, uid, taskid int64) (err error) {
	return d.pushList(c, personalKey(businessID, flowID, uid), taskid)
}
// PushDelayTask appends a task id to the reviewer's delay queue.
func (d *Dao) PushDelayTask(c context.Context, businessID, flowID int64, uid, taskid int64) (err error) {
	return d.pushList(c, delayKey(businessID, flowID, uid), taskid)
}
// pushList appends values to the list at key, first removing any existing
// occurrences (LREM count 0) so an id never sits in the queue twice.
func (d *Dao) pushList(c context.Context, key string, values ...interface{}) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	remArgs := append([]interface{}{key, 0}, values...)
	if _, err = conn.Do("LREM", remArgs...); err != nil {
		// fixed: this failure was previously mislabelled as RPUSH in the log
		log.Error("conn.Do(LREM, %v) error(%v)", remArgs, err)
		return
	}
	pushArgs := append([]interface{}{key}, values...)
	if _, err = conn.Do("RPUSH", pushArgs...); err != nil {
		log.Error("conn.Do(RPUSH, %v) error(%v)", pushArgs, err)
	}
	return
}
// RemovePublicTask removes one task member from the public pool zset.
func (d *Dao) RemovePublicTask(c context.Context, businessID, flowID int64, taskid int64) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	args := []interface{}{publicKey(businessID, flowID), zsetKey(taskid)}
	if _, err = conn.Do("ZREM", args...); err != nil {
		log.Error("(ZREM,%v) error(%v)", args, errors.WithStack(err))
	}
	return err
}
// PushPublicTask adds tasks to their public zsets. Scores are negated weights
// so the highest weight sorts first. Each ZADD failure is logged; the named
// err reflects only the last ZADD issued (same contract as before).
func (d *Dao) PushPublicTask(c context.Context, tasks ...*model.Task) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	for _, t := range tasks {
		poolKey := publicKey(t.BusinessID, t.FlowID)
		_, err = conn.Do("ZADD", poolKey, -t.Weight, zsetKey(t.ID))
		if err != nil {
			log.Error("conn.Do(ZADD,%s) error(%v)", poolKey, errors.WithStack(err))
		}
	}
	return
}
// SetWeight rewrites a task's weight in the public zset. Scores are stored
// negated (largest weight first), and ZINCRBY by -(weight+old) lands the
// score at exactly -weight. A missing member is a silent no-op.
func (d *Dao) SetWeight(c context.Context, businessID, flowID int64, id, weight int64) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	var (
		ow  int64
		key = publicKey(businessID, flowID)
		zid = zsetKey(id)
	)
	if ow, err = redis.Int64(conn.Do("ZSCORE", key, zid)); err != nil {
		if err == redis.ErrNil {
			// member absent: nothing to update
			err = nil
		} else {
			log.Error("redis (ZSCORE,%s,%s) error(%v)", key, zid, err)
		}
		return
	}
	// weight is negated so the zset orders from largest weight to smallest
	nw := -(weight + ow)
	if _, err = conn.Do("ZINCRBY", key, nw, zid); err != nil {
		// fixed: arguments now match the format verbs (%s key, %s member, %d increment)
		log.Error("redis (ZINCRBY,%s,%s,%d) error(%v)", key, zid, nw, err)
		return
	}
	return
}
// GetWeight reads a task's weight from the public zset. Scores are stored
// negated, so the result is flipped back; a missing member yields (0, nil).
func (d *Dao) GetWeight(c context.Context, businessID, flowID int64, id int64) (weight int64, err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	key := publicKey(businessID, flowID)
	score, scoreErr := redis.Int64(conn.Do("ZSCORE", key, zsetKey(id)))
	switch scoreErr {
	case nil:
	case redis.ErrNil:
		// absent member: weight stays zero, no error
	default:
		err = scoreErr
		log.Error("conn.Do(ZSCORE %s %s) error(%v)", key, zsetKey(id), errors.WithStack(err))
	}
	weight = -score
	return
}
// TopWeights returns the toplen highest-weight items of the public pool.
func (d *Dao) TopWeights(c context.Context, businessID, flowID int64, toplen int64) (wis []*model.WeightItem, err error) {
	return d.zrange(c, publicKey(businessID, flowID), 0, toplen)
}
// CreateUnionSet snapshots the public zset into its backup key via
// ZUNIONSTORE so the pool can be iterated in batches without racing live
// updates.
func (d *Dao) CreateUnionSet(c context.Context, businessID, flowID int64) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	src := publicKey(businessID, flowID)
	if _, err = conn.Do("ZUNIONSTORE", publicBackKey(businessID, flowID), 1, src); err != nil {
		log.Error("conn.Do(ZUNIONSTORE,%s) error(%v)", src, errors.WithStack(err))
	}
	return
}
// RangeUinonSet (sic — historical typo for "Union", kept for API stability)
// reads one batch [start, stop] from the snapshot zset.
func (d *Dao) RangeUinonSet(c context.Context, businessID, flowID int64, start, stop int64) (wis []*model.WeightItem, err error) {
	return d.zrange(c, publicBackKey(businessID, flowID), start, stop)
}
// DeleteUinonSet (sic) drops the snapshot zset created by CreateUnionSet.
func (d *Dao) DeleteUinonSet(c context.Context, businessID, flowID int64) (err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	backKey := publicBackKey(businessID, flowID)
	if _, err = conn.Do("DEL", backKey); err != nil {
		// fixed: the old message named a non-existent "ZUNIODELNSTORE" command
		log.Error("conn.Do(DEL,%s) error(%v)", backKey, errors.WithStack(err))
	}
	return
}
// zrange reads members and scores in [start, stop] from the zset at key and
// converts them into WeightItems (scores are stored negated).
func (d *Dao) zrange(c context.Context, key string, start, stop int64) (wis []*model.WeightItem, err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	reply, err := redis.Int64s(conn.Do("ZRANGE", key, start, stop, "WITHSCORES"))
	if err != nil {
		// fixed: the command here is ZRANGE, the old message said ZADD
		log.Error("conn.Do(ZRANGE,%s) error(%v)", key, errors.WithStack(err))
		return
	}
	// WITHSCORES alternates member, score: even index = id, odd = negated weight.
	// Guard i+1 so a truncated (odd-length) reply cannot panic.
	for i := 0; i+1 < len(reply); i += 2 {
		wis = append(wis, &model.WeightItem{
			ID:     reply[i],
			Weight: -reply[i+1],
		})
	}
	return
}

View File

@@ -0,0 +1,247 @@
package dao
import (
"context"
"fmt"
"testing"
"go-common/app/job/main/aegis/model"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoSetTask caches three fixture tasks (task1..task3 — presumably
// defined in the package test setup; confirm) into redis.
func TestDaoSetTask(t *testing.T) {
	convey.Convey("SetTask", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err1 := d.SetTask(c, task1)
			err2 := d.SetTask(c, task2)
			err3 := d.SetTask(c, task3)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err1, convey.ShouldBeNil)
				ctx.So(err2, convey.ShouldBeNil)
				ctx.So(err3, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoGetTask reads back task 1; depends on TestDaoSetTask running first.
func TestDaoGetTask(t *testing.T) {
	convey.Convey("GetTask", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			id = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			task, err := d.GetTask(c, id)
			ctx.Convey("Then err should be nil.task should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(task, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoPushPublicTask seeds the public pool with the three fixture tasks.
func TestDaoPushPublicTask(t *testing.T) {
	convey.Convey("PushPublicTask", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.PushPublicTask(c, task1, task2, task3)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoRemovePublicTask removes task 1 from the biz 1 / flow 1 pool.
func TestDaoRemovePublicTask(t *testing.T) {
	convey.Convey("RemovePublicTask", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.RemovePublicTask(c, 1, 1, 1)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoPushPersonalTask pushes task 1 into reviewer 1's personal list.
func TestDaoPushPersonalTask(t *testing.T) {
	convey.Convey("PushPersonalTask", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
			uid        = int64(1)
			taskid     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.PushPersonalTask(c, businessID, flowID, uid, taskid)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoRemovePersonalTask undoes the push above.
func TestDaoRemovePersonalTask(t *testing.T) {
	convey.Convey("RemovePersonalTask", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
			uid        = int64(1)
			taskid     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.RemovePersonalTask(c, businessID, flowID, uid, taskid)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoPushDelayTask pushes task 1 into reviewer 1's delay queue.
func TestDaoPushDelayTask(t *testing.T) {
	convey.Convey("PushDelayTask", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
			uid        = int64(1)
			taskid     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.PushDelayTask(c, businessID, flowID, uid, taskid)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoRemoveDelayTask undoes the delay push above.
func TestDaoRemoveDelayTask(t *testing.T) {
	convey.Convey("RemoveDelayTask", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
			uid        = int64(1)
			taskid     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.RemoveDelayTask(c, businessID, flowID, uid, taskid)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoSetWeight drives SetWeight but asserts nothing — the error is
// silently dropped. TODO(review): assert on the returned error.
func TestDaoSetWeight(t *testing.T) {
	convey.Convey("SetWeight", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
			id         = int64(1)
			weight     = int64(10)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			d.SetWeight(c, businessID, flowID, id, weight)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
			})
		})
	})
}

// TestDaoGetWeight is an empty placeholder. TODO(review): implement or drop.
func TestDaoGetWeight(t *testing.T) {
	convey.Convey("GetWeight", t, func(ctx convey.C) {
	})
}

// TestDaoTopWeights is an empty placeholder. TODO(review): implement or drop.
func TestDaoTopWeights(t *testing.T) {
	convey.Convey("TopWeights", t, func(ctx convey.C) {
	})
}

// TestDaoCreateUnionSet snapshots the biz 1 / flow 1 public pool.
func TestDaoCreateUnionSet(t *testing.T) {
	convey.Convey("CreateUnionSet", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.CreateUnionSet(c, businessID, flowID)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoRangeUinonSet is an empty placeholder. TODO(review): implement or drop.
func TestDaoRangeUinonSet(t *testing.T) {
	convey.Convey("RangeUinonSet", t, func(ctx convey.C) {
	})
}

// TestDaoDeleteUinonSet removes the snapshot created above.
func TestDaoDeleteUinonSet(t *testing.T) {
	convey.Convey("DeleteUinonSet", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			businessID = int64(1)
			flowID     = int64(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.DeleteUinonSet(c, businessID, flowID)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoIncresByField bumps several counter fields in sequence.
// NOTE(review): only the final err is asserted — each reassignment discards
// the previous call's error.
func TestDaoIncresByField(t *testing.T) {
	convey.Convey("IncresByField", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := d.IncresByField(c, 1, 1, 1, model.Dispatch, 1)
			err = d.IncresByField(c, 1, 1, 1, model.Release, 1)
			err = d.IncresByField(c, 1, 1, 1, model.Submit, 1)
			err = d.IncresByField(c, 1, 1, 1, model.Delay, 1)
			err = d.IncresByField(c, 1, 1, 1, fmt.Sprintf(model.RscState, 1), 1)
			err = d.IncresByField(c, 1, 1, 1, model.UseTime, 112)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoFlushReport drains the stats hashes and prints them for inspection.
func TestDaoFlushReport(t *testing.T) {
	convey.Convey("FlushReport", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			data, err := d.FlushReport(c)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				for key, val := range data {
					fmt.Println("key:", key)
					fmt.Println("val:", string(val))
				}
			})
		})
	})
}

View File

@@ -0,0 +1,143 @@
package dao
import (
"context"
"encoding/json"
"sync"
"go-common/app/job/main/aegis/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
// rmux serializes all report-stat mutations (setSet/setHash/setField
// sequences) against the FlushReport drain cycle.
var rmux sync.Mutex
// IncresByField bumps one counter field in a reviewer's personal stats hash,
// first registering the hash key in the member set and ensuring the hash
// exists with a zeroed "ds" default field. rmux serializes the whole
// sequence against FlushReport.
func (d *Dao) IncresByField(c context.Context, bizid, flowid, uid int64, field string, value int64) (err error) {
	rmux.Lock()
	defer rmux.Unlock()
	conn := d.redis.Get(c)
	defer conn.Close()
	hk := model.PersonalHashKey(bizid, flowid, uid)
	if err = d.setSet(conn, hk); err != nil {
		return
	}
	if err = d.setHash(conn, hk, "ds"); err != nil {
		return
	}
	return d.setField(conn, hk, field, value)
}
// IncresTaskInOut bumps the flow-level in/out-of-review counter by one,
// following the same register/ensure/increment sequence as IncresByField.
func (d *Dao) IncresTaskInOut(c context.Context, bizid, flowid int64, inOrOut string) (err error) {
	rmux.Lock()
	defer rmux.Unlock()
	conn := d.redis.Get(c)
	defer conn.Close()
	hk := model.TotalHashKey(bizid, flowid)
	if err = d.setSet(conn, hk); err != nil {
		return
	}
	if err = d.setHash(conn, hk, inOrOut); err != nil {
		return
	}
	return d.setField(conn, hk, inOrOut, 1)
}
//FlushReport drains every pending report hash listed in the index set and
//returns them keyed by hash key (values are JSON-encoded field->count maps).
//Consumed hashes and the index set are then deleted (best effort).
//
//Fixes vs. the original: the pipeline Flush error is now checked; a
//json.Marshal failure no longer leaks into the returned error nor stores a
//nil entry; the per-key DELs are batched into one checked round trip.
func (d *Dao) FlushReport(c context.Context) (data map[string][]byte, err error) {
	data = make(map[string][]byte)
	rmux.Lock()
	defer rmux.Unlock()
	conn := d.redis.Get(c)
	defer conn.Close()
	keys, err := redis.Strings(conn.Do("SMEMBERS", model.SetKey))
	if err != nil {
		log.Error("SMEMBERS %s error(%v)", model.SetKey, err)
		return
	}
	if len(keys) == 0 {
		log.Info("FlushReport empty")
		return
	}
	// pipeline one HGETALL per hash key
	for _, key := range keys {
		if err = conn.Send("HGETALL", key); err != nil {
			log.Error("HGETALL %s error(%v)", key, err)
			return
		}
	}
	if err = conn.Flush(); err != nil {
		log.Error("FlushReport Flush error(%v)", err)
		return
	}
	for _, key := range keys {
		var mp map[string]int64
		if mp, err = redis.Int64Map(conn.Receive()); err != nil {
			log.Error("Receive error(%v)", err)
			return
		}
		bs, merr := json.Marshal(mp)
		if merr != nil {
			// skip the broken entry but keep flushing the rest
			log.Error("Marshal mp(%+v) error(%v)", mp, merr)
			continue
		}
		data[key] = bs
	}
	// delete the consumed hashes plus the index set in a single DEL
	args := make([]interface{}, 0, len(keys)+1)
	for _, key := range keys {
		args = append(args, key)
	}
	args = append(args, model.SetKey)
	if _, derr := conn.Do("DEL", args...); derr != nil {
		log.Error("DEL report keys error(%v)", derr)
	}
	return
}
//setSet records hk in the index set so FlushReport can discover it later.
//Fix: the original used := which shadowed the named return, so SADD failures
//were logged but the function always returned nil.
func (d *Dao) setSet(conn redis.Conn, hk string) (err error) {
	if _, err = conn.Do("SADD", model.SetKey, hk); err != nil {
		log.Error("setSet SADD(%s,%s) error(%v)", model.SetKey, hk, err)
	}
	return
}
//setHash guarantees the hash at key exists, seeding defaultfield with 0 the
//first time it is seen.
func (d *Dao) setHash(conn redis.Conn, key string, defaultfield string) (err error) {
	exist, err := redis.Bool(conn.Do("EXISTS", key))
	if err != nil {
		log.Error("setHash EXISTS(%s) error(%v)", key, err)
		return
	}
	if exist {
		return
	}
	if _, err = conn.Do("HMSET", key, defaultfield, 0); err != nil {
		log.Error("setHash HMSET(%s,%s,%d) error(%v)", key, defaultfield, 0, err)
	}
	return
}
//setField atomically adds value to the given hash field.
//
//Fixes vs. the original: the HEXISTS + HMSET-to-0 pre-check is dropped
//(HINCRBY already initializes a missing field to 0, so the two extra round
//trips were pure overhead); the HINCRBY error is now returned instead of
//being logged and discarded by an unconditional `return nil`; the log line
//reports the actual increment value and has matching format arguments.
func (d *Dao) setField(conn redis.Conn, key string, field string, value int64) (err error) {
	if _, err = conn.Do("HINCRBY", key, field, value); err != nil {
		log.Error("setField HINCRBY(%s,%s,%d) error(%v)", key, field, value, err)
	}
	return
}

View File

@@ -0,0 +1,43 @@
package dao
import (
"context"
relmod "go-common/app/service/main/relation/model"
uprpc "go-common/app/service/main/up/api/v1"
"go-common/library/log"
terrors "github.com/pkg/errors"
)
// FansCount returns the follower count of mid via the relation RPC.
// In debug mode a fixed value of 10086 is returned without any RPC call.
func (d *Dao) FansCount(c context.Context, mid int64) (fans int64, err error) {
	if d.c.Debug {
		return 10086, nil
	}
	stat, err := d.relRPC.Stat(c, &relmod.ArgMid{Mid: mid})
	if err != nil || stat == nil {
		log.Error("FansCount error(%v)", terrors.WithStack(err))
		return
	}
	return stat.Follower, nil
}
// UpSpecial returns the special-group ids the upper belongs to via the
// up-service gRPC. In debug mode it returns an empty result without calling out.
func (d *Dao) UpSpecial(c context.Context, mid int64) (groupids []int64, err error) {
	if d.c.Debug {
		return
	}
	var reply *uprpc.UpSpecialReply
	if reply, err = d.upRPC.UpSpecial(c, &uprpc.UpSpecialReq{Mid: mid}); err != nil || reply == nil {
		log.Error("UpSpecial(%d) error(%+v)", mid, err)
		return
	}
	if sp := reply.UpSpecial; sp != nil {
		groupids = sp.GroupIDs
	}
	return
}

View File

@@ -0,0 +1,24 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoFansCount smoke-tests FansCount for mid 0.
func TestDaoFansCount(t *testing.T) {
	convey.Convey("FansCount", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			fans, err := d.FansCount(context.Background(), int64(0))
			ctx.Convey("Then err should be nil.fans should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(fans, convey.ShouldNotBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,22 @@
package dao
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TODO(review): the three tests below are empty placeholders with no assertions.
func Test_TopWeights(t *testing.T) {
	Convey("TopWeights", t, func() {
	})
}
func Test_SetWeight(t *testing.T) {
	// NOTE(review): the convey description reads "TopWeights" — looks like a
	// copy-paste slip, but it is runtime data so it is left untouched here.
	Convey("TopWeights", t, func() {
	})
}
func Test_TaskActiveConsumer(t *testing.T) {
	Convey("TaskActiveConsumer", t, func() {
	})
}

View File

@@ -0,0 +1,40 @@
# Bazel build rules for app/job/main/aegis/model.
# tags = ["automanaged"]: kept in sync by tooling — do not hand-edit srcs.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)
go_library(
    name = "go_default_library",
    srcs = [
        "common.go",
        "databus.go",
        "email.go",
        "message.go",
        "report.go",
        "resource.go",
        "task.go",
    ],
    importpath = "go-common/app/job/main/aegis/model",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = ["//library/time:go_default_library"],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app/job/main/aegis/model/monitor:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,78 @@
package model
import (
"database/sql/driver"
"fmt"
"strconv"
xtime "time"
)
// WaitTime 计算等待时长
func WaitTime(ctime xtime.Time) string {
wt := xtime.Since(ctime)
h := int(wt.Hours())
m := int(wt.Minutes()) % 60
s := int(wt.Seconds()) % 60
return fmt.Sprintf("%.2d:%.2d:%.2d", h, m, s)
}
//IntTime .
type IntTime int64
// Scan scan time.
func (jt *IntTime) Scan(src interface{}) (err error) {
switch sc := src.(type) {
case xtime.Time:
*jt = IntTime(sc.Unix())
case string:
var i int64
i, err = strconv.ParseInt(sc, 10, 64)
*jt = IntTime(i)
}
return
}
// Value get time value.
func (jt IntTime) Value() (driver.Value, error) {
return xtime.Unix(int64(jt), 0), nil
}
// Time get time.
func (jt IntTime) Time() xtime.Time {
return xtime.Unix(int64(jt), 0)
}
// UnmarshalJSON implement Unmarshaler
func (jt *IntTime) UnmarshalJSON(data []byte) error {
if data == nil || len(data) <= 1 {
*jt = 0
return nil
}
if data[0] != '"' {
// 1.直接判断数字
sti, err := strconv.Atoi(string(data))
if err == nil {
*jt = IntTime(sti)
}
return nil
}
str := string(data[1 : len(data)-1])
// 2.标准格式判断
st, err := xtime.ParseInLocation("2006-01-02 15:04:05", str, xtime.Local)
if err == nil {
*jt = IntTime(st.Unix())
return nil
}
*jt = IntTime(0)
return nil
}
//BaseResponse is the common code/message envelope returned by internal HTTP APIs.
type BaseResponse struct {
	Code    int64  `json:"code"`
	Message string `json:"message"`
}

View File

@@ -0,0 +1,9 @@
package model
// CreateTaskMsg is the databus payload asking the job to create an audit task
// for resource RID of business BizID at flow FlowID.
// DispatchLimit — presumably caps how many reviewers the task is dispatched to;
// confirm against the producer.
type CreateTaskMsg struct {
	BizID         int64 `json:"business_id"`
	RID           int64 `json:"rid"`
	FlowID        int64 `json:"flow_id"`
	DispatchLimit int64 `json:"dispatch_limit"`
}

View File

@@ -0,0 +1,9 @@
package model
//MoniTemp is the monitor alert e-mail template: sender, recipients, subject
//and body.
type MoniTemp struct {
	From    string   `json:"from"`
	Members []string `json:"members"`
	Subject string   `json:"subject"`
	Body    string   `json:"body"`
}

View File

@@ -0,0 +1,27 @@
package model
import (
"encoding/json"
)
// binlog action names as emitted by the canal-style databus producer.
const (
	BinlogInsert = "insert"
	BinlogUpdate = "update"
	BinlogDelete = "delete"
)

// BinLog is a databus binlog message: the action, source table and the raw
// JSON of the row before (Old) and after (New) the change.
type BinLog struct {
	Action string          `json:"action"`
	Table  string          `json:"table"`
	New    json.RawMessage `json:"new"`
	Old    json.RawMessage `json:"old"`
	// MTS has no json tag — it unmarshals from a literal "MTS" key;
	// NOTE(review): confirm the producer actually emits that key.
	MTS int64
}

//RscMsg is a databus resource message: an action plus the business-specific
//raw payload, routed by BizID.
type RscMsg struct {
	Action string          `json:"action"`
	BizID  int64           `json:"business_id"`
	Raw    json.RawMessage `json:"raw"`
}

View File

@@ -0,0 +1,28 @@
# Bazel build rules for app/job/main/aegis/model/monitor.
# tags = ["automanaged"]: kept in sync by tooling — do not hand-edit srcs.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)
go_library(
    name = "go_default_library",
    srcs = ["monitor.go"],
    importpath = "go-common/app/job/main/aegis/model/monitor",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,186 @@
package monitor
const (
	// RedisPrefix monitor stats redis key; format arg: business id.
	// NOTE(review): the original note also mentioned a monitor-ID argument,
	// but only one %d is present — confirm against the writers of this key.
	RedisPrefix = "monitor_stats_%d"
	// RedisDelArcInfo redis key holding deleted-archive monitor entries
	RedisDelArcInfo = "monitor_stats_del_arc"
	// BusVideo video business
	BusVideo = 1
	// BusArc archive business
	BusArc = 2
	// NotifyTypeEmail notify via e-mail
	NotifyTypeEmail = 1
	// NotityTypeSms notify via SMS
	// NOTE(review): name misspells "Notify"; renaming would break callers, so it stays.
	NotityTypeSms = 2
	// archive business constants
	// ArchiveBitPGC PGC attribute bit of an archive
	ArchiveBitPGC = 9
	// ArchiveStateDel archive deleted state
	ArchiveStateDel = -100
	// ArchiveOriginal self-made (original) archive
	ArchiveOriginal = 1
	// RuleHighUpDelArc rule id: high-energy-union uppers mass-deleting archives
	RuleHighUpDelArc = 1
	// RuleFamUpDelArc rule id: famous uppers mass-deleting archives
	RuleFamUpDelArc = 17
	// CompGT greater than
	CompGT = ">"
	// CompLT less than
	CompLT = "<"
	// CompGET greater than or equal
	CompGET = ">="
	// CompLET less than or equal
	CompLET = "<="
	// CompNE not equal
	CompNE = "!="
	// CompE equal
	CompE = "="
)
var (
	// SpecialTypeIDs special archive categories (the set changes very infrequently)
	SpecialTypeIDs = map[int64]int8{
		15: 1, 34: 1, 32: 1, 82: 1, 33: 1, 83: 1, 145: 1, 146: 1,
		147: 1, 153: 1, 185: 1, 186: 1, 187: 1, 37: 1, 178: 1, 179: 1,
		180: 1, 128: 1, 85: 1, 86: 1, 183: 1,
	}
)
// BinlogArchive 稿件 binlog 结构
type BinlogArchive struct {
ID int64 `json:"id"`
State int64 `json:"state"`
Round int64 `json:"round"`
MID int64 `json:"mid"`
Attr int64 `json:"attribute"`
TypeID int64 `json:"typeid"`
IsSpecTID int8 `json:"is_special_tid"`
HumanRank int `json:"humanrank"`
Duration int `json:"duration"`
Desc string `json:"desc"`
Title string `json:"title"`
Cover string `json:"cover"`
Content string `json:"content"`
Tag string `json:"tag"`
Copyright int8 `json:"copyright"`
AreaLimit int8 `json:"arealimit"`
Author string `json:"author"`
Access int `json:"access"`
Forward int `json:"forward"`
PubTime string `json:"pubtime"`
Reason string `json:"reject_reason"`
CTime string `json:"ctime"`
MTime string `json:"mtime"`
PTime string `json:"ptime"`
Addit *ArchiveAddit `json:"_"`
}
// BinlogVideo 视频binlog结构
type BinlogVideo struct {
ID int64 `json:"id"`
Filename string `json:"filename"`
Cid int64 `json:"cid"`
Aid int64 `json:"aid"`
Title string `json:"eptitle"`
Desc string `json:"description"`
SrcType string `json:"src_type"`
Duration int64 `json:"duration"`
Filesize int64 `json:"filesize"`
Resolutions string `json:"resolutions"`
Playurl string `json:"playurl"`
FailCode int8 `json:"failinfo"`
Index int `json:"index_order"`
Attribute int32 `json:"attribute"`
XcodeState int8 `json:"xcode_state"`
State int8 `json:"state"`
Status int16 `json:"status"`
CTime string `json:"ctime"`
MTime string `json:"mtime"`
}
// ArchiveAddit 稿件附加属性
type ArchiveAddit struct {
Aid int64 `json:"aid"`
MissionID int64 `json:"mission_id"`
UpFrom int8 `json:"up_from"`
FromIP int64 `json:"from_ip"`
IPv6 []byte `json:"ipv6"`
Source string `json:"source"`
OrderID int64 `json:"order_id"`
RecheckReason string `json:"recheck_reason"`
RedirectURL string `json:"redirect_url"`
FlowID int64 `json:"flow_id"`
Advertiser string `json:"advertiser"`
FlowRemark string `json:"flow_remark"`
DescFormatID int64 `json:"desc_format_id"`
Desc string `json:"desc"`
Dynamic string `json:"dynamic"`
}
// RuleResultRes is the monitor-result API response envelope.
type RuleResultRes struct {
	Code int               `json:"code"`
	Data []*RuleResultData `json:"data"`
}
// RuleResultData pairs one monitor rule with its current statistics.
type RuleResultData struct {
	Rule  *Rule  `json:"rule"`
	Stats *Stats `json:"stats"`
}
// Rule is one monitor rule record and its parsed configuration.
type Rule struct {
	ID       int64     `json:"id"`
	Type     int8      `json:"type"`
	BID      int8      `json:"bid"`
	Name     string    `json:"name"`
	State    int8      `json:"state"`
	STime    string    `json:"stime"`
	ETime    string    `json:"etime"`
	CTime    string    `json:"ctime"`
	MTime    string    `json:"mtime"`
	UID      int64     `json:"uid"`
	RuleConf *RuleConf `json:"rule"`
}
// RuleConf is the monitor scheme configuration.
type RuleConf struct {
	Name    string `json:"name"`
	MoniCdt map[string]struct { // conditions a record must meet to be monitored
		Comp string `json:"comparison"`
	} `json:"moni_cdt"`
	NotifyCdt map[string]struct { // thresholds that trigger a notification
		Comp  string `json:"comparison"`
		Value int64  `json:"value"`
	} `json:"notify_cdt"`
	Notify struct { // notification channel configuration
		Way    int8     `json:"way"`
		Member []string `json:"member"`
	} `json:"notify"`
}
// Stats holds the aggregated counters for one monitor rule.
type Stats struct {
	TotalCount int `json:"total_count"`
	MoniCount  int `json:"moni_count"`
	MaxTime    int `json:"max_time"`
}
// FieldsConf describes how one monitored field is compared.
type FieldsConf struct {
	Comparison string
}
// DelArcInfo is one record of an upper deleting an archive.
type DelArcInfo struct {
	AID   int64  `json:"aid"`
	MID   int64  `json:"mid"`
	Time  string `json:"time"`
	Title string `json:"title"`
}

View File

@@ -0,0 +1,88 @@
package model
import (
"fmt"
"strconv"
"strings"
)
//hash fields used inside the redis report hashes, plus the report row types
const (
	// Dispatch dispatched-task counter field
	Dispatch = "ds"
	// Delay delayed-task counter field
	Delay = "dy"
	// Submit counter field; args: submit kind (task submit / resource submit / task close), previous owner uid
	Submit = "st_%d_%d"
	// Release released-task counter field
	Release = "rl"
	// RscState per-resource-state counter field; arg: state
	RscState = "rs_%d"
	// UseTime accumulated handling-time field (seconds)
	UseTime = "ut"
	// SetKey redis set indexing every pending report hash key
	SetKey = "report_set"
	//type of persisted report row
	// TypeMeta per-reviewer report
	TypeMeta = int8(0)
	// TypeTotal aggregated in/out report
	TypeTotal = int8(1)
)
//RIR (resource item report) identifies one resource flowing through review:
//business, flow, reviewer uid and resource id.
type RIR struct {
	BizID  int64
	FlowID int64
	UID    int64
	RID    int64
}
//Report is one persisted task_report row: a JSON content blob for a
//(business, flow, uid) triple and a report type (TypeMeta or TypeTotal).
type Report struct {
	ID         int64 `gorm:"AUTO_INCREMENT;primary_key;"`
	BusinessID int64 `gorm:"column:business_id"`
	FlowID     int64 `gorm:"column:flow_id"`
	UID        int64 `gorm:"column:uid"`
	TYPE       int8  `gorm:"column:type"`
	Content    []byte `gorm:"column:content"`
}
//TableName maps Report onto the task_report table for gorm.
func (r Report) TableName() string {
	return "task_report"
}
//PersonalHashKey builds the per-reviewer report hash key:
//report_hash_<biz>_<flow>_<uid>.
func PersonalHashKey(bizid, flowid, uid int64) string {
	return "report_hash_" + strconv.FormatInt(bizid, 10) +
		"_" + strconv.FormatInt(flowid, 10) +
		"_" + strconv.FormatInt(uid, 10)
}
//TotalHashKey builds the aggregate in/out report hash key for a business and
//flow; the trailing uid slot is always 0.
func TotalHashKey(bizid, flowid int64) string {
	return "total_inout_" + strconv.FormatInt(bizid, 10) +
		"_" + strconv.FormatInt(flowid, 10) + "_0"
}
//ParseKey splits a report redis key back into its components. Accepted
//shapes: report_hash_<biz>_<flow>_<uid> (TypeMeta) and
//total_inout_<biz>_<flow>_0 (TypeTotal); biz and flow must be non-zero.
//
//Fix: errors are now built with an explicit format string — the original
//fmt.Errorf(key) used the untrusted key itself as a format string, so keys
//containing '%' produced garbled error text (and trip `go vet`).
func ParseKey(key string) (tp int8, bizid, flowid, uid int, err error) {
	arr := strings.Split(key, "_")
	if len(arr) != 5 {
		err = fmt.Errorf("invalid report key %q", key)
		return
	}
	switch arr[0] + "_" + arr[1] {
	case "report_hash":
		tp = TypeMeta
	case "total_inout":
		tp = TypeTotal
	default:
		err = fmt.Errorf("invalid report key %q", key)
		return
	}
	if bizid, err = strconv.Atoi(arr[2]); err != nil || bizid == 0 {
		err = fmt.Errorf("invalid report key %q", key)
		return
	}
	if flowid, err = strconv.Atoi(arr[3]); err != nil || flowid == 0 {
		err = fmt.Errorf("invalid report key %q", key)
		return
	}
	if uid, err = strconv.Atoi(arr[4]); err != nil {
		err = fmt.Errorf("invalid report key %q", key)
		return
	}
	return
}

View File

@@ -0,0 +1,110 @@
package model
import (
"encoding/json"
"net/url"
"strconv"
"strings"
xtime "go-common/library/time"
)
// Resource is one row of the aegis resource table: the business payload under
// review plus generic extra1..extra6 / extra1s..extra4s overflow columns whose
// meaning is business-specific.
// NOTE(review): OCtime and Ptime are kept as raw strings, not parsed times.
type Resource struct {
	ID         int64      `json:"id" gorm:"primary_key" form:"id"`
	BusinessID int64      `json:"business_id" gorm:"column:business_id" form:"business_id"`
	OID        string     `json:"oid" gorm:"column:oid" form:"oid"`
	MID        int64      `json:"mid" gorm:"column:mid" form:"mid"`
	Content    string     `json:"content" gorm:"column:content" form:"content"`
	Extra1     int64      `json:"extra1" gorm:"column:extra1" form:"extra1"`
	Extra2     int64      `json:"extra2" gorm:"column:extra2" form:"extra2"`
	Extra3     int64      `json:"extra3" gorm:"column:extra3" form:"extra3"`
	Extra4     int64      `json:"extra4" gorm:"column:extra4" form:"extra4"`
	Extra1s    string     `json:"extra1s" gorm:"column:extra1s" form:"extra1s"`
	Extra2s    string     `json:"extra2s" gorm:"column:extra2s" form:"extra2s"`
	MetaData   string     `json:"metadata" gorm:"column:metadata" form:"metadata"`
	Ctime      xtime.Time `json:"ctime" gorm:"column:ctime"`
	Mtime      xtime.Time `json:"mtime" gorm:"column:mtime"`
	Extra5     int64      `json:"extra5" gorm:"column:extra5" form:"extra5"`
	Extra6     int64      `json:"extra6" gorm:"column:extra6" form:"extra6"`
	Extra3s    string     `json:"extra3s" gorm:"column:extra3s" form:"extra3s"`
	Extra4s    string     `json:"extra4s" gorm:"column:extra4s" form:"extra4s"`
	ExtraTime1 string     `json:"extratime1" gorm:"column:extratime1" form:"extratime1"`
	OCtime     string     `json:"octime" gorm:"column:octime" form:"octime"`
	Ptime      string     `json:"ptime" gorm:"column:ptime" form:"ptime"`
}
// TableName maps Resource onto the resource table for gorm.
func (t *Resource) TableName() string {
	return "resource"
}
//AddOption add option: a Resource payload plus flow-engine routing fields.
type AddOption struct {
	Resource
	State int   `form:"state" json:"state"`
	NetID int64 `form:"net_id" json:"net_id"`
}

// ToQueryURI converts the option's fields into url.Values for the add API.
func (opt AddOption) ToQueryURI() url.Values {
	var params = url.Values{}
	params.Add("business_id", strconv.FormatInt(opt.BusinessID, 10))
	params.Add("net_id", strconv.FormatInt(opt.NetID, 10))
	params.Add("oid", opt.OID)
	params.Add("mid", strconv.FormatInt(opt.MID, 10))
	params.Add("content", opt.Content)
	params.Add("extra1", strconv.FormatInt(opt.Extra1, 10))
	params.Add("extra2", strconv.FormatInt(opt.Extra2, 10))
	params.Add("extra3", strconv.FormatInt(opt.Extra3, 10))
	params.Add("extra4", strconv.FormatInt(opt.Extra4, 10))
	params.Add("extra5", strconv.FormatInt(opt.Extra5, 10))
	// fix: Extra6 was previously added under a duplicated "extra5" key,
	// so the "extra6" parameter never reached the downstream API.
	params.Add("extra6", strconv.FormatInt(opt.Extra6, 10))
	params.Add("extra1s", opt.Extra1s)
	params.Add("extra2s", opt.Extra2s)
	params.Add("extra3s", opt.Extra3s)
	params.Add("extra4s", opt.Extra4s)
	params.Add("extratime1", opt.ExtraTime1)
	params.Add("octime", opt.OCtime)
	params.Add("ptime", opt.Ptime)
	params.Add("metadata", opt.MetaData)
	return params
}
//UpdateOption update option: identifies a resource (business, net, oid) and
//carries the column/value pairs to change.
type UpdateOption struct {
	BusinessID int64                  `json:"business_id"`
	NetID      int64                  `json:"net_id"`
	OID        string                 `json:"oid"`
	Update     map[string]interface{} `json:"update"`
}

//ToQueryURI converts the option into url.Values for the update API; the
//Update map is JSON-encoded into a single "update" parameter (silently
//omitted if it cannot be marshaled).
func (opt UpdateOption) ToQueryURI() url.Values {
	params := url.Values{}
	params.Add("business_id", strconv.FormatInt(opt.BusinessID, 10))
	params.Add("net_id", strconv.FormatInt(opt.NetID, 10))
	params.Add("oid", opt.OID)
	bs, err := json.Marshal(opt.Update)
	if err == nil && len(bs) > 0 {
		params.Add("update", string(bs))
	}
	return params
}
//CancelOption identifies a batch of resources to cancel plus the reason.
type CancelOption struct {
	BusinessID int64    `json:"business_id"`
	Oids       []string `json:"oids"`
	Reason     string   `json:"reason"`
}

// ToQueryURI converts the option into url.Values for the cancel API; the
// oids are joined into a single comma-separated parameter.
func (opt CancelOption) ToQueryURI() url.Values {
	params := url.Values{}
	params.Add("business_id", strconv.FormatInt(opt.BusinessID, 10))
	params.Add("oids", strings.Join(opt.Oids, ","))
	params.Add("reason", opt.Reason)
	return params
}

View File

@@ -0,0 +1,184 @@
package model
import (
libtime "go-common/library/time"
)
// task states, task log actions and weight kinds
const (
	// TaskStateInit initial state
	TaskStateInit = int8(0)
	// TaskStateDispatch dispatched to a reviewer
	TaskStateDispatch = int8(1)
	// TaskStateDelay delayed
	TaskStateDelay = int8(2)
	// TaskStateSubmit submitted through the task
	TaskStateSubmit = int8(3)
	// TaskStateRscSb submitted through the resource list
	TaskStateRscSb = int8(4)
	// TaskStateClosed task closed
	TaskStateClosed = int8(5)
	// ActionCreate task created
	ActionCreate = uint8(0)
	// ActionSeize task seized
	ActionSeize = uint8(1)
	// ActionRelease task released
	ActionRelease = uint8(2)
	// ActionDelay task delayed
	ActionDelay = uint8(3)
	// ActionSubmit task submitted
	ActionSubmit = uint8(4)
	// ActionUnknow any other change
	ActionUnknow = uint8(5)
	// task audit-log business/type ids
	LogBusinessTask     = int(232)
	LogTypeTaskDispatch = int(1)
	LogTypeTaskConsumer = int(2)
	LogTYpeTaskWeight   = int(3)
	// WeightTypeCycle cyclic (periodic) weight
	WeightTypeCycle = int8(0)
	// WeightTypeConst constant weight
	WeightTypeConst = int8(1)
)
// task-config and consumer states, config kinds and roles
const (
	// ConfigStateOn config enabled
	ConfigStateOn = int8(0)
	// ConfigStateOff config disabled
	ConfigStateOff = int8(1)
	// ConsumerStateOn consumer on
	ConsumerStateOn = int8(1)
	// ConsumerStateOff consumer off
	ConsumerStateOff = int8(0)
	// ActionConsumerOff turn a consumer off
	ActionConsumerOff = int8(0)
	// ActionConsumerOn turn a consumer on
	ActionConsumerOn = int8(1)
	// TaskConfigAssign assignment config
	TaskConfigAssign = int8(1)
	// TaskConfigRangeWeight range-based weight config
	TaskConfigRangeWeight = int8(2)
	// TaskConfigEqualWeight equal-value weight config
	TaskConfigEqualWeight = int8(3)
	// TaskRoleMember team member
	TaskRoleMember = int8(1)
	// TaskRoleLeader team leader
	TaskRoleLeader = int8(2)
)
// WeightItem pairs a task id with its computed weight value.
type WeightItem struct {
	ID     int64
	Weight int64
}
// Task is one review-task row (task table): a resource bound to a
// business/flow, optionally seized by reviewer UID, with its dispatch weight
// and the uploader info (MID/Fans/Group) used by the weight calculators.
type Task struct {
	ID         int64   `form:"id" json:"id" gorm:"AUTO_INCREMENT;primary_key;"`
	BusinessID int64   `form:"business_id" json:"business_id" gorm:"column:business_id"`
	FlowID     int64   `form:"flow_id" json:"flow_id" gorm:"column:flow_id"`
	RID        int64   `form:"rid" json:"rid" gorm:"column:rid"`
	AdminID    int64   `form:"admin_id" json:"admin_id" gorm:"column:admin_id"`
	UID        int64   `form:"uid" json:"uid" gorm:"column:uid"`
	State      int8    `form:"state" json:"state" gorm:"column:state"`
	Weight     int64   `form:"weight" json:"weight" gorm:"column:weight"`
	Utime      int64   `form:"utime" json:"utime" gorm:"column:utime"`
	Gtime      IntTime `form:"gtime" json:"gtime" gorm:"column:gtime"`
	MID        int64   `form:"mid" json:"mid" gorm:"column:mid"`
	Fans       int64   `form:"fans" json:"fans" gorm:"column:fans"`
	Group      string  `form:"group" json:"group" gorm:"column:group"`
	// fix: the struct-tag key was misspelled `grom:`; gorm happened to map
	// the column anyway via its default snake-case naming, but the explicit
	// tag was dead weight. Corrected to `gorm:`.
	Reason string  `form:"reason" json:"reason" gorm:"column:reason"`
	Ctime  IntTime `form:"ctime" json:"ctime" gorm:"column:ctime"`
	Mtime  IntTime `form:"mtime" json:"mtime" gorm:"column:mtime"`
}
// WeightLog records one weight computation for audit logging: the inputs
// (fans, group, wait time) and each component's contribution plus the total.
type WeightLog struct {
	UPtime      string        `json:"uptime"`
	Mid         int64         `json:"mid"`
	Fans        int64         `json:"fans"`
	FansWeight  int64         `json:"fans_weight"`
	Group       string        `json:"group"`
	GroupWeight int64         `json:"group_weight"`
	WaitTime    string        `json:"wait_time"`
	WaitWeight  int64         `json:"wait_weight"`
	EqualWeight int64         `json:"config_weight"`
	ConfigItems []*ConfigItem `json:"config_items"`
	Weight      int64         `json:"weight"`
}
// ConfigItem names one config entry that contributed to a weight, with its
// description and the operator who configured it.
type ConfigItem struct {
	Name  string `json:"name"`
	Desc  string `json:"desc"`
	Uname string `json:"uname"`
}
// EqualWeightConfig is an equal-value weight rule applied to a set of ids.
type EqualWeightConfig struct {
	Uname       string // operator who configured the rule
	Description string // rule description
	Name        string `json:"name"` // "taskid" or "mid" — which id kind IDs refers to
	IDs         string `json:"ids"`
	Weight      int64  `json:"weight"`
	Type        int8   `json:"type"` // WeightTypeCycle or WeightTypeConst
}
// RangeWeightConfig is a named set of threshold ranges mapping to weights.
type RangeWeightConfig struct {
	Name  string         `json:"name"`
	Range []*RangeConfig `json:"range"`
}
// RangeConfig maps one threshold to a weight value.
type RangeConfig struct {
	Threshold int64 `json:"threshold"`
	Weight    int64 `json:"weight"`
}
// AssignConfig assigns tasks for the listed uploader Mids to reviewer Uids,
// on behalf of the operator Admin.
type AssignConfig struct {
	Admin int64   `json:"-"`
	Mids  []int64 `json:"mids"`
	Uids  []int64 `json:"uids"`
}
// TaskConfig is one task_config row: a JSON-encoded config blob (ConfJSON)
// whose shape depends on ConfType (assign / range-weight / equal-weight),
// active between Btime and Etime for a business/flow.
type TaskConfig struct {
	ID          int64        `form:"id" json:"id" gorm:"AUTO_INCREMENT;primary_key;"`
	ConfJSON    string       `json:"conf_json" gorm:"column:conf_json"`
	ConfType    int8         `form:"conf_type" json:"conf_type" gorm:"column:conf_type"`
	BusinessID  int64        `form:"business_id" json:"business_id" gorm:"column:business_id"`
	FlowID      int64        `form:"flow_id" json:"flow_id" gorm:"column:flow_id"`
	Btime       libtime.Time `form:"btime" json:"btime" gorm:"column:btime"`
	Etime       libtime.Time `form:"etime" json:"etime" gorm:"column:etime"`
	State       int8         `form:"state" json:"state" gorm:"column:state"`
	UID         int64        `form:"uid" json:"uid" gorm:"column:uid"`
	Uname       string       `form:"uname" json:"uname" gorm:"column:uname"`
	Description string       `form:"description" json:"description" gorm:"column:description"`
	Ctime       libtime.Time `form:"ctime" json:"ctime" gorm:"column:ctime"`
	Mtime       libtime.Time `form:"mtime" json:"mtime" gorm:"column:mtime"`
}
// TableName maps TaskConfig onto the task_config table for gorm.
func (TaskConfig) TableName() string {
	return "task_config"
}
// WeightOPT holds the per-business tuning knobs for the weight manager
// (queue lengths and recalculation interval in minutes).
type WeightOPT struct {
	BusinessID   int64
	FlowID       int64
	TopListLen   int64
	BatchListLen int64
	RedisListLen int64
	DbListLen    int64
	AssignLen    int64
	Minute       int64
}

View File

@@ -0,0 +1,38 @@
# Bazel build rules for app/job/main/aegis/server/http.
# tags = ["automanaged"]: kept in sync by tooling — do not hand-edit srcs.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)
go_library(
    name = "go_default_library",
    srcs = [
        "debug.go",
        "http.go",
    ],
    importpath = "go-common/app/job/main/aegis/server/http",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/aegis/conf:go_default_library",
        "//app/job/main/aegis/service:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/net/http/blademaster/middleware/verify:go_default_library",
    ],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,15 @@
package http
import (
bm "go-common/library/net/http/blademaster"
)
// debugCache dumps the cache entries for the comma-separated keys in the
// "keys" query parameter (required); binding errors short-circuit with the
// framework's default error response.
func debugCache(c *bm.Context) {
	req := new(struct {
		Keys string `form:"keys" validate:"required"`
	})
	if bindErr := c.Bind(req); bindErr != nil {
		return
	}
	c.JSONMap(srv.DebugCache(req.Keys), nil)
}

View File

@@ -0,0 +1,54 @@
package http
import (
"net/http"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/http/blademaster/middleware/verify"
)
var (
srv *service.Service
vfy *verify.Verify
)
// Init wires the service and verifier into package globals, builds the
// blademaster HTTP server, registers routes and starts listening; a failed
// start is fatal.
func Init(s *service.Service, c *conf.Config) {
	srv = s
	vfy = verify.New(c.Verify)
	eng := bm.DefaultServer(c.BM)
	route(eng)
	err := eng.Start()
	if err == nil {
		return
	}
	log.Error("bm Start error(%v)", err)
	panic(err)
}
// route registers health endpoints and the /x/aegis route group.
func route(e *bm.Engine) {
	e.Ping(ping)
	e.Register(register)
	group := e.Group("/x/aegis")
	group.GET("/start", vfy.Verify, howToStart)
	group.GET("/debug/cache", debugCache)
}
// ping is the liveness probe: 503 when the service's dependencies are down.
func ping(c *bm.Context) {
	err := srv.Ping(c)
	if err == nil {
		return
	}
	log.Error("ping error(%v)", err)
	c.AbortWithStatus(http.StatusServiceUnavailable)
}
// register responds with an empty JSON object; used by the platform's
// service-discovery registration probe.
func register(c *bm.Context) {
	c.JSON(map[string]interface{}{}, nil)
}
// howToStart is the scaffold's example http request handler; it just echoes a
// fixed greeting string.
func howToStart(c *bm.Context) {
	c.String(0, "Golang 大法好 !!!")
}

View File

@@ -0,0 +1,76 @@
# Bazel build rules for app/job/main/aegis/service.
# tags = ["automanaged"]: kept in sync by tooling — do not hand-edit srcs.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)
go_library(
    name = "go_default_library",
    srcs = [
        "cache.go",
        "databus.go",
        "handler.go",
        "handler_resource.go",
        "handler_task.go",
        "monitor.go",
        "report.go",
        "service.go",
        "task_job.go",
        "task_log.go",
        "weight.go",
    ],
    importpath = "go-common/app/job/main/aegis/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/aegis/conf:go_default_library",
        "//app/job/main/aegis/dao:go_default_library",
        "//app/job/main/aegis/dao/email:go_default_library",
        "//app/job/main/aegis/dao/monitor:go_default_library",
        "//app/job/main/aegis/model:go_default_library",
        "//app/job/main/aegis/model/monitor:go_default_library",
        "//app/service/main/account/api:go_default_library",
        "//app/service/main/up/api/v1:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/queue/databus/databusutil:go_default_library",
        "//library/queue/databus/report:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
go_test(
    name = "go_default_test",
    srcs = [
        "monitor_test.go",
        "service_test.go",
    ],
    embed = [":go_default_library"],
    tags = ["automanaged"],
    deps = [
        "//app/job/main/aegis/conf:go_default_library",
        "//app/job/main/aegis/model/monitor:go_default_library",
        "//app/service/main/account/api:go_default_library",
        "//library/log:go_default_library",
        "//vendor/github.com/golang/mock/gomock:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

View File

@@ -0,0 +1,219 @@
package service
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/xstr"
)
// initCache primes the config and consumer caches at startup and seeds the
// old active biz-flow snapshot from the freshly loaded one.
func (s *Service) initCache() {
	s.newactiveBizFlow = map[string]struct{}{}
	ctx := context.Background()
	s.syncConfigCache(ctx)
	s.syncConsumerCache(ctx)
	s.oldactiveBizFlow = s.newactiveBizFlow
}
// cacheProc is the background refresh loop: it repairs leaked tasks, then
// every three minutes reloads configs, reconciles weight managers and
// refreshes the consumer cache.
func (s *Service) cacheProc() {
	for {
		s.syncTaskCache()
		time.Sleep(3 * time.Minute)
		ctx := context.Background()
		s.syncConfigCache(ctx)
		s.syncWeightWatch(ctx)
		s.syncConsumerCache(ctx)
	}
}
// syncTaskCache re-enqueues tasks that redis may have lost, by paging the task
// table for rows whose mtime has been static too long:
//  1. dispatched tasks idle >10min -> rewritten to cache and the owner's personal queue
//  2. delayed tasks idle >30min    -> rewritten to cache and the owner's delay queue
//  3. unassigned tasks idle >60min -> rewritten to cache and pushed to the public queue in batches
// Each 1000-row page is followed by a 1s pause to limit DB pressure; paging
// stops on query error or an empty page (errors are silently dropped —
// NOTE(review): consider logging them).
func (s *Service) syncTaskCache() {
	var (
		tasks  []*model.Task
		lastid = int64(0)
		err    error
	)
	// 1. dispatched tasks: unchanged for 10 minutes, check for leaks
	lastid = 0
	for {
		mtime := time.Now().Add(-10 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateDispatch, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 停滞任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
			s.dao.PushPersonalTask(context.Background(), task.BusinessID, task.FlowID, task.UID, task.ID)
		}
		time.Sleep(time.Second)
	}
	// 2. delayed tasks: unchanged for 30 minutes, check for leaks
	lastid = 0
	for {
		mtime := time.Now().Add(-30 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateDelay, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 延迟任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
			s.dao.PushDelayTask(context.Background(), task.BusinessID, task.FlowID, task.UID, task.ID)
		}
		time.Sleep(time.Second)
	}
	// 3. realtime (unassigned) tasks: unchanged for 1 hour, check for leaks
	lastid = 0
	for {
		mtime := time.Now().Add(-60 * time.Minute)
		if tasks, lastid, err = s.dao.QueryTask(context.Background(), model.TaskStateInit, mtime, lastid, 1000); err != nil || len(tasks) == 0 {
			break
		}
		for _, task := range tasks {
			log.Info("检测到遗漏 实时任务(%+v)", task)
			s.dao.SetTask(context.Background(), task)
		}
		s.dao.PushPublicTask(context.Background(), tasks...)
		time.Sleep(time.Second)
	}
}
// syncConfigCache reloads every active task config and atomically swaps the
// in-memory assign / range-weight / equal-weight caches plus the active
// biz-flow set (the previous set is kept for syncWeightWatch's diff).
//
// Fixes vs. the original: per-item json.Unmarshal / xstr.SplitInts failures
// used to be assigned to the named return before `continue`, so a bad last
// item leaked a non-nil error to the caller; they are now local and only
// logged. The duplicated "append or create" map branches are collapsed.
func (s *Service) syncConfigCache(c context.Context) (err error) {
	s.oldactiveBizFlow = s.newactiveBizFlow
	configs, err := s.dao.TaskActiveConfigs(c)
	if err != nil {
		return
	}
	rangeWCCache := make(map[int64]map[string]*model.RangeWeightConfig)
	equalWCCache := make(map[string][]*model.EqualWeightConfig)
	assignCache := make(map[string][]*model.AssignConfig)
	activeBizFlow := make(map[string]struct{})
	for _, item := range configs {
		key := fmt.Sprintf("%d-%d", item.BusinessID, item.FlowID)
		activeBizFlow[key] = struct{}{}
		switch item.ConfType {
		case model.TaskConfigAssign:
			assign := new(struct {
				Mids string `json:"mids"`
				Uids string `json:"uids"`
			})
			if uerr := json.Unmarshal([]byte(item.ConfJSON), assign); uerr != nil {
				log.Error("json.Unmarshal error(%v)", uerr)
				continue
			}
			// fall back to operator uid 399 when the config has no owner
			ac := &model.AssignConfig{Admin: 399}
			if item.UID > 0 {
				ac.Admin = item.UID
			}
			var perr error
			if ac.Mids, perr = xstr.SplitInts(strings.TrimSpace(assign.Mids)); perr != nil {
				log.Error("xstr.SplitInts error(%v)", perr)
				continue
			}
			if ac.Uids, perr = xstr.SplitInts(strings.TrimSpace(assign.Uids)); perr != nil {
				log.Error("xstr.SplitInts error(%v)", perr)
				continue
			}
			// append works on the nil slice of a missing key
			assignCache[key] = append(assignCache[key], ac)
		case model.TaskConfigRangeWeight:
			wcitem := &model.RangeWeightConfig{}
			if uerr := json.Unmarshal([]byte(item.ConfJSON), wcitem); uerr != nil {
				log.Error("json.Unmarshal error(%v)", uerr)
				continue
			}
			if rangeWCCache[item.BusinessID] == nil {
				rangeWCCache[item.BusinessID] = make(map[string]*model.RangeWeightConfig)
			}
			rangeWCCache[item.BusinessID][wcitem.Name] = wcitem
		case model.TaskConfigEqualWeight:
			ewcitem := &model.EqualWeightConfig{}
			if uerr := json.Unmarshal([]byte(item.ConfJSON), ewcitem); uerr != nil {
				log.Error("json.Unmarshal error(%v)", uerr)
				continue
			}
			ewcitem.Uname = item.Uname
			ewcitem.Description = item.Description
			ewcitem.IDs = strings.TrimSpace(ewcitem.IDs)
			equalWCCache[key] = append(equalWCCache[key], ewcitem)
		}
	}
	s.rangeWeightCfg = rangeWCCache
	s.equalWeightCfg = equalWCCache
	s.assignConfig = assignCache
	s.newactiveBizFlow = activeBizFlow
	return
}
// syncWeightWatch reconciles the weight managers against the active biz-flow
// diff: flows that disappeared get their manager flagged closed and dropped
// from the hash; newly appeared flows get a fresh manager.
func (s *Service) syncWeightWatch(c context.Context) {
	for key := range s.oldactiveBizFlow {
		if _, ok := s.newactiveBizFlow[key]; !ok {
			if wm, ok := s.wmHash[key]; ok {
				// NOTE(review): close is a plain bool written without
				// synchronization — confirm the manager goroutine tolerates
				// this (it is at least a data race under -race).
				wm.close = true
				log.Info("关闭权重计算器 bizid(%d) flowid(%d)", wm.businessID, wm.flowID)
				delete(s.wmHash, key)
			}
		}
	}
	for key := range s.newactiveBizFlow {
		if _, ok := s.oldactiveBizFlow[key]; !ok {
			bizid, _ := parseKey(key)
			s.wmHash[key] = NewWeightManager(s, s.getWeightOpt(bizid), key)
		}
	}
}
// getWeightOpt looks up the configured weight options for a business id,
// returning nil when none are configured.
func (s *Service) getWeightOpt(bizid int) *model.WeightOPT {
	want := int64(bizid)
	for _, opt := range s.c.BizCfg.WeightOpt {
		if opt.BusinessID == want {
			return opt
		}
	}
	return nil
}
// syncConsumerCache refreshes the active-consumer cache under ccMux
// (the lock is held across the dao call, matching the original scope).
func (s *Service) syncConsumerCache(c context.Context) (err error) {
	s.ccMux.Lock()
	defer s.ccMux.Unlock()
	consumers, err := s.dao.TaskActiveConsumer(c)
	if err != nil {
		return
	}
	s.consumerCache = consumers
	return
}
// getWeightCache returns the range-weight configs for the business and the
// equal-weight configs for the specific business-flow pair.
func (s *Service) getWeightCache(c context.Context, businessid, flowid int64) (rwc map[string]*model.RangeWeightConfig, ewc []*model.EqualWeightConfig) {
	bizFlowKey := fmt.Sprintf("%d-%d", businessid, flowid)
	return s.rangeWeightCfg[businessid], s.equalWeightCfg[bizFlowKey]
}

View File

@@ -0,0 +1,322 @@
package service
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"time"
"go-common/app/job/main/aegis/model"
moniMdl "go-common/app/job/main/aegis/model/monitor"
"go-common/library/log"
"go-common/library/queue/databus"
)
var (
	// _taskTable is the task table name; it is not referenced in this part of
	// the file — presumably used by sibling functions in this package
	// (verify before removing).
	_taskTable = "task"
)
// taskconsumeproc multiplexes the task binlog databus and the internal report
// channel until the databus closes; each binlog message is logged and handed
// to handleBinLog, each report item is flushed synchronously.
//
// Fix: the select is now blocking — the original had a `default:` branch with
// a one-second sleep, which busy-polled and could delay every message by up
// to a second even though both sources are channels.
func (s *Service) taskconsumeproc() {
	defer func() {
		log.Warn("taskconsumeproc exited.")
		s.wg.Done()
	}()
	binLogMsgs := s.binLogDataBus.Messages()
	for {
		select {
		case msg, ok := <-binLogMsgs:
			if !ok {
				log.Warn("binLogDataBus has been closed.")
				return
			}
			log.Info("binLogDataBus key(%s) offset(%d) message(%s)",
				msg.Key, msg.Offset, msg.Value)
			s.handleBinLog(msg)
		case rpi := <-s.chanReport:
			s.reportResource(context.Background(), rpi.BizID, rpi.FlowID, rpi.RID, rpi.UID)
		}
	}
}
// archiveConsumeProc drains the archive databus and dispatches each decoded
// binlog row to the table-specific handler until the channel is closed.
// Messages are committed before processing (at-most-once semantics).
func (s *Service) archiveConsumeProc() {
	defer func() {
		log.Warn("archiveConsumeProc exited.")
		s.wg.Done()
	}()
	msgs := s.archiveDataBus.Messages()
	for msg := range msgs {
		msg.Commit()
		bin := &model.BinLog{}
		if err := json.Unmarshal(msg.Value, bin); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		switch bin.Table {
		case "archive":
			s.handleArchiveBinlog(bin)
		case "archive_video":
			s.handleVideoBinlog(bin)
		}
	}
	log.Error("s.archiveDataBus.Messages() closed.")
}
// handleArchiveBinlog decodes an archive binlog row and forwards the old/new
// states to the archive monitor.
//
// Fixes: the original unmarshalled m.New into BOTH the old and new structs,
// discarding the pre-update state, and it passed the values to
// s.monitorArchive in (new, old) order while the signature is (act, oa, na).
// The old payload is only decoded when present, since inserts carry no old
// row image.
func (s *Service) handleArchiveBinlog(m *model.BinLog) {
	na := &moniMdl.BinlogArchive{}
	oa := &moniMdl.BinlogArchive{}
	if err := json.Unmarshal(m.New, na); err != nil {
		log.Error("json.Unmarshal(%s,%+v) error(%v)", m.New, na, err)
		return
	}
	if len(m.Old) > 0 {
		if err := json.Unmarshal(m.Old, oa); err != nil {
			log.Error("json.Unmarshal(%s,%+v) error(%v)", m.Old, oa, err)
			return
		}
	}
	s.monitorArchive(m.Action, oa, na)
}
// handleVideoBinlog decodes a video binlog row and forwards the old/new
// states to the video monitor.
//
// Fixes: the original unmarshalled m.New into BOTH structs (old state lost)
// and passed the values to s.monitorVideo in (new, old) order while the
// signature is (act, ov, nv). The old payload is only decoded when present.
func (s *Service) handleVideoBinlog(m *model.BinLog) {
	nv := &moniMdl.BinlogVideo{}
	ov := &moniMdl.BinlogVideo{}
	if err := json.Unmarshal(m.New, nv); err != nil {
		log.Error("json.Unmarshal(%s,%+v) error(%v)", m.New, nv, err)
		return
	}
	if len(m.Old) > 0 {
		if err := json.Unmarshal(m.Old, ov); err != nil {
			log.Error("json.Unmarshal(%s,%+v) error(%v)", m.Old, ov, err)
			return
		}
	}
	s.monitorVideo(m.Action, ov, nv)
}
// handleBinLog decodes one databus binlog message and, for rows of the task
// table, forwards the old/new task states to handleBinLogMsg. The message is
// always committed (even on decode failure) so a poison message cannot stall
// the consumer.
//
// Fixes: the locals were named old/new, shadowing the builtin `new`; renamed
// for vet-cleanliness. The Old-payload unmarshal error log printed bmsg.New
// instead of the payload that actually failed.
func (s *Service) handleBinLog(msg *databus.Message) {
	defer msg.Commit()
	bmsg := new(model.BinLog)
	if err := json.Unmarshal(msg.Value, bmsg); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", string(msg.Value), err)
		return
	}
	if bmsg.Table == _taskTable {
		oldTask := new(model.Task)
		newTask := new(model.Task)
		if err := json.Unmarshal(bmsg.New, newTask); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", string(bmsg.New), err)
			return
		}
		if bmsg.Action == "update" {
			// Only updates carry the previous row image.
			if err := json.Unmarshal(bmsg.Old, oldTask); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", string(bmsg.Old), err)
				return
			}
		}
		s.handleBinLogMsg(context.Background(), bmsg.Action, oldTask, newTask)
	}
	// use specify goroutine to merge messages
	log.Info("handleBinlog table:%s key:%s partition:%d offset:%d", bmsg.Table, msg.Key, msg.Partition, msg.Offset)
}
// Short aliases for the task states and the dispatch log type (各种状态简写).
// The transition semantics are interpreted in handleBinLogMsg.
const (
	INT = model.TaskStateInit     // initial, waiting in the public pool
	DSP = model.TaskStateDispatch // claimed by a reviewer
	DEY = model.TaskStateDelay    // postponed by a reviewer
	SUB = model.TaskStateSubmit   // submitted from the task list
	RSB = model.TaskStateRscSb    // submitted from the resource list
	CLO = model.TaskStateClosed   // closed
	LTD = model.LogTypeTaskDispatch // log type for task dispatch events
)
// handleRelease processes a task going from dispatched back to the public
// pool: drop it from the releasing reviewer's personal queue, re-queue it
// publicly, write a "release" task log and bump that reviewer's release count.
func (s *Service) handleRelease(c context.Context, old, new *model.Task) {
	s.dao.RemovePersonalTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	s.dao.PushPublicTask(c, new)
	s.sendTaskLog(c, new, LTD, "release", new.UID, "")
	s.dao.IncresByField(c, old.BusinessID, old.FlowID, old.UID, model.Release, 1)
}

// handleDisptach records a task being claimed: write a "dispatch" task log
// and bump the dispatch counter. (The name keeps its historical misspelling
// because callers reference it.)
func (s *Service) handleDisptach(c context.Context, old, new *model.Task) {
	// No cache sync here: doing it from the binlog would scramble ordering
	// (这里不做缓存同步,顺序会乱).
	s.sendTaskLog(c, new, LTD, "dispatch", new.UID, "")
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.Dispatch, 1)
}

// handleDelay records a task being postponed: write a "delay" task log and
// bump the delay counter.
func (s *Service) handleDelay(c context.Context, old, new *model.Task) {
	// No cache sync here: doing it from the binlog would scramble ordering
	// (这里不做缓存同步,顺序会乱).
	s.sendTaskLog(c, new, LTD, "delay", new.UID, "")
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.Delay, 1)
}
/*
Cases that easily skew the statistics (数据统计时,容易产生误差的几种数据):
1. a task claimed by reviewer a but submitted by reviewer b from the resource list
2. a task delayed by reviewer a but submitted by reviewer b from the resource
   list or the delay list
*/

// handleSubmit removes a finished task from whichever queue held it (public,
// personal or delay, depending on its previous state), writes the matching
// task log for the new state, and reports the submission.
func (s *Service) handleSubmit(c context.Context, old, new *model.Task) {
	switch old.State {
	case INT: // submitted straight from the resource list, never dispatched
		s.dao.RemovePublicTask(c, old.BusinessID, old.FlowID, old.ID)
	case DSP: // submitted after being claimed (possibly via the resource list)
		s.dao.RemovePersonalTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	case DEY: // submitted from the delay list (possibly via the resource list)
		s.dao.RemoveDelayTask(c, old.BusinessID, old.FlowID, old.UID, old.ID)
	default: // unexpected previous state
		log.Error("handleSubmit UNEXPECTED old(%+v) new(%v)", old, new)
	}
	switch new.State {
	case SUB: // submitted from the task list
		s.sendTaskLog(c, new, LTD, "tasksubmit", new.UID, "")
	case RSB: // submitted from the resource list
		s.sendTaskLog(c, new, LTD, "rscsubmit", new.UID, "")
	case CLO: // closed
		s.sendTaskLog(c, new, LTD, "close", new.UID, "")
	}
	s.reportSubmit(c, old, new)
}
// handleCreate handles a newly inserted task: push it onto the public queue,
// write a "create" task log and report the creation. UID 399 with operator
// "aegis-job" marks the system as the actor — presumably a reserved service
// account; TODO confirm.
func (s *Service) handleCreate(c context.Context, new *model.Task) {
	s.dao.PushPublicTask(c, new)
	s.sendTaskLog(c, new, LTD, "create", 399, "aegis-job")
	s.reportTaskCreate(c, new)
}
// handleBinLogMsg applies one task binlog event to the caches and queues.
// "insert" pushes the new task into the public pool; "update" is interpreted
// by comparing old/new states: INT<-DSP is a release, INT->DSP a dispatch,
// DSP->DEY a delay, and any transition into SUB/RSB/CLO a submit/close.
// Unexpected transitions are logged; an admin-only change is a no-op.
//
// Fix: the parameters were named old/new, shadowing the builtin `new`; they
// are renamed so the builtin stays usable and the code is vet-clean.
func (s *Service) handleBinLogMsg(c context.Context, act string, oldTask, newTask *model.Task) {
	log.Info("handleTaskBinlog act(%s) old(%+v) new(%+v)", act, oldTask, newTask)
	s.dao.SetTask(c, newTask)
	if act == "insert" {
		s.handleCreate(c, newTask)
	}
	if act == "update" {
		switch {
		case oldTask.State != newTask.State: // state transition (状态变更)
			switch newTask.State {
			case INT: // back to initial
				switch oldTask.State {
				case DSP: // released by its reviewer (释放)
					s.handleRelease(c, oldTask, newTask)
				default: // unexpected; still republish so the task is not lost
					s.dao.PushPublicTask(c, newTask)
					log.Error("handleTaskBinlog UNEXPECTED INT old(%+v) new(%+v)", oldTask, newTask)
				}
			case DSP: // claimed (领取)
				switch oldTask.State {
				case INT:
					s.handleDisptach(c, oldTask, newTask)
				default:
					log.Error("handleTaskBinlog UNEXPECTED DSP old(%+v) new(%+v)", oldTask, newTask)
				}
			case DEY: // delayed (延迟)
				switch oldTask.State {
				case DSP:
					s.handleDelay(c, oldTask, newTask)
				default:
					log.Error("handleTaskBinlog UNEXPECTED DEY old(%+v) new(%+v)", oldTask, newTask)
				}
			case SUB, RSB, CLO: // submitted or closed (提交,关闭)
				s.handleSubmit(c, oldTask, newTask)
			}
		case oldTask.AdminID != newTask.AdminID: // assignment change: no action (指派变更)
		default:
			log.Info("其他变更 old(%+v)->new(%+v)", oldTask, newTask)
		}
	}
}
// setAssign tries to pre-assign the task to a configured reviewer: it looks
// up assignment rules matching the task's uploader, keeps only reviewers that
// are currently online, and picks one at random when several qualify.
// On success it sets task.UID and task.AdminID and returns true.
//
// NOTE(review): task.AdminID is mutated while probing each admin's reviewer
// list, so on a false return it may hold the last admin tried — confirm that
// callers ignore AdminID when this returns false.
func (s *Service) setAssign(c context.Context, task *model.Task) bool {
	log.Info("指派判断 setAssign(%+v)", task)
	auids := s.hitAssignUids(c, task)
	log.Info("指派判断 hitAssignUids(%v)", auids)
	if len(auids) == 0 {
		return false
	}
	log.Info("task(%d) 命中指派配置 (%v)", task.ID, auids)
	var huids []int64
	// Try each admin's configured reviewers until one list has online members.
	for auid, uids := range auids {
		task.AdminID = auid
		huids = s.hitActiveUids(c, task, uids)
		length := len(huids)
		if length != 0 {
			break
		}
	}
	log.Info("task(%d) 指派在线 (%v)", task.ID, huids)
	length := len(huids)
	if length == 0 {
		return false
	}
	if length == 1 {
		task.UID = huids[0]
	} else {
		// Several candidates: pick one at random (随机数选一个).
		task.UID = huids[rand.Intn(length)]
	}
	log.Info("task(%d) admin(%d) 指派成功 (%d)", task.ID, task.AdminID, task.UID)
	return true
}
// hitAssignUids returns, per admin id, the reviewer uids configured for this
// task's uploader (matched by MID) under the task's business/flow pair. The
// map is empty when no assignment config exists or nothing matches.
func (s *Service) hitAssignUids(c context.Context, task *model.Task) (uids map[int64][]int64) {
	uids = make(map[int64][]int64)
	key := fmt.Sprintf("%d-%d", task.BusinessID, task.FlowID)
	assignC, ok := s.assignConfig[key]
	if !ok {
		return
	}
	for _, item := range assignC {
		log.Info("指派判断 task(%+v) item(%+v)", task, item)
		for _, mid := range item.Mids {
			if mid != task.MID {
				continue
			}
			uids[item.Admin] = append(uids[item.Admin], item.Uids...)
		}
	}
	return
}
// hitActiveUids filters uids down to those registered as consumers for the
// task's business/flow pair AND currently switched on.
//
// Fix: the original held s.ccMux.RLock() across the s.dao.IsConsumerOn
// round-trips, blocking writers for the whole duration. The lock now only
// guards the in-memory cache lookup; the DAO checks run unlocked on the
// collected candidates.
func (s *Service) hitActiveUids(c context.Context, task *model.Task, uids []int64) (hitid []int64) {
	key := fmt.Sprintf("%d-%d", task.BusinessID, task.FlowID)
	var candidates []int64
	s.ccMux.RLock()
	if uidCache, ok := s.consumerCache[key]; ok {
		for _, uid := range uids {
			if _, ok := uidCache[uid]; ok {
				candidates = append(candidates, uid)
			}
		}
	}
	s.ccMux.RUnlock()
	for _, uid := range candidates {
		if on, _ := s.dao.IsConsumerOn(c, int(task.BusinessID), int(task.FlowID), uid); on {
			hitid = append(hitid, uid)
		}
	}
	return
}

View File

@@ -0,0 +1,373 @@
package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"sync"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/queue/databus"
pkgerr "github.com/pkg/errors"
)
// RscHandler is the per-business hook pair for resource messages:
// CheckMessage validates and decodes a raw payload; HandleMessage persists
// the decoded value.
type RscHandler interface {
	CheckMessage(json.RawMessage) (interface{}, error)
	HandleMessage(context.Context, interface{}) error
}

// TaskHandler is the per-business hook pair for task-creation messages.
type TaskHandler interface {
	CheckMessage(*databus.Message) (interface{}, error)
	HandleMessage(context.Context, interface{}) error
}

// Compile-time interface satisfaction checks.
var (
	_ TaskHandler = baseTaskHandler{}
	_ TaskHandler = dynamicTaskHandler{}
	_ RscHandler  = baseResourceAddHandler{}
	_ RscHandler  = mangaResourceAddHandler{}
	_ RscHandler  = baseResourceUpdateHandler{}
	_ RscHandler  = baseResourceCancelHandler{}
)

// Singleton handler instances, created exactly once in initHandler (单例).
var (
	basehandleTask      *baseTaskHandler
	basehandleRscAdd    *baseResourceAddHandler
	basehandleRscUpdate *baseResourceUpdateHandler
	basehandleRscCancel *baseResourceCancelHandler
	dynamicHandleTask   *dynamicTaskHandler
	mangaHandelRscAdd   *mangaResourceAddHandler
	once                sync.Once
)

// Sentinel errors returned by the message validators (ERROR).
var (
	ErrTaskDuplicate       = errors.New("重复任务")
	ErrTaskFlowInvalid     = errors.New("流程失效")
	ErrTaskResourceInvalid = errors.New("资源失效")
	ErrInvalidMsg          = errors.New("无效消息")
	ErrHandlerMiss         = errors.New("handler NotFound")
)

// Handler lookup-key prefixes; full keys are "<prefix><businessID>".
var (
	_prefixTask      = "task_"
	_prefixRscAdd    = "add_"
	_prefixRscUpdate = "update_"
	_prefixRscCancel = "cancel_"
)

// Business ids with dedicated handlers (业务ID).
var (
	_bizidDynamic = 1
	_bizidManga   = 2
)
// registerRscHandler binds a resource handler to its lookup key.
func (s *Service) registerRscHandler(key string, handler RscHandler) {
	s.rschandle[key] = handler
}

// registerTaskHandler binds a task handler to its lookup key.
func (s *Service) registerTaskHandler(key string, handler TaskHandler) {
	s.taskhandle[key] = handler
}

// findTaskHandler resolves the task handler for key, falling back to the
// dynamic (default) task handler when no business-specific one is registered.
func (s *Service) findTaskHandler(key string) TaskHandler {
	handler, registered := s.taskhandle[key]
	if registered {
		return handler
	}
	log.Warn("key(%s)没找到任务的处理器根据类型使用默认handler", key)
	return s.getdynamicTaskHandler()
}

// findRscHandler resolves the resource handler for key. Unknown keys fall
// back to the default add/update/cancel handler matching the key's action
// prefix, or nil when the prefix itself is unknown.
func (s *Service) findRscHandler(key string) RscHandler {
	if handler, registered := s.rschandle[key]; registered {
		return handler
	}
	log.Warn("key(%s)没找到业务的处理器根据类型使用默认handler", key)
	if strings.HasPrefix(key, _prefixRscAdd) {
		return s.getbaseResourceAddHandler()
	}
	if strings.HasPrefix(key, _prefixRscUpdate) {
		return s.getbaseResourceUpdateHandler()
	}
	if strings.HasPrefix(key, _prefixRscCancel) {
		return s.getbaseResourceCancelHandler()
	}
	return nil
}
// TODO: handlers are hard-coded for now; they could later be instantiated by
// reflecting on class names from config (先写死吧,之后可以根据配置里面的类名用反射实例化).

// initHandler wires the resource/task handler registry: it (lazily, once per
// process) creates the singleton handlers and registers them under their
// "<action>_<bizid>" / "task_<bizid>" keys for the dynamic and manga
// businesses.
//
// NOTE(review): the singletons are created inside once.Do and capture the
// Service passed on the FIRST call; a later call with a different Service
// would re-register the old instances — confirm initHandler runs only once.
func initHandler(s *Service) {
	var (
		dynamicTask      = fmt.Sprintf("%s%d", _prefixTask, _bizidDynamic)
		dynamicRscAdd    = fmt.Sprintf("%s%d", _prefixRscAdd, _bizidDynamic)
		dynamicRscUpdate = fmt.Sprintf("%s%d", _prefixRscUpdate, _bizidDynamic)
		dynamicRscCancel = fmt.Sprintf("%s%d", _prefixRscCancel, _bizidDynamic)
		managaTask       = fmt.Sprintf("%s%d", _prefixTask, _bizidManga)
		managaRscAdd     = fmt.Sprintf("%s%d", _prefixRscAdd, _bizidManga)
		managaRscUpdate  = fmt.Sprintf("%s%d", _prefixRscUpdate, _bizidManga)
		managaRscCancel  = fmt.Sprintf("%s%d", _prefixRscCancel, _bizidManga)
	)
	s.rschandle = make(map[string]RscHandler)
	s.taskhandle = make(map[string]TaskHandler)
	once.Do(func() {
		basehandleTask = &baseTaskHandler{Service: s}
		basehandleRscAdd = &baseResourceAddHandler{Service: s}
		basehandleRscUpdate = &baseResourceUpdateHandler{Service: s}
		basehandleRscCancel = &baseResourceCancelHandler{Service: s}
		dynamicHandleTask = &dynamicTaskHandler{baseTaskHandler: baseTaskHandler{Service: s}}
		mangaHandelRscAdd = &mangaResourceAddHandler{baseResourceAddHandler: baseResourceAddHandler{Service: s}}
	})
	s.registerRscHandler(dynamicRscAdd, s.getbaseResourceAddHandler())
	s.registerRscHandler(dynamicRscUpdate, s.getbaseResourceUpdateHandler())
	s.registerRscHandler(dynamicRscCancel, s.getbaseResourceCancelHandler())
	s.registerRscHandler(managaRscAdd, s.getmangaResourceAddHandler())
	s.registerRscHandler(managaRscUpdate, s.getbaseResourceUpdateHandler())
	s.registerRscHandler(managaRscCancel, s.getbaseResourceCancelHandler())
	s.registerTaskHandler(managaTask, s.getbaseTaskHandler())
	s.registerTaskHandler(dynamicTask, s.getdynamicTaskHandler())
}
// getbaseTaskHandler returns the singleton default task handler.
func (s *Service) getbaseTaskHandler() *baseTaskHandler {
	return basehandleTask
}

// getbaseResourceAddHandler returns the singleton default resource-add handler.
func (s *Service) getbaseResourceAddHandler() *baseResourceAddHandler {
	return basehandleRscAdd
}

// getbaseResourceUpdateHandler returns the singleton default resource-update handler.
func (s *Service) getbaseResourceUpdateHandler() *baseResourceUpdateHandler {
	return basehandleRscUpdate
}

// getbaseResourceCancelHandler returns the singleton default resource-cancel handler.
func (s *Service) getbaseResourceCancelHandler() *baseResourceCancelHandler {
	return basehandleRscCancel
}

// getdynamicTaskHandler returns the singleton dynamic-business task handler.
func (s *Service) getdynamicTaskHandler() *dynamicTaskHandler {
	return dynamicHandleTask
}

// getmangaResourceAddHandler returns the singleton manga resource-add handler.
func (s *Service) getmangaResourceAddHandler() *mangaResourceAddHandler {
	return mangaHandelRscAdd
}
// checkTaskMsg decodes and validates a task-creation message: it rejects
// malformed payloads, duplicate tasks and invalid flows, and — for legacy
// producers that omit BizID — resolves the business id from the resource
// record before building the Task.
//
// Fix: the original mixed context.Background() and context.TODO() between the
// DAO calls; a single context now threads through all of them.
//
// TODO: honor DispatchLimit to set the dispatch count dynamically
// (根据DispatchLimit动态设置分发数量).
func (s *Service) checkTaskMsg(msg *databus.Message) (*model.Task, error) {
	c := context.Background()
	taskMsg := new(model.CreateTaskMsg)
	if err := json.Unmarshal(msg.Value, taskMsg); err != nil {
		log.Error("checkTaskMsg key(%s) value(%s)", msg.Key, string(msg.Value))
		return nil, err
	}
	if taskMsg.DispatchLimit == 0 || taskMsg.FlowID == 0 || taskMsg.RID == 0 {
		log.Error("checkTaskMsg key(%s) value(%s)", msg.Key, string(msg.Value))
		return nil, ErrTaskResourceInvalid
	}
	if s.dao.CheckTask(c, taskMsg.FlowID, taskMsg.RID) > 0 {
		return nil, ErrTaskDuplicate
	}
	ok, err := s.dao.CheckFlow(c, taskMsg.RID, taskMsg.FlowID)
	if !ok || err != nil {
		return nil, ErrTaskFlowInvalid
	}
	// Legacy task messages carry no bizid; fall back to the resource record
	// (先兼容旧的task消息没有传bizid).
	if taskMsg.BizID == 0 {
		res, err := s.dao.Resource(c, taskMsg.RID)
		if err != nil || res == nil {
			return nil, ErrTaskResourceInvalid
		}
		taskMsg.BizID = res.BusinessID
	}
	return &model.Task{
		BusinessID: taskMsg.BizID,
		FlowID:     taskMsg.FlowID,
		RID:        taskMsg.RID,
	}, nil
}
// writeTaskToDB persists a freshly validated task.
func (s *Service) writeTaskToDB(c context.Context, task *model.Task) error {
	return s.dao.CreateTask(c, task)
}

// checkRscAddMsg decodes a resource-add payload, rejecting messages that are
// missing the business id or the object id.
func (s *Service) checkRscAddMsg(msg json.RawMessage) (*model.AddOption, error) {
	opt := new(model.AddOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.OID) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}

// writeRscAdd persists a resource-add operation.
// TODO: retry based on the error code (根据错误号重试).
func (s *Service) writeRscAdd(c context.Context, opt *model.AddOption) error {
	return s.dao.RscAdd(c, opt)
}

// checkRscUpdateMsg decodes a resource-update payload, rejecting messages
// missing the business id, the object id, or the update fields themselves.
func (s *Service) checkRscUpdateMsg(msg json.RawMessage) (*model.UpdateOption, error) {
	opt := new(model.UpdateOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.OID) == 0 || len(opt.Update) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}

// writeRscUpdate persists a resource-update operation.
func (s *Service) writeRscUpdate(c context.Context, opt *model.UpdateOption) error {
	return s.dao.RscUpdate(c, opt)
}

// checkRscCancelMsg decodes a resource-cancel payload, rejecting messages
// missing the business id or the object-id list.
func (s *Service) checkRscCancelMsg(msg json.RawMessage) (*model.CancelOption, error) {
	opt := new(model.CancelOption)
	if err := json.Unmarshal(msg, opt); err != nil {
		return nil, err
	}
	if opt.BusinessID == 0 || len(opt.Oids) == 0 {
		return nil, ErrInvalidMsg
	}
	return opt, nil
}

// writeRscCancel persists a resource-cancel operation.
func (s *Service) writeRscCancel(c context.Context, opt *model.CancelOption) error {
	return s.dao.RscCancel(c, opt)
}
// newrsc decodes one resource databus message, resolves the handler keyed by
// "<action>_<bizid>" and runs the handler's validation, returning the decoded
// payload for the batch (dorsc) phase.
//
// Fix: the unmarshal-failure log dropped the error value; it is now included.
func (s *Service) newrsc(msg *databus.Message) (interface{}, error) {
	log.Info("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) ", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
	rscmsg := new(model.RscMsg)
	if err := json.Unmarshal(msg.Value, rscmsg); err != nil {
		log.Error("databusgroup json.Unmarshal for msg(%+v) error(%v)", string(msg.Value), err)
		return nil, ErrInvalidMsg
	}
	key := fmt.Sprintf("%s_%d", rscmsg.Action, rscmsg.BizID)
	handler := s.findRscHandler(key)
	if handler == nil {
		log.Error("databusgroup can not find handler for msg key(%+v)", key)
		return nil, ErrHandlerMiss
	}
	data, err := handler.CheckMessage(rscmsg.Raw)
	if err != nil {
		log.Error("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) CheckMessage(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value), pkgerr.WithStack(err))
	}
	return data, err
}
// splitrsc routes a decoded resource payload to a shard by its business id;
// unknown payload types land in shard 0.
func (s *Service) splitrsc(msg *databus.Message, data interface{}) int {
	switch opt := data.(type) {
	case *model.AddOption:
		return int(opt.BusinessID)
	case *model.UpdateOption:
		return int(opt.BusinessID)
	case *model.CancelOption:
		return int(opt.BusinessID)
	}
	return 0
}

// dorsc applies a batch of decoded resource payloads, dispatching each to the
// handler registered for its action/business key. Failures are logged and do
// not stop the rest of the batch.
func (s *Service) dorsc(bmsgs []interface{}) {
	for _, msg := range bmsgs {
		log.Info("databusgroup do msg(%+v)", msg)
		var key string
		switch opt := msg.(type) {
		case *model.AddOption:
			key = fmt.Sprintf("%s%d", _prefixRscAdd, opt.BusinessID)
		case *model.UpdateOption:
			key = fmt.Sprintf("%s%d", _prefixRscUpdate, opt.BusinessID)
		case *model.CancelOption:
			key = fmt.Sprintf("%s%d", _prefixRscCancel, opt.BusinessID)
		default:
			log.Error("databusgroup unknow msg(%+v)", msg)
			continue
		}
		handler := s.findRscHandler(key)
		if handler == nil {
			log.Error("databusgroup msg(%+v) handler NotFound", msg)
			continue
		}
		if err := handler.HandleMessage(context.Background(), msg); err != nil {
			log.Error("databusgroup msg(%+v) handler err(%v)", msg, pkgerr.WithStack(err))
		}
	}
}
// newtask decodes one task-creation databus message, resolves the handler for
// key "task_<bizid>" and runs its validation, returning the built task for
// the batch (dotask) phase. Duplicate-task rejections are logged at warn
// level since they are expected during replays.
//
// Fix: the unmarshal-failure log dropped the error value; it is now included.
func (s *Service) newtask(msg *databus.Message) (interface{}, error) {
	log.Info("databusgroup newtask msg key(%+v) partition(%d) offset(%d) value(%s) ", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
	taskmsg := new(model.CreateTaskMsg)
	if err := json.Unmarshal(msg.Value, taskmsg); err != nil {
		log.Error("databusgroup newtask json.Unmarshal for msg(%+v) error(%v)", string(msg.Value), err)
		return nil, ErrInvalidMsg
	}
	key := fmt.Sprintf("%s%d", _prefixTask, taskmsg.BizID)
	handler := s.findTaskHandler(key)
	if handler == nil {
		log.Error("databusgroup can not find handler for msg key(%+v)", key)
		return nil, ErrHandlerMiss
	}
	data, err := handler.CheckMessage(msg)
	if err != nil {
		errmsg := fmt.Sprintf("databusgroup new msg key(%+v) partition(%d) offset(%d) value(%s) CheckMessage(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value), pkgerr.WithStack(err))
		if err == ErrTaskDuplicate {
			log.Warn(errmsg)
		} else {
			log.Error(errmsg)
		}
	}
	return data, err
}
// splittask shards a decoded task by its business id; non-task payloads go to
// shard 0.
func (s *Service) splittask(msg *databus.Message, data interface{}) int {
	task, ok := data.(*model.Task)
	if !ok {
		return 0
	}
	return int(task.BusinessID)
}

// dotask applies a batch of decoded tasks, dispatching each to the handler
// registered under "task_<bizid>". Failures are logged and do not stop the
// rest of the batch.
func (s *Service) dotask(bmsgs []interface{}) {
	for _, msg := range bmsgs {
		log.Info("databusgroup dotask msg(%+v)", msg)
		task, ok := msg.(*model.Task)
		if !ok {
			log.Error("databusgroup dotask unknow msg(%+v)", msg)
			continue
		}
		key := fmt.Sprintf("%s%d", _prefixTask, task.BusinessID)
		handler := s.findTaskHandler(key)
		if handler == nil {
			log.Error("databusgroup dotask msg(%+v) handler NotFound", msg)
			continue
		}
		if err := handler.HandleMessage(context.Background(), msg); err != nil {
			log.Error("databusgroup dotask msg(%+v) handler err(%v)", msg, pkgerr.WithStack(err))
		}
	}
}

View File

@@ -0,0 +1,76 @@
package service
import (
"context"
"encoding/json"
"errors"
"fmt"
"go-common/app/job/main/aegis/model"
)
// Sentinel validation errors for business-specific resource checks (ERROR).
var (
	ErrMangaNoIndex = errors.New("漫画无图")
)

// baseResourceAddHandler is the default handler for resource "add" messages.
type baseResourceAddHandler struct {
	*Service
}

// mangaResourceAddHandler extends the base add handler with manga-specific
// validation: the payload metadata must reference at least one image.
type mangaResourceAddHandler struct {
	baseResourceAddHandler
}

// baseResourceUpdateHandler is the default handler for resource "update" messages.
type baseResourceUpdateHandler struct {
	*Service
}

// baseResourceCancelHandler is the default handler for resource "cancel" messages.
type baseResourceCancelHandler struct {
	*Service
}

// CheckMessage decodes and validates an add payload.
func (h baseResourceAddHandler) CheckMessage(msg json.RawMessage) (addObj interface{}, err error) {
	return h.checkRscAddMsg(msg)
}

// HandleMessage persists a validated add payload.
func (h baseResourceAddHandler) HandleMessage(c context.Context, addObj interface{}) error {
	return h.writeRscAdd(c, addObj.(*model.AddOption))
}

// CheckMessage runs the base add validation, then additionally requires a
// non-empty "index" entry in the payload metadata — manga must carry images
// (漫画的,校验是否有图).
// NOTE(review): a JSON null index still passes because fmt.Sprint(nil) yields
// "<nil>", whose length is non-zero — confirm whether that is intended.
func (h mangaResourceAddHandler) CheckMessage(msg json.RawMessage) (addObj interface{}, err error) {
	if addObj, err = h.baseResourceAddHandler.CheckMessage(msg); err != nil {
		return
	}
	addopt := addObj.(*model.AddOption)
	metas := make(map[string]interface{})
	if err = json.Unmarshal([]byte(addopt.MetaData), &metas); err != nil {
		return
	}
	if index, ok := metas["index"]; !ok || len(fmt.Sprint(index)) == 0 {
		return nil, ErrMangaNoIndex
	}
	return
}

// HandleMessage persists a validated manga add payload via the base handler.
func (h mangaResourceAddHandler) HandleMessage(c context.Context, addObj interface{}) error {
	return h.baseResourceAddHandler.HandleMessage(c, addObj.(*model.AddOption))
}

// CheckMessage decodes and validates an update payload.
func (h baseResourceUpdateHandler) CheckMessage(msg json.RawMessage) (updateObj interface{}, err error) {
	return h.checkRscUpdateMsg(msg)
}

// HandleMessage persists a validated update payload.
func (h baseResourceUpdateHandler) HandleMessage(c context.Context, updateObj interface{}) error {
	return h.writeRscUpdate(c, updateObj.(*model.UpdateOption))
}

// CheckMessage decodes and validates a cancel payload.
func (h baseResourceCancelHandler) CheckMessage(msg json.RawMessage) (cancelObj interface{}, err error) {
	return h.checkRscCancelMsg(msg)
}

// HandleMessage persists a validated cancel payload.
func (h baseResourceCancelHandler) HandleMessage(c context.Context, cancelObj interface{}) error {
	return h.writeRscCancel(c, cancelObj.(*model.CancelOption))
}

View File

@@ -0,0 +1,54 @@
package service
import (
"context"
"go-common/app/job/main/aegis/model"
"go-common/library/queue/databus"
"go-common/library/xstr"
)
// baseTaskHandler is the default task-creation handler: validate the databus
// message, then persist the task.
type baseTaskHandler struct {
	*Service
}

// dynamicTaskHandler extends the base handler by enriching dynamic-business
// tasks with uploader data (mid, special groups, fan count).
type dynamicTaskHandler struct {
	baseTaskHandler
}

// CheckMessage validates and decodes a task-creation message.
func (h baseTaskHandler) CheckMessage(msg *databus.Message) (taskObj interface{}, err error) {
	return h.checkTaskMsg(msg)
}

// HandleMessage persists a validated task.
func (h baseTaskHandler) HandleMessage(c context.Context, taskObj interface{}) error {
	return h.writeTaskToDB(c, taskObj.(*model.Task))
}

// CheckMessage runs the base validation, then fills in uploader info from the
// resource record: mid, special-group ids and fan count. Failures of the
// group/fan lookups are deliberately ignored (best-effort enrichment).
func (h dynamicTaskHandler) CheckMessage(msg *databus.Message) (taskObj interface{}, err error) {
	var c = context.Background()
	if taskObj, err = h.baseTaskHandler.CheckMessage(msg); err != nil {
		return
	}
	// Enrich with uploader (mid) related info (补充mid相关信息).
	task := taskObj.(*model.Task)
	res, err := h.dao.Resource(c, task.RID)
	if err != nil || res == nil {
		return nil, ErrTaskResourceInvalid
	}
	task.MID = res.MID
	if task.MID > 0 {
		groupids, _ := h.dao.UpSpecial(c, task.MID)
		task.Group = xstr.JoinInts(groupids)
		task.Fans, _ = h.dao.FansCount(c, task.MID)
	}
	taskObj = task
	return
}

// HandleMessage persists a validated dynamic task via the base handler.
func (h dynamicTaskHandler) HandleMessage(c context.Context, obj interface{}) error {
	return h.baseTaskHandler.HandleMessage(c, obj.(*model.Task))
}

View File

@@ -0,0 +1,639 @@
package service
import (
"context"
"errors"
"fmt"
moniMdl "go-common/app/job/main/aegis/model/monitor"
accApi "go-common/app/service/main/account/api"
upApi "go-common/app/service/main/up/api/v1"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/xstr"
"math"
"reflect"
"strconv"
"strings"
"time"
)
// monitorArchive runs the configured archive monitors against an archive
// binlog row (稿件业务监控). oa — the pre-update state — may be nil or
// zero-valued and is only used for logging here; na is the post-update state.
// PGC archives are skipped; the archive's addit record is attached
// best-effort before evaluation.
//
// Fix: the original evaluated na.Attr (the PGC bit test) BEFORE the
// `na == nil` guard, so the guard could never fire and a nil na panicked.
// The nil check now comes first.
func (s *Service) monitorArchive(act string, oa, na *moniMdl.BinlogArchive) (errs []error) {
	var (
		c      = context.TODO()
		logs   []string
		err    error
		errs2  []error
		nAddit *moniMdl.ArchiveAddit
	)
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorArchive() unknown panic(%v)", x)
		} else if len(errs) > 0 {
			log.Error("s.monitorArchive(\n act: %s \n oa: %+v \n na: %+v) \n logStr:\n %v \n error:%v", act, oa, na, logStr, errs)
		} else {
			log.Info("s.monitorArchive(\n act: %s \n oa: %+v \n na: %+v) \n logStr:\n %v", act, oa, na, logStr)
		}
	}()
	if na == nil {
		err = errors.New("new msg nil")
		errs = append(errs, err)
		logs = append(logs, "databus数据异常new msg nil")
		return
	}
	// PGC archives are out of scope for this monitor.
	if (na.Attr>>moniMdl.ArchiveBitPGC)&int64(1) == 1 {
		logs = append(logs, "忽略PGC稿件")
		return
	}
	na.IsSpecTID = moniMdl.SpecialTypeIDs[na.TypeID]
	// Attach the addit record; NothingFound is tolerated, other errors abort.
	if nAddit, err = s.moniDao.ArchiveAttr(c, na.ID); err != nil {
		logs = append(logs, fmt.Sprintf("warn:稿件Addit获取失败aid:%d error:%v", na.ID, err))
		if err != ecode.NothingFound {
			errs = append(errs, err)
			return
		}
		err = nil
	} else {
		na.Addit = nAddit
	}
	errs2 = s.monitorHandle(moniMdl.BusArc, *na, na.ID)
	errs = append(errs, errs2...)
	return
}
// monitorUpDelArc watches uploaders deleting their own original archives.
// Two rule variants exist: RuleHighUpDelArc matches uploaders in the "high
// ally" group, RuleFamUpDelArc matches uploaders with >= 500k followers
// (监控UP主删除稿件。1、在高能联盟的up主特殊用户组2、粉丝数超过50w).
// It returns satisfy=true when the deleted archive belongs to such an
// uploader; per-step details are accumulated in logs for the caller to dump.
func (s *Service) monitorUpDelArc(id int64, obj interface{}) (satisfy bool, logs []string, err error) {
	var (
		c       = context.TODO()
		pReply  *accApi.ProfileStatReply
		upReply *upApi.HighAllyUpsReply
		a       *moniMdl.BinlogArchive
	)
	logs = append(logs, "s.monitorUpDelArc() begin")
	if obj == nil {
		logs = append(logs, "\t obj是nil")
		err = errors.New("obj is nil")
		return
	}
	// Accept the archive either by pointer or by value.
	switch obj.(type) {
	case *moniMdl.BinlogArchive:
		a = obj.(*moniMdl.BinlogArchive)
	case moniMdl.BinlogArchive:
		ac := obj.(moniMdl.BinlogArchive)
		a = &ac
	default:
		logs = append(logs, fmt.Sprintf("\t 未知类型:%+v", obj))
		err = errors.New("unknown interface type")
		return
	}
	logs = append(logs, fmt.Sprintf("\t archive:%+v", a))
	if a == nil {
		err = errors.New("archive is nil")
		return
	}
	// Only deletions of self-made (original) archives are of interest.
	if a.State != moniMdl.ArchiveStateDel {
		logs = append(logs, "\t 非删除,忽略")
		return
	}
	if a.Copyright != moniMdl.ArchiveOriginal {
		logs = append(logs, "\t 非自制,忽略")
		return
	}
	// Record the deletion in redis; a failure here is logged but non-fatal.
	if err = s.moniDao.AddToDelArc(c, a); err != nil {
		logs = append(logs, fmt.Sprintf("\t 添加删除信息到redis失败。error:%v", err))
		err = nil
	}
	if id == moniMdl.RuleHighUpDelArc {
		// High-ally group membership check.
		if upReply, err = s.up.GetHighAllyUps(c, &upApi.HighAllyUpsReq{Mids: []int64{a.MID}}); err != nil {
			logs = append(logs, fmt.Sprintf("\t 获取UP主高能信息失败。error:%v", err))
			log.Error("\t s.monitorUpDelArc() s.up.GetHighAllyUps() error:%v", err)
		}
		logs = append(logs, fmt.Sprintf("\t 用户信息:%+v", upReply))
		if upReply != nil {
			if _, ok := upReply.Lists[a.MID]; ok {
				logs = append(logs, "\t UP主属于高能联盟")
				satisfy = true
				return
			}
		}
	} else if id == moniMdl.RuleFamUpDelArc {
		// Follower-count check against the 500k threshold.
		if pReply, err = s.acc.ProfileWithStat3(c, &accApi.MidReq{Mid: a.MID}); err != nil {
			logs = append(logs, fmt.Sprintf("\t 获取UP主信息失败。error:%v", err))
			log.Error("\t s.monitorUpDelArc() s.acc.ProfileWithStat3() error:%v", err)
		}
		logs = append(logs, fmt.Sprintf("\t 用户信息:%+v", pReply))
		if pReply != nil && pReply.Follower >= 500000 {
			logs = append(logs, "\t UP主属于大UP主")
			satisfy = true
			return
		}
	}
	return
}
// monitorVideo runs the configured video monitors against a video binlog row
// (视频监控). ov — the pre-update state — is currently unused.
//
// Fix: nv.ID was dereferenced with no nil guard, so a nil nv panicked before
// monitorHandle's own nil handling could run; the guard mirrors that check.
func (s *Service) monitorVideo(act string, ov, nv *moniMdl.BinlogVideo) (errs []error) {
	if nv == nil {
		return append(errs, errors.New("new msg nil"))
	}
	errs = s.monitorHandle(moniMdl.BusVideo, nv, nv.ID)
	return
}
// monitorHandle evaluates every monitor rule of business bid against the new
// object state nObj (处理监控数据): for each rule it decides whether the
// object satisfies all monitor conditions, then moves the object id into the
// redis sets of satisfied rules and out of the sets of unsatisfied ones.
//
// NOTE(review): a RulesByBid failure is logged but err is NOT appended to the
// returned errs — confirm whether callers should see that error. Also, when
// reflectIntVal fails, the zero val is still fed to monitorCompSatisfy.
func (s *Service) monitorHandle(bid int64, nObj interface{}, oid int64) (errs []error) {
	var (
		c            = context.TODO()
		rules        []*moniMdl.Rule
		logs, logs2  []string
		err          error
		errs2        []error
		oKeys, nKeys []string
	)
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorHandle() unknown panic(%v)", x)
		} else if len(errs) > 0 {
			log.Error("s.monitorHandle(\n na: %+v) \n logStr:\n %v \n error:%v", nObj, logStr, errs)
		} else {
			log.Info("s.monitorHandle(\n na: %+v) \n logStr:\n %v", nObj, logStr)
		}
	}()
	if nObj == nil {
		err = errors.New("new msg nil")
		errs = append(errs, err)
		logs = append(logs, "databus数据异常new msg nil")
		return
	}
	if rules, err = s.moniDao.RulesByBid(c, bid); err != nil {
		logs = append(logs, "获取监控配置失败")
		return
	}
	if len(rules) == 0 {
		logs = append(logs, "监控配置不存在")
		return
	}
	for _, rule := range rules {
		var allSatisfy = true
		// Uploader-mass-deletion rules get special handling
		// (如果是监控UP主大量删稿则特殊处理).
		if rule.ID == moniMdl.RuleHighUpDelArc || rule.ID == moniMdl.RuleFamUpDelArc {
			if allSatisfy, logs2, err = s.monitorUpDelArc(rule.ID, nObj); err != nil {
				errs = append(errs, err)
			}
			logs = append(logs, logs2...)
		} else {
			// Generic rules: every configured field condition must hold.
			for field, cdt := range rule.RuleConf.MoniCdt {
				var (
					val     int64
					satisfy bool
				)
				if val, err = s.reflectIntVal(nObj, field, 0); err != nil {
					errs = append(errs, err)
					logs = append(logs, fmt.Sprintf("没有找到字段%s", field))
				}
				if satisfy, err = s.monitorCompSatisfy(cdt.Comp, val); err != nil {
					allSatisfy = false
					break
				}
				if !satisfy {
					allSatisfy = false
					break
				}
			}
		}
		if allSatisfy { // all conditions met: move into monitoring (如果满足所有条件,则移入监控)
			nKeys = append(nKeys, fmt.Sprintf(moniMdl.RedisPrefix, rule.ID))
		} else { // some condition failed: move out of monitoring (如果有条件不满足,则移出监控)
			oKeys = append(oKeys, fmt.Sprintf(moniMdl.RedisPrefix, rule.ID))
		}
	}
	logs = append(logs, fmt.Sprintf("%d移出keys%v", oid, oKeys))
	logs = append(logs, fmt.Sprintf("%d移入keys%v", oid, nKeys))
	logs2, errs2 = s.monitorSave(oKeys, nKeys, oid)
	logs = append(logs, logs2...)
	if len(errs2) != 0 {
		errs = append(errs, errs2...)
	}
	return
}
// monitorSave applies one object's monitor-membership changes in redis
// (保存结果): remove oid from the sets it no longer matches (oKeys), add it
// to the sets it now matches (nKeys), and prune expired members from both
// key groups. Step logs and errors are aggregated and dumped once on exit.
func (s *Service) monitorSave(oKeys, nKeys []string, oid int64) (logs []string, errs []error) {
	defer func() {
		logStr := strings.Join(logs, "\n")
		if x := recover(); x != nil {
			log.Error("s.monitorSave() unknown panic(%v)", x)
		} else if len(errs) != 0 {
			log.Error("s.monitorSave(\n oKeys: %v \n nKeys: %v \n oid: %d) \n logStr:\n %v \n error:%v", oKeys, nKeys, oid, logStr, errs)
		} else {
			log.Info("s.monitorSave(\n oKeys: %v \n nKeys: %v \n oid: %d) \n logStr:\n %v", oKeys, nKeys, oid, logStr)
		}
	}()
	c := context.TODO()
	// collect folds one step's logs and error into the accumulated results.
	collect := func(stepLogs []string, stepErr error) {
		logs = append(logs, stepLogs...)
		if stepErr != nil {
			errs = append(errs, stepErr)
		}
	}
	collect(s.moniDao.RemFromSet(c, oKeys, oid))  // drop oid from stale sets
	collect(s.moniDao.ClearExpireSet(c, oKeys))   // prune expired members there
	collect(s.moniDao.AddToSet(c, nKeys, oid))    // add oid to matching sets
	collect(s.moniDao.ClearExpireSet(c, nKeys))   // prune expired members there
	return
}
// monitorNotify is the alerting loop (监控报警): every 30 minutes it loads
// the valid monitor rules, computes each rule's stuck-object stats over the
// configured time window, and sends a notification (with a deep link to the
// manager UI) when the thresholds are tripped. Uploader-deletion rules are
// handed off to moniUpDelArcNotify on their own goroutines.
//
// NOTE(review): the loop has no stop condition or context — confirm it is
// intended to run for the whole process lifetime.
func (s *Service) monitorNotify() {
	defer func() {
		log.Warn("monitorNotify exited.")
	}()
	var (
		c        = context.TODO()
		err      error
		rules    []*moniMdl.Rule
		stats    *moniMdl.Stats
		min, max int64
	)
	for {
		log.Info("s.monitorNotify() begin")
		if rules, err = s.moniDao.ValidRules(c); err != nil {
			log.Error("s.monitorNotify() rules:%+v error:%v", rules, err)
			time.Sleep(1 * time.Minute)
			continue
		}
		for _, rule := range rules {
			if rule.ID == moniMdl.RuleHighUpDelArc || rule.ID == moniMdl.RuleFamUpDelArc {
				// Deletion monitors are handled separately (删稿监控特殊处理).
				s.wg.Add(1)
				go s.moniUpDelArcNotify(rule)
				continue
			}
			if min, max, err = s.monitorNotifyTime(rule.RuleConf); err != nil {
				log.Error("s.monitorNotify() s.monitorNotifyTime() rule:%+v error:%v", rule, err)
				continue
			}
			if stats, err = s.moniDao.MoniRuleStats(c, rule.ID, min, max); err != nil {
				log.Error("s.monitorNotify() s.moniDao.MoniRuleStats(%d,%d,%d) error:%v", rule.ID, min, max, err)
				continue
			}
			notify := s.moniSatisfyNotify(rule.RuleConf, stats)
			if notify {
				title := fmt.Sprintf("%s监控(aegis)", rule.RuleConf.Name)
				body := fmt.Sprintf("当前滞留时间为%s超过阀值滞留量为%d整体量为%d \n报警时间%s", secondsFormat(stats.MaxTime), stats.MoniCount, stats.TotalCount, time.Now().Format("2006-01-02 15:04:05"))
				url := ""
				// Pick the manager UI list page matching the business.
				switch rule.BID {
				case moniMdl.BusVideo:
					url = fmt.Sprintf("http://manager.bilibili.co/#!/video/list?monitor_list=%d_%d_%d", rule.Type, rule.BID, rule.ID)
				case moniMdl.BusArc:
					url = fmt.Sprintf("http://manager.bilibili.co/#!/archive_utils/all?monitor_list=%d_%d_%d", rule.Type, rule.BID, rule.ID)
				}
				body += fmt.Sprintf("\n跳转链接<a href=\"%s\">点击跳转</a> %s", url, url)
				if err = s.monitorSendNotify(c, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, body); err != nil {
					log.Error("s.monitorNotify() s.monitorSendNotify(%d,%v,%s,%s) error:%v", rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, body, err)
				}
			}
		}
		time.Sleep(30 * time.Minute)
	}
}
// moniSatisfyNotify reports whether the collected stats trip the rule's alert
// thresholds (检查监控是否满足报警). Only time + count conditions exist
// today; notify starts true and each configured condition can veto it:
// CompGT requires the stat to reach the threshold, CompLT requires it to
// stay below.
//
// Fix: the time branch tested conf.MoniCdt["time"] for presence but then read
// conf.NotifyCdt["time"], so a rule with a time monitor-condition but no time
// notify-condition read a zero-valued entry. Both branches now consistently
// check NotifyCdt, matching the count branch.
func (s *Service) moniSatisfyNotify(conf *moniMdl.RuleConf, stats *moniMdl.Stats) (notify bool) {
	notify = true
	if _, ok := conf.NotifyCdt["time"]; ok {
		threshold := conf.NotifyCdt["time"].Value
		comp := conf.NotifyCdt["time"].Comp
		switch comp {
		case moniMdl.CompGT:
			if int64(stats.MaxTime) < threshold {
				notify = false
			}
		case moniMdl.CompLT:
			if int64(stats.MaxTime) > threshold {
				notify = false
			}
		}
	}
	if _, ok := conf.NotifyCdt["count"]; ok {
		threshold := conf.NotifyCdt["count"].Value
		comp := conf.NotifyCdt["count"].Comp
		switch comp {
		case moniMdl.CompGT:
			if int64(stats.MoniCount) < threshold {
				notify = false
			}
		case moniMdl.CompLT:
			if int64(stats.MoniCount) > threshold {
				notify = false
			}
		}
	}
	return
}
// moniUpDelArcNotify is the alert path for the uploader-deletion rules
// (特殊处理UP主删稿的逻辑): it collects the object ids recorded for the rule
// in the configured time window, groups the deletion records by uploader, and
// for every uploader at or above the configured count threshold sends an HTML
// mail listing the deleted archives with the uploader's follower count.
// It must be launched with s.wg.Add(1); the matching Done runs in a defer.
func (s *Service) moniUpDelArcNotify(rule *moniMdl.Rule) (err error) {
	var (
		c          = context.TODO()
		min, max   int64
		oidMap     map[int64]int
		oids, mids []int64
		infos      map[int64]*moniMdl.DelArcInfo
		delMap     map[int64][]*moniMdl.DelArcInfo
		accStats   map[int64]*accApi.ProfileStatReply
		threshold  int
	)
	defer func() {
		s.wg.Done()
	}()
	// A count notify-condition is mandatory for this rule family.
	if _, ok := rule.RuleConf.NotifyCdt["count"]; !ok {
		err = errors.New("notify count config error")
		log.Error("s.moniUpDelArcNotify(%+v) 没有count监控配置", rule)
		return
	}
	threshold = int(rule.RuleConf.NotifyCdt["count"].Value)
	delMap = make(map[int64][]*moniMdl.DelArcInfo)
	min, max, err = s.monitorNotifyTime(rule.RuleConf)
	if err != nil {
		log.Error("s.monitorNotifyTime() rule:%+v error:%v", rule, err)
		return
	}
	if oidMap, err = s.moniDao.MoniRuleOids(c, rule.ID, min, max); err != nil {
		log.Error("s.moniDao.MoniRuleOids() rule:%+v error:%v", rule, err)
		return
	}
	for oid := range oidMap {
		oids = append(oids, oid)
	}
	if infos, err = s.moniDao.ArcDelInfos(c, oids); err != nil {
		log.Error("s.moniUpDelArcNotify() s.moniDao.ArcDelInfos(%v) error(%v)", oids, err)
		return
	}
	// Group deletion records by uploader mid.
	for _, info := range infos {
		delMap[info.MID] = append(delMap[info.MID], info)
		mids = append(mids, info.MID)
	}
	// Account lookup is best-effort; a failure falls back to placeholders.
	if accStats, err = s.multiAccounts(c, mids); err != nil {
		log.Error("s.moniUpDelArcNotify() s.multiAccounts(%v) error(%v)", mids, err)
		accStats = make(map[int64]*accApi.ProfileStatReply)
	}
	for mid, ins := range delMap {
		if _, ok := accStats[mid]; !ok {
			log.Error("s.monitorNotify() account ")
			// Placeholder profile so the mail can still be rendered.
			accStats[mid] = &accApi.ProfileStatReply{
				Profile: &accApi.Profile{
					Name: "nil",
					Mid:  mid,
				},
			}
		}
		if len(ins) >= threshold {
			var (
				title, content string
			)
			// Build the mail lazily on the first record, then append one
			// table row per deleted archive.
			for _, v := range ins {
				if title == "" {
					title = fmt.Sprintf("【异常删稿报警】“%s” 24内已删除%d个自制稿件 ", accStats[mid].Profile.Name, len(ins))
				}
				if content == "" {
					content = fmt.Sprintf("监控规则:%d——%s——%s<br />报警时间:%s<br /><br />", rule.ID, rule.Name, rule.RuleConf.Name, time.Now().Format("2006-01-02 15:04:05"))
					content += fmt.Sprintf("<b>UP主昵称:%smid: %d当前粉丝数:%d 24内已删除:%d</b><br /><br />", accStats[mid].Profile.Name, accStats[mid].Profile.Mid, accStats[mid].Follower, len(ins))
					content += "<table border=\"1\" style=\"border-collapse: collapse;\"><tr><th>标题</th><th>av号</th><th>删除时间</th></tr>"
				}
				content += fmt.Sprintf("<tr><td style=\"padding: 5px 10px;\"> %s </td><td style=\"padding: 5px 10px;\"> %d </td><td style=\"padding: 5px 10px;\"> %s </td></tr>", v.Title, v.AID, v.Time)
			}
			content += "</table>"
			if err = s.monitorSendNotify(c, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, content); err != nil {
				log.Error("s.moniUpDelArcNotify(%d) s.monitorSendNotify(%d,%v,%s,%s) error:%v", rule.ID, rule.RuleConf.Notify.Way, rule.RuleConf.Notify.Member, title, content, err)
			}
		}
	}
	return
}
// monitorNotifyTime derives the [tFrom, tTo] score window for monitoring
// alerts from the rule's "time" condition. CompGT means "older than the
// configured value" (window [0, now-value]); CompLT means "within the last
// value seconds" (window [now-value, now]). Any other comparison is an error.
func (s *Service) monitorNotifyTime(conf *moniMdl.RuleConf) (tFrom, tTo int64, err error) {
	cdt, ok := conf.NotifyCdt["time"]
	if !ok {
		err = errors.New("配置的 NotifyCdt 中不存在 time")
		return
	}
	now := time.Now().Unix()
	switch cdt.Comp {
	case moniMdl.CompGT:
		// tFrom stays at its zero value: everything older than the cutoff
		tTo = now - cdt.Value
	case moniMdl.CompLT:
		tFrom = now - cdt.Value
		tTo = now
	default:
		err = errors.New("配置的 NotifyCdt 中 comparison 不合法: " + cdt.Comp)
	}
	return
}
// reflectIntVal reads an int64 value from obj by reflection. field supports
// multi-level lookup such as "Addit.MissionID". dep guards against unbounded
// recursion through pointer chains (max 10 levels).
//
// Fixes over the previous version: a typed-nil pointer passed as obj no longer
// panics (it is reported like the untyped-nil case), and nested fields that
// are plain structs (not pointers) are handled instead of panicking in
// Value.IsNil/Value.Elem, which are only legal on pointer-like kinds.
func (s *Service) reflectIntVal(obj interface{}, field string, dep int) (val int64, err error) {
	if dep > 10 {
		err = fmt.Errorf("too deep:%d", dep)
		return
	}
	if obj == nil {
		err = errors.New("s.reflectIntVal() obj is invalid memory address or nil pointer dereference")
		return
	}
	// unwrap pointers, guarding against typed nil
	if reflect.TypeOf(obj).Kind() == reflect.Ptr {
		rv := reflect.ValueOf(obj)
		if rv.IsNil() {
			err = errors.New("s.reflectIntVal() obj is invalid memory address or nil pointer dereference")
			return
		}
		return s.reflectIntVal(rv.Elem().Interface(), field, dep+1)
	}
	// split off the first path segment, e.g. "Addit.MissionID" -> "Addit" + "MissionID"
	name, rest := field, ""
	if i := strings.Index(field, "."); i >= 0 {
		name, rest = field[:i], field[i+1:]
	}
	fv := reflect.ValueOf(obj).FieldByName(name)
	if !fv.IsValid() {
		err = fmt.Errorf("s.reflectIntVal() field not found. field:%s obj: %+v", field, obj)
		return
	}
	if rest == "" {
		// leaf: Int() still panics for non-integer kinds, matching the old behavior
		val = fv.Int()
		return
	}
	switch fv.Kind() {
	case reflect.Ptr, reflect.Interface:
		if fv.IsNil() {
			err = fmt.Errorf("s.reflectIntVal() field is nil. field:%s obj: %+v", field, obj)
			return
		}
		return s.reflectIntVal(fv.Elem().Interface(), rest, dep+1)
	default:
		// plain struct field: recurse into its value directly
		return s.reflectIntVal(fv.Interface(), rest, dep+1)
	}
}
// monitorCompSatisfy reports whether val satisfies the monitoring comparison
// expression com. Supported forms (operator first): "!=N", ">=N", "<=N",
// "=N", ">N", "<N", "in(a,b,c)" and "!in(a,b,c)".
//
// Fix: operators are now matched as prefixes (strings.HasPrefix) and stripped
// with TrimPrefix/TrimSuffix. The previous strings.Contains/Replace approach
// also accepted malformed expressions such as "10>=" or values containing the
// operator anywhere, and silently mis-parsed them.
func (s *Service) monitorCompSatisfy(com string, val int64) (is bool, err error) {
	// parseOperand strips a leading operator and parses the numeric operand.
	parseOperand := func(prefix string) (int64, error) {
		return strconv.ParseInt(strings.TrimPrefix(com, prefix), 10, 64)
	}
	// parseList strips "in(" / "!in(" and the closing ")" and parses the ids.
	parseList := func(prefix string) ([]int64, error) {
		body := strings.TrimSuffix(strings.TrimPrefix(com, prefix), ")")
		return xstr.SplitInts(body)
	}
	var (
		v    int64
		vals []int64
	)
	// order matters: ">=" / "<=" / "!=" / "!in(" before their shorter forms
	switch {
	case strings.HasPrefix(com, "!="):
		if v, err = parseOperand("!="); err == nil {
			is = val != v
		}
	case strings.HasPrefix(com, ">="):
		if v, err = parseOperand(">="); err == nil {
			is = val >= v
		}
	case strings.HasPrefix(com, "<="):
		if v, err = parseOperand("<="); err == nil {
			is = val <= v
		}
	case strings.HasPrefix(com, "="):
		if v, err = parseOperand("="); err == nil {
			is = val == v
		}
	case strings.HasPrefix(com, "!in("):
		if vals, err = parseList("!in("); err == nil {
			is = true
			for _, item := range vals {
				if val == item {
					is = false
					break
				}
			}
		}
	case strings.HasPrefix(com, "in("):
		if vals, err = parseList("in("); err == nil {
			for _, item := range vals {
				if val == item {
					is = true
					break
				}
			}
		}
	case strings.HasPrefix(com, ">"):
		if v, err = parseOperand(">"); err == nil {
			is = val > v
		}
	case strings.HasPrefix(com, "<"):
		if v, err = parseOperand("<"); err == nil {
			is = val < v
		}
	default:
		err = errors.New("unknown comparison")
	}
	return
}
// monitorSendNotify dispatches a monitoring alert to members via the
// configured channel. Only e-mail (moniMdl.NotifyTypeEmail) is supported;
// any other way is rejected with an error.
func (s *Service) monitorSendNotify(c context.Context, way int8, members []string, title, content string) (err error) {
	if way != moniMdl.NotifyTypeEmail {
		err = errors.New("unknown notify way")
		log.Error("s.monitorSendNotify(%d,%v,%s,%s) unknown notify way", way, members, title, content)
		return
	}
	log.Info("s.monitorSendNotify() begin. way:%d members:%v title:%s content:%s", way, members, title, content)
	if err = s.email.MonitorEmailAsync(c, members, title, content); err != nil {
		log.Error("s.email.SendMonitorNotify(%v,%s,%s) error:%v", members, title, content, err)
	}
	return
}
// multiAccounts fetches profile+stat data for a batch of mids, deduplicating
// the input. Individual lookup failures are logged and skipped, so res may be
// partial; err reflects the last failed lookup (non-nil on any failure).
func (s *Service) multiAccounts(c context.Context, mids []int64) (res map[int64]*accApi.ProfileStatReply, err error) {
	res = make(map[int64]*accApi.ProfileStatReply)
	if len(mids) == 0 {
		return
	}
	seen := make(map[int64]bool, len(mids))
	for _, mid := range mids {
		if seen[mid] {
			continue
		}
		seen[mid] = true
		var reply *accApi.ProfileStatReply
		if reply, err = s.acc.ProfileWithStat3(c, &accApi.MidReq{Mid: mid}); err != nil {
			log.Error("s.multiAccounts() s.acc.ProfileWithStat3(%d) error(%v)", mid, err)
			continue
		}
		res[mid] = reply
	}
	return
}
// monitorEmailProc drains the monitoring e-mail queue forever, pausing 200ms
// between rounds. The deferred wg.Done only fires if the loop ever exits
// (the loop has no stop condition).
func (s *Service) monitorEmailProc() {
	defer s.wg.Done()
	const interval = 200 * time.Millisecond
	for {
		s.email.MonitorEmailProc()
		time.Sleep(interval)
	}
}
// secondsFormat renders a duration given in seconds as "HH:MM:SS".
// Negative input yields the placeholder "--:--:--"; 100 hours or more
// simply widen the hour field.
func secondsFormat(sec int) (str string) {
	if sec < 0 {
		return "--:--:--"
	}
	if sec == 0 {
		return "00:00:00"
	}
	hours := math.Floor(float64(sec) / 3600)
	minutes := math.Floor((float64(sec) - 3600*hours) / 60)
	seconds := sec % 60
	return fmt.Sprintf("%02d:%02d:%02d", int64(hours), int64(minutes), seconds)
}

View File

@@ -0,0 +1,134 @@
package service
import (
"context"
"github.com/golang/mock/gomock"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/job/main/aegis/model/monitor"
accApi "go-common/app/service/main/account/api"
"testing"
)
// WithMock wraps a test body f in a gomock controller whose deferred Finish
// verifies all declared expectations after f returns.
func WithMock(t *testing.T, f func(mock *gomock.Controller)) func() {
	return func() {
		mockCtrl := gomock.NewController(t)
		defer mockCtrl.Finish()
		f(mockCtrl)
	}
}
// TestService_monitorArchive feeds the archive-binlog monitor a deleted
// (state -100) archive and expects at least one rule error/result.
// NOTE(review): relies on the package-level service `s` built in init() and
// on real backing services — this is an integration-style test.
func TestService_monitorArchive(t *testing.T) {
	var (
		na = &monitor.BinlogArchive{
			ID:     10111555,
			State:  -100,
			Round:  10,
			MID:    666,
			TypeID: 2422,
		}
	)
	Convey("monitorUpDelArc", t, func(ctx C) {
		errs := s.monitorArchive("update", nil, na)
		So(errs, ShouldNotBeEmpty)
	})
}
// TestService_monitorUpDelArc runs the uploader-deletion monitor for rule 1
// against a deleted archive and expects hit logs to be produced.
// NOTE(review): integration-style; depends on the shared service `s`.
func TestService_monitorUpDelArc(t *testing.T) {
	var (
		na = &monitor.BinlogArchive{
			ID:     10111555,
			State:  -100,
			Round:  10,
			MID:    666,
			TypeID: 24,
		}
		logs []string
	)
	Convey("monitorUpDelArc", t, func(ctx C) {
		_, logs, _ = s.monitorUpDelArc(1, na)
		So(logs, ShouldNotBeEmpty)
	})
}
// TestService_monitorVideo feeds the video-binlog monitor a status-0 video
// and expects no rule errors.
// NOTE(review): integration-style; depends on the shared service `s`.
func TestService_monitorVideo(t *testing.T) {
	var (
		na = &monitor.BinlogVideo{
			ID:     10134809,
			Status: 0,
		}
	)
	Convey("monitorVideo", t, func(ctx C) {
		errs := s.monitorVideo("update", nil, na)
		So(errs, ShouldBeEmpty)
	})
}
// TestService_reflectIntVal covers nested-field lookup (Addit.MissionID),
// a missing field (error expected), and a plain top-level int field.
func TestService_reflectIntVal(t *testing.T) {
	var (
		a = &monitor.BinlogArchive{
			ID:     123,
			State:  0,
			Round:  10,
			MID:    666,
			TypeID: 22,
			Addit: &monitor.ArchiveAddit{
				MissionID: 999,
			},
		}
	)
	Convey("reflectIntVal", t, func(ctx C) {
		_, err := s.reflectIntVal(a, "Addit.MissionID", 0)
		So(err, ShouldBeNil)
		_, err = s.reflectIntVal(a, "Addit111", 0)
		So(err, ShouldNotBeNil)
		_, err = s.reflectIntVal(a, "ID", 0)
		So(err, ShouldBeNil)
	})
}
// TestService_monitorCompSatisfy checks the comparison-expression evaluator
// against a table of representative expressions and values.
func TestService_monitorCompSatisfy(t *testing.T) {
	Convey("monitorCompSatisfy >=", t, func(ctx C) {
		cases := []struct {
			com  string
			val  int64
			want bool
		}{
			{">=10", 11, true},
			{">10", 10, false},
			{"=10", 10, true},
			{"in(10,20,30)", 10, true},
			{"in(10,20,30)", 40, false},
		}
		for _, cs := range cases {
			is, err := s.monitorCompSatisfy(cs.com, cs.val)
			So(err, ShouldBeNil)
			So(is, ShouldEqual, cs.want)
		}
	})
}
// TestService_monitorSave verifies that recording monitor stats for test keys
// against oid 123 produces no errors.
// NOTE(review): integration-style; depends on the shared service `s`.
func TestService_monitorSave(t *testing.T) {
	Convey("monitorSave", t, func(ctx C) {
		_, errs := s.monitorSave([]string{"monitor_test_1"}, []string{"monitor_test_2"}, 123)
		So(errs, ShouldBeEmpty)
	})
}
// TestService_multiAccounts swaps the account client for a gomock stub and
// verifies that a single-mid lookup round-trips without error.
// NOTE(review): this overwrites the shared s.acc for subsequent tests in the
// package — confirm that is intended.
func TestService_multiAccounts(t *testing.T) {
	var c = context.Background()
	Convey("multiAccounts", t, WithMock(t, func(mockCtrl *gomock.Controller) {
		mock := accApi.NewMockAccountClient(mockCtrl)
		s.acc = mock
		mockReq := &accApi.MidReq{
			Mid: 123,
		}
		mock.EXPECT().ProfileWithStat3(gomock.Any(), mockReq).Return(&accApi.ProfileStatReply{}, nil)
		_, err := s.multiAccounts(c, []int64{123})
		So(err, ShouldBeNil)
	}))
}

View File

@@ -0,0 +1,77 @@
package service
import (
"context"
"fmt"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
)
// reportSubmit records reporting counters when a task is submitted: it bumps
// the task-out counter, the per-state/per-operator submit counter and the
// accumulated handling time. Resource-state stats are only collected when the
// submitter still owns the task; they are pushed to chanReport asynchronously
// so slow reporting does not stall cache synchronisation.
//
// Fix: the 10ms timeout timer is now stopped on a successful channel send
// instead of being left to fire in the background.
func (s *Service) reportSubmit(c context.Context, old, new *model.Task) {
	s.reportTaskFinish(c, new)
	stfield := fmt.Sprintf(model.Submit, new.State, old.UID)
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, stfield, 1)
	s.dao.IncresByField(c, new.BusinessID, new.FlowID, new.UID, model.UseTime, new.Utime)
	// only count pass/reject style resource stats for task-list operations
	if old.UID == new.UID && new.State == model.TaskStateSubmit {
		timer := time.NewTimer(time.Millisecond * 10)
		select {
		case s.chanReport <- &model.RIR{
			BizID:  new.BusinessID,
			FlowID: new.FlowID,
			UID:    new.UID,
			RID:    new.RID,
		}:
			timer.Stop()
		case <-timer.C:
			log.Error("reportSubmit chanfull")
		}
	}
}
// reportResource looks up the resource's current state and bumps the matching
// per-state counter for the given biz/flow/operator; lookup failures are
// logged and the increment is skipped.
func (s *Service) reportResource(c context.Context, bizid, flowid, rid, uid int64) {
	st, err := s.dao.RscState(c, rid)
	if err != nil {
		log.Error("reportResource RscState(%d) error(%v)", rid, err)
		return
	}
	s.dao.IncresByField(c, bizid, flowid, uid, fmt.Sprintf(model.RscState, st), 1)
}
// syncReport flushes the accumulated report counters from the cache and
// persists one Report row per key; keys that fail to parse are logged and
// skipped.
func (s *Service) syncReport(c context.Context) {
	datas, err := s.dao.FlushReport(c)
	if err != nil {
		log.Error("FlushReport error(%v)", err)
		return
	}
	for key, content := range datas {
		tp, bizid, flowid, uid, perr := model.ParseKey(key)
		if perr != nil {
			log.Error("syncReport ParseKey(%s)", key)
			continue
		}
		s.dao.Report(c, &model.Report{
			BusinessID: int64(bizid),
			FlowID:     int64(flowid),
			UID:        int64(uid),
			TYPE:       tp,
			Content:    content,
		})
	}
}
// reportTaskCreate bumps the "in" side of the per-biz/flow task throughput
// report when a new task enters review.
func (s *Service) reportTaskCreate(c context.Context, new *model.Task) {
	s.dao.IncresTaskInOut(c, new.BusinessID, new.FlowID, "in")
}
// reportTaskFinish bumps the "out" side of the per-biz/flow task throughput
// report when a task leaves review.
func (s *Service) reportTaskFinish(c context.Context, new *model.Task) {
	s.dao.IncresTaskInOut(c, new.BusinessID, new.FlowID, "out")
}

View File

@@ -0,0 +1,147 @@
package service
import (
"context"
"strings"
"sync"
"go-common/app/job/main/aegis/conf"
"go-common/app/job/main/aegis/dao"
"go-common/app/job/main/aegis/dao/email"
"go-common/app/job/main/aegis/dao/monitor"
"go-common/app/job/main/aegis/model"
accApi "go-common/app/service/main/account/api"
upApi "go-common/app/service/main/up/api/v1"
"go-common/library/queue/databus"
"go-common/library/queue/databus/databusutil"
)
// Service is the aegis-job worker: it consumes binlog/archive/resource/task
// databus streams, maintains in-memory caches, computes task weights and
// drives monitoring alerts.
type Service struct {
	c   *conf.Config
	acc accApi.AccountClient // account-service gRPC client (nil in debug mode)
	up  upApi.UpClient       // up-service gRPC client (nil in debug mode)

	dao     *dao.Dao
	moniDao *monitor.Dao
	email   *email.Dao

	// databus consumers
	binLogDataBus    *databus.Databus
	archiveDataBus   *databus.Databus
	aegisRscDataBus  *databus.Databus
	aegisTaskDataBus *databus.Databus

	// channel carrying per-resource report increments (filled by reportSubmit)
	chanReport chan *model.RIR

	// cache
	Cache

	// weight calculators keyed by "bizid-flowid"
	wmHash     map[string]*WeightManager
	rschandle  map[string]RscHandler
	taskhandle map[string]TaskHandler
	wg         sync.WaitGroup

	// databus consumer groups
	resourceGroup *databusutil.Group
	taskGroup     *databusutil.Group
}
// New builds the aegis-job service: daos, gRPC clients (skipped in debug
// mode), databus consumers/groups, caches, weight managers and background
// worker goroutines. Workers registered on wg are waited for in Close.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:                c,
		dao:              dao.New(c),
		moniDao:          monitor.New(c),
		email:            email.New(c),
		binLogDataBus:    databus.New(c.DataBus.BinLogSub),
		chanReport:       make(chan *model.RIR, 1024),
		archiveDataBus:   databus.New(c.DataBus.ArchiveSub),
		aegisRscDataBus:  databus.New(c.DataBus.ResourceSub),
		aegisTaskDataBus: databus.New(c.DataBus.TaskSub),
	}
	// real gRPC clients only outside debug mode; failures are fatal at startup
	if !s.c.Debug {
		var err error
		if s.acc, err = accApi.NewClient(c.GRPC.Acc); err != nil {
			panic(err)
		}
		if s.up, err = upApi.NewClient(c.GRPC.Up); err != nil {
			panic(err)
		}
	}
	initHandler(s)
	// caches first: startWeightManager iterates newactiveBizFlow, which is
	// presumably populated by initCache — keep this ordering.
	s.initCache()
	s.startWeightManager()
	// resource databus consumer group
	s.resourceGroup = databusutil.NewGroup(c.Databusutil.Resource, s.aegisRscDataBus.Messages())
	s.resourceGroup.New = s.newrsc
	s.resourceGroup.Split = s.splitrsc
	s.resourceGroup.Do = s.dorsc
	s.resourceGroup.Start()
	// task databus consumer group
	s.taskGroup = databusutil.NewGroup(c.Databusutil.Task, s.aegisTaskDataBus.Messages())
	s.taskGroup.New = s.newtask
	s.taskGroup.Split = s.splittask
	s.taskGroup.Do = s.dotask
	s.taskGroup.Start()
	go s.cacheProc()
	go s.taskProc()
	go s.monitorNotify()
	// wg-tracked workers: Close waits for these to drain
	s.wg.Add(1)
	go s.taskconsumeproc()
	s.wg.Add(1)
	go s.archiveConsumeProc()
	s.wg.Add(1)
	go s.monitorEmailProc()
	return s
}
// Cache bundles the in-memory config/state caches embedded in Service.
type Cache struct {
	upCache        map[int64]map[int64]struct{}
	rangeWeightCfg map[int64]map[string]*model.RangeWeightConfig // per-business range weight configs
	equalWeightCfg map[string][]*model.EqualWeightConfig
	assignConfig   map[string][]*model.AssignConfig
	// consumerCache maps "bizid-flowid" -> set of online consumer uids;
	// guarded by ccMux (see checkKickOut).
	consumerCache    map[string]map[int64]struct{}
	ccMux            sync.RWMutex
	oldactiveBizFlow map[string]struct{} // active "bizid-flowid" keys, previous snapshot
	newactiveBizFlow map[string]struct{} // active "bizid-flowid" keys, current snapshot
}
// DebugCache exposes selected in-memory caches for debugging. keys is a
// comma-separated list of cache names; unknown names yield nil entries, and
// an empty keys string yields an empty map.
func (s *Service) DebugCache(keys string) map[string]interface{} {
	caches := map[string]interface{}{
		"upCache":          s.upCache,
		"rangeWeightCfg":   s.rangeWeightCfg,
		"equalWeightCfg":   s.equalWeightCfg,
		"assignConfig":     s.assignConfig,
		"consumerCache":    s.consumerCache,
		"oldactiveBizFlow": s.oldactiveBizFlow,
		"newactiveBizFlow": s.newactiveBizFlow,
	}
	res := make(map[string]interface{})
	if keys == "" {
		return res
	}
	for _, name := range strings.Split(keys, ",") {
		res[name] = caches[name]
	}
	return res
}
// Ping reports the health of the service by pinging the underlying dao.
func (s *Service) Ping(c context.Context) (err error) {
	return s.dao.Ping(c)
}
// Close shuts the job down in dependency order: databus consumers and groups
// first so no new work arrives, then waits for the wg-tracked workers, and
// finally closes the daos.
// NOTE(review): monitorEmailProc's loop has no exit condition, so wg.Wait may
// block indefinitely here — confirm shutdown actually completes.
func (s *Service) Close() {
	s.binLogDataBus.Close()
	s.archiveDataBus.Close()
	s.aegisRscDataBus.Close()
	s.aegisTaskDataBus.Close()
	s.resourceGroup.Close()
	s.taskGroup.Close()
	s.wg.Wait()
	s.dao.Close()
	s.moniDao.Close()
}

View File

@@ -0,0 +1,31 @@
package service
import (
"context"
"flag"
"testing"
"go-common/app/job/main/aegis/conf"
"go-common/library/log"
)
var (
s *Service
)
// initConf loads the job configuration for tests and initialises logging;
// it panics when the config cannot be read.
func initConf() {
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Log)
}
// init points the test binary at the sample config and builds the shared
// service instance `s` used by every test in this package.
func init() {
	flag.Set("conf", "../cmd/aegis-job.toml")
	initConf()
	s = New(conf.Conf)
}
// Test_syncReport smoke-tests the report flush path; it asserts nothing and
// only checks that the call does not panic against the test environment.
func Test_syncReport(t *testing.T) {
	s.syncReport(context.Background())
}

View File

@@ -0,0 +1,60 @@
package service
import (
"context"
"strconv"
"strings"
"time"
"go-common/app/job/main/aegis/model"
)
// taskProc is the periodic task-housekeeping loop: each cycle it releases
// tasks checked out for more than 10 minutes, kicks out expired consumers,
// then after the 10-minute pause flushes report counters and clears stale
// finished tasks.
// NOTE(review): the sleep sits between the two halves, so syncReport and
// taskClear first run 10 minutes after startup — confirm this is intended.
func (s *Service) taskProc() {
	for {
		// release tasks held past the 10-minute timeout
		s.dao.TaskRelease(context.Background(), time.Now().Add(-10*time.Minute))
		// kick out consumers whose session has expired
		s.checkKickOut(context.Background())
		time.Sleep(10 * time.Minute)
		s.syncReport(context.Background())
		s.taskClear()
	}
}
// checkKickOut scans the online-consumer cache and kicks out every consumer
// the dao no longer reports as online, removing them from the cache and
// releasing their tasks.
//
// Fixes: the cache is mutated (delete) inside the loop, so a full write Lock
// is taken instead of the previous RLock — deleting from a map under a read
// lock races with concurrent writers. The "bizid-flowid" key is also parsed
// once per bucket instead of once per uid.
func (s *Service) checkKickOut(c context.Context) {
	s.ccMux.Lock()
	defer s.ccMux.Unlock()
	for bizfwid, uidm := range s.consumerCache {
		// key format is "bizid-flowid"; parse it once for the whole bucket
		pos := strings.Index(bizfwid, "-")
		bizid, _ := strconv.Atoi(bizfwid[:pos])
		flowid, _ := strconv.Atoi(bizfwid[pos+1:])
		for uid := range uidm {
			if on, err := s.dao.IsConsumerOn(c, bizid, flowid, uid); err == nil && !on {
				delete(s.consumerCache[bizfwid], uid)
				s.KickOut(c, int64(bizid), int64(flowid), uid)
			}
		}
	}
}
// KickOut removes an expired consumer from the biz/flow and releases every
// task that consumer still holds, writing an audit log entry in between.
// (Cleanup: the parameters are already int64, so the redundant int64()
// conversions of the previous version are dropped.)
func (s *Service) KickOut(c context.Context, bizid, flowid, uid int64) {
	// 1. kick the consumer out
	s.dao.KickOutConsumer(c, bizid, flowid, uid)
	s.sendTaskLog(c, &model.Task{BusinessID: bizid, FlowID: flowid}, model.LogTypeTaskConsumer, "kickout", uid, "")
	// 2. release the tasks the consumer held
	s.dao.ReleaseByConsumer(c, bizid, flowid, uid)
}
// taskClear purges finished tasks older than three days in batches of 1000,
// sleeping a second between batches, until a batch deletes nothing or errors.
func (s *Service) taskClear() {
	deadline := time.Now().Add(-3 * 24 * time.Hour)
	for {
		rows, err := s.dao.TaskClear(context.Background(), deadline, 1000)
		if err != nil || rows == 0 {
			return
		}
		time.Sleep(time.Second)
	}
}

View File

@@ -0,0 +1,49 @@
package service
import (
"context"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
"go-common/library/queue/databus/report"
)
// sendTaskLog reports a task operation to the audit-log service and logs the
// submission outcome.
func (s *Service) sendTaskLog(c context.Context, task *model.Task, tp int, action string, uid int64, uname string) (err error) {
	content := map[string]interface{}{
		"task": task,
	}
	info := &report.ManagerInfo{
		UID:      uid,
		Uname:    uname,
		Business: model.LogBusinessTask,
		Type:     tp,
		Oid:      task.ID,
		Action:   action,
		Ctime:    time.Now(),
		Index:    []interface{}{task.BusinessID, task.FlowID, task.State},
		Content:  content,
	}
	err = report.Manager(info)
	log.Info("sendTaskLog logData(%+v) errmsg(%v)", info, err)
	return
}
// sendWeightLog reports a task's weight-calculation snapshot to the audit-log
// service under the fixed aegis-job system account (uid 399).
func (s *Service) sendWeightLog(c context.Context, task *model.Task, wl *model.WeightLog) (err error) {
	content := map[string]interface{}{
		"weightlog": wl,
	}
	info := &report.ManagerInfo{
		UID:      399,
		Uname:    "aegis-job",
		Business: model.LogBusinessTask,
		Type:     model.LogTYpeTaskWeight,
		Oid:      task.ID,
		Action:   "weight",
		Ctime:    time.Now(),
		Index:    []interface{}{task.BusinessID, task.FlowID, task.State},
		Content:  content,
	}
	err = report.Manager(info)
	log.Info("sendWeightLog logData(%+v) errmsg(%v)", info, err)
	return
}

View File

@@ -0,0 +1,362 @@
package service
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"go-common/app/job/main/aegis/model"
"go-common/library/log"
)
// WeightManager recalculates and persists task weights for one business/flow
// pair and re-queues unowned tasks for assignment. weightProc produces work,
// weightWatcher is the single consumer of the channels below.
type WeightManager struct {
	s                  *Service
	businessID, flowID int64
	toplen, batchlen   int64 // top-N synced to the DB / redis scan batch size
	minute             int64 // recalculation cycle length in minutes
	// cache
	topweightList []*model.WeightItem // top-N snapshot from the previous round
	// channel
	redisWeightList chan *model.WeightItem
	dbWeightList    chan *model.WeightItem // items whose weight must be written to the DB
	asignList       chan *model.Task       // unowned tasks awaiting assignment
	//dbstartSig, dbstopSig chan struct{}
	redisFinish chan struct{} // signals the end of one redis recalculation round
	//closeChan chan struct{}
	close bool // stop flag read by weightProc/weightWatcher
}
// _defaultopt supplies fallback sizing for weight managers whose options are
// missing or non-positive: list lengths, channel capacities and the cycle
// length in minutes.
var _defaultopt = &model.WeightOPT{
	TopListLen:   1000,
	BatchListLen: 1000,
	RedisListLen: 10000,
	DbListLen:    2000,
	AssignLen:    100,
	Minute:       3,
}
// NewWeightManager builds and starts a weight calculator for one
// business/flow pair. Non-positive option fields fall back to _defaultopt;
// when key ("bizid-flowid") is given it overrides opt's business/flow ids.
//
// Fix: when opt is nil the defaults are now copied instead of aliased — the
// previous code assigned opt = _defaultopt and the key override below could
// then mutate the shared default for every later caller.
func NewWeightManager(s *Service, opt *model.WeightOPT, key string) (wm *WeightManager) {
	if opt == nil {
		cp := *_defaultopt
		opt = &cp
	} else {
		// normalize each sizing field to the default when unset/non-positive
		defaultTo := func(v *int64, d int64) {
			if *v <= 0 {
				*v = d
			}
		}
		defaultTo(&opt.TopListLen, _defaultopt.TopListLen)
		defaultTo(&opt.BatchListLen, _defaultopt.BatchListLen)
		defaultTo(&opt.RedisListLen, _defaultopt.RedisListLen)
		defaultTo(&opt.DbListLen, _defaultopt.DbListLen)
		defaultTo(&opt.AssignLen, _defaultopt.AssignLen)
		defaultTo(&opt.Minute, _defaultopt.Minute)
	}
	if len(key) > 0 {
		bizid, flowid := parseKey(key)
		opt.BusinessID = int64(bizid)
		opt.FlowID = int64(flowid)
	}
	wm = &WeightManager{
		s:               s,
		businessID:      opt.BusinessID,
		flowID:          opt.FlowID,
		toplen:          opt.TopListLen,
		batchlen:        opt.BatchListLen,
		minute:          opt.Minute,
		redisWeightList: make(chan *model.WeightItem, opt.RedisListLen),
		dbWeightList:    make(chan *model.WeightItem, opt.DbListLen),
		asignList:       make(chan *model.Task, opt.AssignLen),
		redisFinish:     make(chan struct{}),
	}
	go wm.weightProc()
	go wm.weightWatcher()
	log.Info("启动权重计算器 bizid(%d) flowid(%d) opt(%+v)", wm.businessID, wm.flowID, opt)
	return
}
// parseKey splits a "businessID-flowID" cache key into its two integer parts.
// A key without a '-' separator now yields (0, 0) instead of panicking on the
// key[:-1] slice; malformed numeric parts fall back to 0, matching the old
// ignored strconv.Atoi errors.
func parseKey(key string) (bizid, flowid int) {
	pos := strings.Index(key, "-")
	if pos < 0 {
		return
	}
	bizid, _ = strconv.Atoi(key[:pos])
	flowid, _ = strconv.Atoi(key[pos+1:])
	return
}
// startWeightManager creates one weight calculator per currently active
// business/flow pair, keyed by the "bizid-flowid" cache key.
func (s *Service) startWeightManager() {
	s.wmHash = make(map[string]*WeightManager)
	for key := range s.newactiveBizFlow {
		bizid, _ := parseKey(key)
		s.wmHash[key] = NewWeightManager(s, s.getWeightOpt(bizid), key)
	}
}
// weightProc periodically recalculates weights, preferring the redis pipeline
// and falling back to the (not yet implemented) DB pipeline on error. One
// round every `minute` minutes until close is set.
// NOTE(review): close is a plain bool read here and presumably written from
// another goroutine without synchronization — confirm whether that race
// matters for shutdown.
func (w *WeightManager) weightProc() {
	for !w.close {
		if err := w.weightRedisProcess(); err != nil {
			w.weightDBProcess()
		}
		time.Sleep(time.Duration(w.minute) * time.Minute)
	}
}
// weightWatcher is the single consumer of the manager's channels: it flushes
// the top list when a recalculation round finishes, persists individual
// weights to redis/DB, and performs task assignment. Runs until close is set.
func (w *WeightManager) weightWatcher() {
	for !w.close {
		select {
		case <-w.redisFinish: // round done: push the top-weighted batch to the DB
			log.Info("redisFinish(%d-%d:%d)", w.businessID, w.flowID, w.toplen)
			w.handleRedisFinish(context.Background())
		case wi := <-w.redisWeightList:
			w.handleRedisWeightList(context.Background(), wi)
		case wi := <-w.dbWeightList:
			w.handleDBWeightList(context.Background(), wi)
		case task := <-w.asignList:
			w.handleAssign(context.Background(), task)
		}
	}
}
// weightRedisProcess runs one redis-based recalculation round: it snapshots
// the pending tasks into a union set, walks the set in batches of batchlen,
// recomputes each item's weight and writes it back to redis, then signals
// redisFinish (blocking until weightWatcher receives it) and drops the union
// set.
func (w *WeightManager) weightRedisProcess() (err error) {
	var c = context.Background()
	if err = w.s.dao.CreateUnionSet(c, w.businessID, w.flowID); err != nil {
		return
	}
	var (
		start = int64(0)
		stop  = w.batchlen
	)
	for {
		wis, err := w.s.dao.RangeUinonSet(c, w.businessID, w.flowID, start, stop)
		if err != nil {
			return err
		}
		log.Info("weightRedisProcess length(%d) start(%d) stop(%d)", len(wis), start, stop)
		start += w.batchlen
		stop += w.batchlen
		if len(wis) == 0 {
			break
		}
		for _, wi := range wis {
			// skip==true means the backing task could not be loaded
			if w.caculateWeight(c, wi) {
				log.Warn("weightRedisProcess 任务未找到 wi(%+v)", wi)
				continue
			}
			w.s.dao.SetWeight(c, w.businessID, w.flowID, wi.ID, wi.Weight)
		}
		// throttle between batches
		time.Sleep(time.Second)
	}
	w.redisFinish <- struct{}{}
	w.s.dao.DeleteUinonSet(c, w.businessID, w.flowID)
	return nil
}
// caculateWeight computes the weight of one item. It loads the task, queues
// it for re-assignment when unowned, then sums the range-based part (wait
// time / fans / group) and the equal-match part (mid / task id) from the
// cached configs, recording the breakdown on a WeightLog for auditing.
// skip is true when the task could not be loaded and the item is ignored.
func (w *WeightManager) caculateWeight(c context.Context, wi *model.WeightItem) (skip bool) {
	task, err := w.s.dao.GetTask(c, wi.ID)
	if err != nil {
		return true
	}
	w.reAssign(c, task)
	// whole minutes the task has been waiting since creation
	wm := int64(time.Since(task.Ctime.Time()).Minutes())
	wl := &model.WeightLog{
		UPtime:   time.Now().Format("2006-01-02 15:04:05"),
		Mid:      task.MID,
		Fans:     task.Fans,
		Group:    task.Group,
		WaitTime: model.WaitTime(task.Ctime.Time()),
	}
	var wtRange, wtEqual int64
	wci, ewc := w.s.getWeightCache(c, task.BusinessID, task.FlowID)
	if wci != nil {
		wtRange = w.rangeCaculate(c, wci, task, wm, wl)
	}
	if ewc != nil {
		wtEqual = w.equalCaculate(c, ewc, task, wm, wl)
	}
	wi.Weight = wtRange + wtEqual
	wl.Weight = wi.Weight
	w.s.sendWeightLog(c, task, wl)
	return
}
// rangeCaculate computes the range-configured part of a task's weight from
// three optional dimensions: "waittime" (stepped accumulation across every
// threshold tier already passed), "fans" (highest matched tier, scaled by the
// number of waited cycles) and "group" (each matched group id adds its
// weight, scaled by waited cycles). wt is the wait time in minutes; the
// partial weights are also recorded on wl.
func (w *WeightManager) rangeCaculate(c context.Context, wci map[string]*model.RangeWeightConfig, task *model.Task, wt int64, wl *model.WeightLog) (weight int64) {
	var wtWeight, fanWeight, groupWeight int64
	if cfg, ok := wci["waittime"]; ok {
		if wtlen := len(cfg.Range); wtlen > 0 { // wait time: also accumulate the weight of every earlier tier
			// scan from the highest threshold down to find the current tier
			for i := wtlen - 1; i >= 0; i-- {
				if wt >= cfg.Range[i].Threshold { // tier hit
					// cycles spent inside the current tier
					wtWeight += cfg.Range[i].Weight * ((wt - cfg.Range[i].Threshold) / w.minute)
					// add the full spans of tiers 0 .. i-1
					for j := 0; j <= i-1; j++ {
						wtWeight += cfg.Range[j].Weight * ((cfg.Range[j+1].Threshold - cfg.Range[j].Threshold) / w.minute)
					}
					break
				}
			}
		}
	}
	if cfg, ok := wci["fans"]; ok {
		if fanLen := len(cfg.Range); fanLen > 0 {
			// only the highest matched fans tier counts
			for i := fanLen - 1; i >= 0; i-- {
				if task.Fans >= cfg.Range[i].Threshold {
					fanWeight = cfg.Range[i].Weight * (wt / w.minute)
					break
				}
			}
		}
	}
	if cfg, ok := wci["group"]; ok {
		if len(cfg.Range) > 0 {
			// task.Group is a comma-separated id list; each matched group
			// overwrites groupWeight (last match wins)
			for _, item := range cfg.Range {
				if strings.Contains(","+task.Group+",", fmt.Sprintf(",%d,", item.Threshold)) {
					groupWeight = item.Weight * (wt / w.minute)
				}
			}
		}
	}
	weight = wtWeight + fanWeight + groupWeight
	wl.WaitWeight = wtWeight
	wl.FansWeight = fanWeight
	wl.GroupWeight = groupWeight
	return
}
// equalCaculate computes the equality-configured part of a task's weight:
// config items naming "mid" match the uploader id, items naming "taskid" /
// "task_id" match the task id. A cyclic item scales its weight by the waited
// cycles (wt / minute); otherwise the weight is added once. Every hit is
// logged and recorded on wl.
func (w *WeightManager) equalCaculate(c context.Context, ewc []*model.EqualWeightConfig, task *model.Task, wt int64, wl *model.WeightLog) (weight int64) {
	// contains reports whether id appears in the comma-separated id list.
	contains := func(ids string, id int64) bool {
		return strings.Contains(","+ids+",", fmt.Sprintf(",%d,", id))
	}
	var midweight, taskweight int64
	for _, item := range ewc {
		var acc *int64
		switch {
		case item.Name == "mid" && contains(item.IDs, task.MID):
			acc = &midweight
		case (item.Name == "taskid" || item.Name == "task_id") && contains(item.IDs, task.ID):
			acc = &taskweight
		default:
			continue
		}
		if item.Type == model.WeightTypeCycle {
			*acc += item.Weight * (wt / w.minute)
		} else {
			*acc += item.Weight
		}
		log.Info("equalCaculate task(%+v) hit (%+v)", task, item)
		wl.ConfigItems = append(wl.ConfigItems, &model.ConfigItem{
			Name:  item.Name,
			Desc:  item.Description,
			Uname: item.Uname,
		})
	}
	weight = midweight + taskweight
	wl.EqualWeight = weight
	return
}
// reAssign queues an unowned task (UID == 0) for assignment evaluation,
// giving up after 10ms when the assignment channel is full.
//
// Fixes: the "channel full" warning previously reported len(w.dbWeightList)
// instead of the actual full channel w.asignList; the timeout timer is also
// stopped after a successful send instead of being left running.
func (w *WeightManager) reAssign(c context.Context, task *model.Task) {
	if task.UID != 0 {
		return
	}
	timer := time.NewTimer(10 * time.Millisecond)
	select {
	case w.asignList <- task:
		timer.Stop()
		log.Info("指派判断 reAssign%+v", task)
	case <-timer.C:
		log.Warn("chan asignList full,len:%d", len(w.asignList))
	}
}
// weightDBProcess is the fallback weight pipeline that would recalculate
// weights using only the DB; it is not implemented yet and is a no-op.
func (w *WeightManager) weightDBProcess() (err error) {
	// TODO: DB-only weight update strategy
	return nil
}
// handleAssign tries to assign a queued task: when the assignment policy
// accepts the task and exactly one DB row is updated, the task cache is
// refreshed. Assignment failures are swallowed (err is always nil, matching
// the original behavior).
func (w *WeightManager) handleAssign(c context.Context, task *model.Task) (err error) {
	if !w.s.setAssign(c, task) {
		return
	}
	if rows, aerr := w.s.dao.AssignTask(c, task); aerr == nil && rows == 1 {
		w.s.dao.SetTask(c, task)
	}
	return
}
// handleRedisWeightList persists one recalculated weight item to redis.
func (w *WeightManager) handleRedisWeightList(c context.Context, wi *model.WeightItem) (err error) {
	return w.s.dao.SetWeight(c, w.businessID, w.flowID, wi.ID, wi.Weight)
}
// handleDBWeightList persists one recalculated weight item to the database.
func (w *WeightManager) handleDBWeightList(c context.Context, wi *model.WeightItem) (rows int64, err error) {
	return w.s.dao.SetWeightDB(c, wi.ID, wi.Weight)
}
// handleRedisFinish persists the weight snapshot once a redis recalculation
// round completes: the current top-N items are queued for DB update, and any
// item present in the previous round's top-N but absent now is refreshed from
// redis and queued too, so its DB row does not go stale.
func (w *WeightManager) handleRedisFinish(c context.Context) (err error) {
	log.Info("handleRedisFinish")
	wis, err := w.s.dao.TopWeights(c, w.businessID, w.flowID, w.toplen)
	if err != nil {
		return
	}
	tempMap := make(map[int64]struct{})
	for _, wi := range wis {
		log.Info("handleRedisFinish:(%+v)", wi)
		w.addToDBList(wi)
		tempMap[wi.ID] = struct{}{}
	}
	// items that dropped out of the top list since the previous round
	for _, wi := range w.topweightList {
		if _, ok := tempMap[wi.ID]; !ok {
			weight, err := w.s.dao.GetWeight(c, w.businessID, w.flowID, wi.ID)
			if err != nil {
				continue
			}
			wi.Weight = weight
			w.addToDBList(wi)
		}
	}
	w.topweightList = wis
	log.Info("handleRedisFinish:topweightList(%d)", len(wis))
	return
}
// addToDBList queues a weight item for database persistence, giving up after
// 10ms when the channel is full.
// Fix: the timeout timer is stopped after a successful send instead of being
// left to fire in the background.
func (w *WeightManager) addToDBList(wi *model.WeightItem) {
	timer := time.NewTimer(10 * time.Millisecond)
	select {
	case w.dbWeightList <- wi:
		timer.Stop()
		log.Info("addToDBList (%+v)", wi)
	case <-timer.C:
		log.Warn("chan dbWeightList full,len:%d", len(w.dbWeightList))
	}
}