Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

21
app/job/main/push/BUILD Normal file
View File

@@ -0,0 +1,21 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/push/cmd:all-srcs",
"//app/job/main/push/conf:all-srcs",
"//app/job/main/push/dao:all-srcs",
"//app/job/main/push/http:all-srcs",
"//app/job/main/push/model:all-srcs",
"//app/job/main/push/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,119 @@
# push-job
### v2.0.5
1. waitgroup
### v2.0.4
1. 修改定时delete的任务
### v2.0.3
1. fix close addTaskCh
### v2.0.2
1. mid文件目录增加目录层级防止同一个目录下文件数过多
### v2.0.1
1. 优化全量推送建任务速度
### v2.0.0
1. using push grpc
2. remove abtest code
### v1.8.0
1. 删除老上报
### v1.7.3
1. fix 老上报 HD 归类错误
### v1.7.2
1. stop abtest
### v1.7.1
1. 优化生成abtest池子
### v1.7.0
1. abtest
### v1.6.0
1. 支持图片推送字段
### v1.5.0
1. 对接数据平台用户画像
### v1.4.2
1. 调整callback写入速率
### v1.4.1
1. 优化-批量写入token缓存
### v1.4.0
1. 刷新缓存的时候增加token级别的缓存
### v1.3.0
1. 推送服务切换到push-service
### v1.2.1
1. 使用 go-common/env
### v1.2.0
1. 接bm
### v1.1.9
1. 迁移model至push-service
### v1.1.8
1. 定期删除 task
### v1.1.7
1. 修复 write closed chan
### v1.1.6
1. remove consumer uninstall mi token
### v1.1.5
1. 刷新token后释放内存
### v1.1.4
1. 项目迁移到main目录下
### v1.1.3
1. 上报增加设备信息
2. 去掉获取小米推送结果
### v1.1.2
1. 定时删除callback数据
2. 定时刷新上报缓存
### v1.1.1
1. 更改报警方式为企业微信
### v1.1.0
1. add push callback
### v1.0.15
1. 不处理新版本的老的上报数据
2. 更改prom为go-common中的对象
### v1.0.14
1. fix kafka consume
### v1.0.13
1. fix kafka consume
### v1.0.12
1. fix kafka commit
### v1.0.11
1. 优化 kafka 消费的写法
### v1.0.10
1. fix kafka consumer
### v1.0.9
1. pull push result
### v1.0.4
1. 升级 trace / http client auto sign
### v1.0.0
1. 项目初始化

View File

@@ -0,0 +1,10 @@
# Owner
liweijia
zhapuyu
renwei
# Author
wangjian
# Reviewer
zhapuyu

16
app/job/main/push/OWNERS Normal file
View File

@@ -0,0 +1,16 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- liweijia
- renwei
- wangjian
- zhapuyu
labels:
- job
- job/main/push
- main
options:
no_parent_owners: true
reviewers:
- wangjian
- zhapuyu

View File

@@ -0,0 +1,10 @@
#### push-job
##### 项目简介
> 1.推送服务的job
##### 编译环境
> 请使用golang v1.7.x以上版本编译执行。
##### 依赖包
> 1.公共包go-common

View File

@@ -0,0 +1,42 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = ["push-job-test.toml"],
importpath = "go-common/app/job/main/push/cmd",
tags = ["automanaged"],
deps = [
"//app/job/main/push/conf:go_default_library",
"//app/job/main/push/http:go_default_library",
"//app/job/main/push/service:go_default_library",
"//library/log:go_default_library",
"//library/net/trace:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,50 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"time"
"go-common/app/job/main/push/conf"
"go-common/app/job/main/push/http"
"go-common/app/job/main/push/service"
"go-common/library/log"
"go-common/library/net/trace"
)
// main is the push-job entry point: it parses flags, loads configuration,
// initializes logging and tracing, starts the service and HTTP server,
// then blocks waiting for shutdown signals.
func main() {
	flag.Parse()
	// Configuration must load before anything else can start.
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Log)
	defer log.Close()
	trace.Init(conf.Conf.Tracer)
	defer trace.Close()
	log.Info("push-job start")
	svc := service.New(conf.Conf)
	http.Init(conf.Conf, svc)
	// Blocks until a termination signal arrives (see listenSignals).
	listenSignals(svc)
}
// listenSignals blocks until a termination signal arrives, then shuts the
// service down gracefully. SIGHUP is reserved for config reload (TODO);
// any other delivered signal simply exits the loop.
func listenSignals(svc *service.Service) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		s := <-c
		log.Info("push-job get a signal: %s", s.String())
		switch s {
		// NOTE: SIGSTOP was removed from this case list — it can never be
		// caught by a process and was not registered with signal.Notify
		// above, so the branch was dead code.
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			svc.Close()
			log.Info("push-job stop")
			// Give in-flight work and log output a moment to drain.
			time.Sleep(time.Second)
			return
		case syscall.SIGHUP:
			// TODO: reload
		default:
			return
		}
	}
}

View File

@@ -0,0 +1,124 @@
version = "1.0.0"
user = "nobody"
pid = "/tmp/push-job.pid"
dir = "./"
family = "push-job"
[log]
dir = "/data/log/push-job/"
[HTTPServer]
addr = "0.0.0.0:7021"
maxListen = 1000
timeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
[HTTPClient]
dial = "50ms"
timeout = "1s"
keepAlive = "60s"
key = "f265dcfa28272742"
secret = "437facc22dc8698b5544669bcc12348d"
[HTTPClient.breaker]
window ="10s"
sleep ="10ms"
bucket = 10
ratio = 0.5
request = 100
[dpClient]
key = "17d515f7fa6324a19cfc6546d17ddca7"
secret = "eee2d709e54600ce147a4f522dc3c86e"
dial = "2s"
timeout = "30s"
keepAlive = "60s"
[dpClient.breaker]
window = "10s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[memcache]
name = "push"
proto = "tcp"
addr = "172.18.33.60:11228"
idle = 1000
active = 1000
dialTimeout = "10s"
readTimeout = "10s"
writeTimeout = "10s"
idleTimeout = "30s"
[reportSub]
key = "0QEO9F8JuuIxZzNDvklH"
secret="0QEO9F8JuuIxZzNDvklI"
group= "PushReport-Push-S"
topic= "PushReport-T"
action="sub"
name = "push-job/report-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[callbackSub]
key = "9765cdac5894f2ba"
secret="1448f5f2cd6029f6af6c5d438cd31edd"
group= "PushCallback-MainWebSvr-S"
topic= "PushCallback-T"
action="sub"
name = "push-callback-sub"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 100
active = 100
dialTimeout = "1s"
readTimeout = "60s"
writeTimeout = "1s"
idleTimeout = "10s"
[mysql]
addr = "172.16.33.205"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_push?timeout=1m&readTimeout=1m&writeTimeout=1m&parseTime=true&loc=Local&charset=utf8,utf8mb4"
active = 10
idle = 5
queryTimeout = "1m"
execTimeout = "1m"
tranTimeout = "1m"
[mysql.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[wechat]
token = "GYQeuDWBbAsCNeGz"
secret = "ZKpmgINTkianyMbMixyxcPQjMCSHCDrk"
username = "wangjian"
[job]
reportTicker = "5s"
delInvalidReportInterval = "10000h"
loadTaskInteval = "10ms"
pullResultIntervalHour = 8
delCallbackInterval = 15 # 单位:天
delTaskInterval = 30 # 单位:天
syncReportCacheWeek = 3 # report cache 同步时间,每周三
syncReportCacheHour = 2 # 两点
reportShard = 5
callbackShard = 5
pretreatmentTaskShard = 3
taskGoroutines = 5
limitPerTask = 100000
pushPartSize = 1000
pushPartChanSize = 10
mountDir = "/data/storage/"
pretreatTask = true
dpPollingTime = "30m"

View File

@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/job/main/push/conf",
tags = ["automanaged"],
deps = [
"//library/cache/memcache:go_default_library",
"//library/conf:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/rpc/warden:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,115 @@
package conf
import (
"errors"
"flag"
"go-common/library/cache/memcache"
"go-common/library/conf"
"go-common/library/database/sql"
xlog "go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/rpc/warden"
"go-common/library/net/trace"
"go-common/library/queue/databus"
xtime "go-common/library/time"
"github.com/BurntSushi/toml"
)
// Config is the root push-job configuration, decoded from TOML (local
// file or remote config center).
type Config struct {
	Env         string
	Log         *xlog.Config
	Tracer      *trace.Config
	PushRPC     *warden.ClientConfig // gRPC client to push-service
	HTTPServer  *bm.ServerConfig
	HTTPClient  *bm.ClientConfig
	DpClient    *bm.ClientConfig // HTTP client for the data-platform API
	ReportSub   *databus.Config  // databus subscription for push reports
	CallbackSub *databus.Config  // databus subscription for push callbacks
	MySQL       *sql.Config
	Memcache    *mc
	Wechat      *wechat
	Job         *job
}

// mc wraps the memcache config so more fields can be added later.
type mc struct {
	*memcache.Config
}

// wechat holds credentials for enterprise-WeChat alerting (see dao.SendWechat).
type wechat struct {
	Token    string
	Secret   string
	Username string
}

// job holds tunables for the background jobs. Interval/shard semantics
// follow the sample TOML: DelCallbackInterval and DelTaskInterval are in
// days; SyncReportCacheWeek is the weekday and SyncReportCacheHour the
// hour of the weekly report-cache sync.
type job struct {
	ReportTicker             xtime.Duration
	DelInvalidReportInterval xtime.Duration
	LoadTaskInteval          xtime.Duration // NOTE(review): "Inteval" typo kept — it is the TOML key name
	PullResultIntervalHour   int
	DelCallbackInterval      int // days
	DelTaskInterval          int // days
	SyncReportCacheWeek      int // weekday of the weekly cache sync
	SyncReportCacheHour      int // hour of the weekly cache sync
	ReportShard              int
	CallbackShard            int
	PretreatmentTaskShard    int
	TaskGoroutines           int
	LimitPerTask             int
	PushPartSize             int
	PushPartChanSize         int
	MountDir                 string // storage mount point for mid files
	PretreatTask             bool
	DpPollingTime            xtime.Duration
}

var (
	// confPath is the local config file path, set by the -conf flag.
	confPath string
	// client is the remote config-center client (used when -conf is empty).
	client *conf.Client
	// Conf is the package-wide configuration instance.
	Conf = &Config{}
)
func init() {
	flag.StringVar(&confPath, "conf", "", "config path")
}

// Init loads configuration from the local file given by -conf, or from
// the remote config center when no path was provided.
func Init() (err error) {
	if confPath != "" {
		return local()
	}
	return remote()
}

// local decodes the TOML file at confPath into Conf.
func local() (err error) {
	_, err = toml.DecodeFile(confPath, &Conf)
	return
}

// remote connects to the config center and performs the initial load.
func remote() (err error) {
	if client, err = conf.New(); err != nil {
		return
	}
	err = load()
	return
}

// load fetches the current TOML from the config center, decodes it into a
// fresh Config, and swaps it into Conf atomically at the value level.
func load() (err error) {
	var (
		s       string
		ok      bool
		tmpConf *Config
	)
	if s, ok = client.Toml2(); !ok {
		return errors.New("load config center error")
	}
	if _, err = toml.Decode(s, &tmpConf); err != nil {
		return errors.New("could not decode config")
	}
	// Decode into a temporary so a failed decode never leaves Conf
	// half-updated.
	*Conf = *tmpConf
	return
}

View File

@@ -0,0 +1,65 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"dao_test.go",
"memcache_test.go",
"mysql_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/push/conf:go_default_library",
"//app/service/main/push/model:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"dao.go",
"dataplatform.go",
"memcache.go",
"mysql.go",
"mysql_task.go",
"wechat.go",
],
importpath = "go-common/app/job/main/push/dao",
tags = ["automanaged"],
deps = [
"//app/admin/main/push/model:go_default_library",
"//app/job/main/push/conf:go_default_library",
"//app/job/main/push/model:go_default_library",
"//app/service/main/push/model:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/stat/prom:go_default_library",
"//library/sync/errgroup:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,76 @@
package dao
import (
"context"
"go-common/app/job/main/push/conf"
"go-common/library/cache/memcache"
xsql "go-common/library/database/sql"
bm "go-common/library/net/http/blademaster"
"go-common/library/stat/prom"
)
// Dao bundles every storage/transport dependency of push-job: MySQL,
// memcache, the generic HTTP client, and the data-platform HTTP client,
// plus the prepared statements used by the hot paths.
type Dao struct {
	c                    *conf.Config
	db                   *xsql.DB
	mc                   *memcache.Pool
	httpClient           *bm.Client
	dpClient             *bm.Client // client for the data-platform API
	delCallbacksStmt     *xsql.Stmt
	delTasksStmt         *xsql.Stmt
	reportLastIDStmt     *xsql.Stmt
	reportsByRangeStmt   *xsql.Stmt
	updateTaskStatusStmt *xsql.Stmt
	updateTaskStmt       *xsql.Stmt
	updateDpCondStmt     *xsql.Stmt
}

var (
	// errorsCount / infosCount back the PromError / PromInfo helpers.
	errorsCount = prom.BusinessErrCount
	infosCount  = prom.BusinessInfoCount
)
// New creates a dao instance and prepares the frequently used statements.
// The SQL constants live in mysql.go / mysql_task.go (note the historical
// "_upadte..." typo in two of the constant names there).
func New(c *conf.Config) (d *Dao) {
	d = &Dao{
		c:          c,
		db:         xsql.NewMySQL(c.MySQL),
		mc:         memcache.NewPool(c.Memcache.Config),
		httpClient: bm.NewClient(c.HTTPClient),
		dpClient:   bm.NewClient(c.DpClient),
	}
	d.delCallbacksStmt = d.db.Prepared(_delCallbacksSQL)
	d.delTasksStmt = d.db.Prepared(_delTasksSQL)
	d.reportLastIDStmt = d.db.Prepared(_reportLastIDSQL)
	d.reportsByRangeStmt = d.db.Prepared(_reportsByRangeSQL)
	d.updateTaskStatusStmt = d.db.Prepared(_upadteTaskStatusSQL)
	d.updateTaskStmt = d.db.Prepared(_upadteTaskSQL)
	d.updateDpCondStmt = d.db.Prepared(_updateDpCondSQL)
	return
}
// PromError increments the business error counter for name.
func PromError(name string) {
	errorsCount.Incr(name)
}

// PromInfo increments the business info counter for name.
func PromInfo(name string) {
	infosCount.Incr(name)
}

// Ping reports the health of the db and memcache; it returns the first
// failure encountered.
func (d *Dao) Ping(c context.Context) (err error) {
	if err = d.db.Ping(c); err != nil {
		return
	}
	err = d.pingMC(c)
	return
}

// Close releases the db and memcache resources held by the dao.
func (d *Dao) Close() {
	d.db.Close()
	d.mc.Close()
}

View File

@@ -0,0 +1,60 @@
package dao
import (
"context"
"flag"
"os"
"path/filepath"
"testing"
"go-common/app/job/main/push/conf"
. "github.com/smartystreets/goconvey/convey"
)
// d is the package-wide Dao under test, initialized in TestMain.
var d *Dao

// TestMain wires configuration (remote config center when DEPLOY_ENV=uat,
// otherwise the local cmd/push-job-test.toml) and builds the shared Dao.
// NOTE(review): credentials are hard-coded in source; consider sourcing
// them from the environment.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") == "uat" {
		flag.Set("app_id", "main.web-svr.push-job")
		flag.Set("conf_token", "4de43ccf842485eea314fd8a48f1ee84")
		flag.Set("tree_id", "5220")
		flag.Set("conf_version", "docker-1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		dir, _ := filepath.Abs("../cmd/push-job-test.toml")
		flag.Set("conf", dir)
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}

// Test_Wechat sends a real alert message (integration test — hits bap).
func Test_Wechat(t *testing.T) {
	Convey("test wechat message", t, func() {
		err := d.SendWechat("test send wechat message")
		So(err, ShouldBeNil)
	})
}

// Test_DpDownloadFile downloads a public file through the dp client
// (integration test — needs network access).
func Test_DpDownloadFile(t *testing.T) {
	Convey("data platform download file", t, func() {
		_, err := d.DpDownloadFile(context.Background(), "https://raw.githubusercontent.com/Bilibili/discovery/master/README.md")
		So(err, ShouldBeNil)
	})
}

// Test_DpSubmitQuery submits a sample query; an error is expected here
// (the assertion is ShouldNotBeNil).
func Test_DpSubmitQuery(t *testing.T) {
	Convey("data platform submit query", t, func() {
		url, err := d.DpSubmitQuery(context.Background(), "select device_token from basic.dws_push_buvid where log_date='20180707'")
		So(err, ShouldNotBeNil)
		t.Logf("url(%v)", url)
	})
}

View File

@@ -0,0 +1,153 @@
package dao
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"time"
"go-common/app/job/main/push/model"
"go-common/library/ecode"
"go-common/library/log"
)
const (
	// _dpSubmitQueryURL is the data-platform endpoint for submitting queries.
	_dpSubmitQueryURL = "http://berserker.bilibili.co/avenger/api/74/query"
)

var (
	// dpSignParams lists the parameters included in the request signature
	// (see Dao.encode / Dao.dpSign).
	dpSignParams = []string{"appKey", "timestamp", "version"}
)
// DpSubmitQuery submits a query to the data-platform API and returns the
// URL to poll for job status (see DpCheckJob).
// NOTE(review): the named return "statusRUL" is a typo for statusURL.
func (d *Dao) DpSubmitQuery(ctx context.Context, query string) (statusRUL string, err error) {
	params := d.params()
	params.Set("query", query)
	var res struct {
		Code      int    `json:"code"`
		Msg       string `json:"msg"`
		StatusURL string `json:"jobStatusUrl"`
	}
	if err = d.newRequest(ctx, _dpSubmitQueryURL, params, &res); err != nil {
		log.Error("d.DpSubmitQuery newRequest url(%s) error(%v)", _dpSubmitQueryURL+"?"+params.Encode(), err)
		return
	}
	if res.Code != http.StatusOK {
		log.Error("d.DpSubmitQuery newRequest error code:%d ; url(%s) ", res.Code, _dpSubmitQueryURL+"?"+params.Encode())
		// NOTE(review): this ecode assignment is a dead store — it is
		// overwritten on the next line, so callers always see the
		// fmt.Errorf value. Removing it would also orphan the file's
		// ecode import; fix both together.
		err = ecode.Int(res.Code)
		err = fmt.Errorf("code(%d) msg(%s)", res.Code, res.Msg)
		return
	}
	statusRUL = res.StatusURL
	return
}
// DpCheckJob polls the given job-status URL (from DpSubmitQuery) and
// decodes the result. On a non-200 application code it returns both the
// decoded response and a non-nil error.
func (d *Dao) DpCheckJob(ctx context.Context, url string) (res *model.DpCheckJobResult, err error) {
	params := d.params()
	if err = d.newRequest(ctx, url, params, &res); err != nil {
		log.Error("d.DpCheckJob newRequest error(%v)", err)
		return
	}
	if res.Code != http.StatusOK {
		log.Error("d.DpCheckJob newRequest error code:%d ; url(%s) ", res.Code, url+"?"+params.Encode())
		err = fmt.Errorf("code(%d) msg(%s)", res.Code, res.Msg)
	}
	return
}
// DpDownloadFile fetches the raw bytes at url through the data-platform
// client. The request-build error is deliberately ignored: url comes from
// the data-platform response and GET with a nil body cannot fail to build
// for a well-formed URL.
func (d *Dao) DpDownloadFile(ctx context.Context, url string) (content []byte, err error) {
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	if content, err = d.dpClient.Raw(ctx, req); err != nil {
		log.Error("d.dpClient.Raw(%s) error(%v)", url, err)
	}
	return
}
// params returns the base query parameters every data-platform request
// must carry; appKey/timestamp/version also feed the signature.
func (d *Dao) params() url.Values {
	params := url.Values{}
	params.Set("appKey", d.c.DpClient.Key)
	params.Set("timestamp", time.Now().Format("2006-01-02 15:04:05"))
	params.Set("version", "1.0")
	params.Set("signMethod", "md5")
	return params
}
// newRequest signs params, issues a GET to url, and decodes the JSON
// response into res.
func (d *Dao) newRequest(c context.Context, url string, params url.Values, res interface{}) (err error) {
	enc, err := d.dpSign(params)
	if err != nil {
		log.Error("url:%s,params:%v", url, params)
		return
	}
	if enc != "" {
		url = url + "?" + enc
	}
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Error("method:%s,url:%s", http.MethodGet, url)
		return
	}
	return d.httpClient.Do(c, req, res)
}
// dpSign builds the signed query string for data-platform requests:
//
//	sign = UPPER(hex(MD5(secret + concat(sorted signing params) + secret)))
//
// appended to the URL-encoded parameters as "&sign=...".
func (d *Dao) dpSign(params url.Values) (query string, err error) {
	// tmp is the full URL-encoded query; signTmp is only the whitelisted
	// signing params concatenated as key+value (see encode).
	tmp := params.Encode()
	signTmp := d.encode(params)
	// url.Values.Encode escapes spaces as '+'; the API expects %20.
	if strings.IndexByte(tmp, '+') > -1 {
		tmp = strings.Replace(tmp, "+", "%20", -1)
	}
	var b bytes.Buffer
	b.WriteString(d.c.DpClient.Secret)
	b.WriteString(signTmp)
	b.WriteString(d.c.DpClient.Secret)
	mh := md5.Sum(b.Bytes())
	var qb bytes.Buffer
	qb.WriteString(tmp)
	qb.WriteString("&sign=")
	qb.WriteString(strings.ToUpper(hex.EncodeToString(mh[:])))
	query = qb.String()
	// err is never set in this implementation; the named return is kept
	// for signature stability with callers that check it.
	return
}
// encode serializes only the whitelisted signing parameters (dpSignParams)
// as "key1value1key2value2..." with keys sorted ascending. Unlike
// url.Values.Encode, no '=' or '&' separators and no escaping are
// applied — this is the raw string fed into the MD5 signature.
// A nil values map yields "".
func (d *Dao) encode(v url.Values) string {
	if v == nil {
		return ""
	}
	keys := make([]string, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b bytes.Buffer
	for _, k := range keys {
		if !isSignParam(k) {
			continue
		}
		for _, val := range v[k] {
			b.WriteString(k)
			b.WriteString(val)
		}
	}
	return b.String()
}

// isSignParam reports whether k is one of the parameters that take part
// in the data-platform signature.
func isSignParam(k string) bool {
	for _, p := range dpSignParams {
		if p == k {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,92 @@
package dao
import (
"context"
"fmt"
"sync"
"time"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/cache/memcache"
"go-common/library/log"
"go-common/library/sync/errgroup"
)
const (
	// _prefixReport is the memcache key template for per-mid report caches.
	_prefixReport = "r_%d"
	// _bulkSize is the number of keys fetched per GetMulti batch.
	_bulkSize = 10
)

// reportKey builds the memcache key for the given mid's report cache.
func reportKey(mid int64) string {
	key := fmt.Sprintf(_prefixReport, mid)
	return key
}
// pingMC checks memcache health by setting a probe key.
// NOTE(review): Expiration is set to the current unix timestamp;
// memcached treats values larger than 30 days as absolute times, so the
// probe item expires immediately. Presumably intentional for a health
// check — confirm.
func (d *Dao) pingMC(c context.Context) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	item := memcache.Item{Key: "ping", Value: []byte{1}, Expiration: int32(time.Now().Unix())}
	err = conn.Set(&item)
	return
}
// ReportsCacheByMids fetches cached reports for the given mids. It
// returns the reports found per mid and the mids with no cache entry
// (missed). Batch-level memcache failures are logged/counted but do not
// fail the call; their mids just end up in missed.
func (d *Dao) ReportsCacheByMids(c context.Context, mids []int64) (res map[int64][]*pushmdl.Report, missed []int64, err error) {
	res = make(map[int64][]*pushmdl.Report, len(mids))
	if len(mids) == 0 {
		return
	}
	// Build all keys up front and remember key -> mid; hits are removed
	// from midmap, so whatever remains afterwards is "missed".
	allKeys := make([]string, 0, len(mids))
	midmap := make(map[string]int64, len(mids))
	for _, mid := range mids {
		k := reportKey(mid)
		allKeys = append(allKeys, k)
		midmap[k] = mid
	}
	group := errgroup.Group{}
	mutex := sync.Mutex{}
	keysLen := len(allKeys)
	// Fetch in batches of _bulkSize keys, each batch on its own goroutine
	// with its own pooled connection.
	for i := 0; i < keysLen; i += _bulkSize {
		// keys is re-declared per iteration so each closure captures its
		// own slice (required pre-Go 1.22).
		var keys []string
		if (i + _bulkSize) > keysLen {
			keys = allKeys[i:]
		} else {
			keys = allKeys[i : i+_bulkSize]
		}
		group.Go(func() error {
			conn := d.mc.Get(context.TODO())
			defer conn.Close()
			replys, err := conn.GetMulti(keys)
			if err != nil {
				PromError("mc:获取上报")
				log.Error("conn.Gets(%v) error(%v)", keys, err)
				return nil
			}
			for k, item := range replys {
				// Cached value shape: nested map flattened into res below.
				rm := make(map[int64]map[string]*pushmdl.Report)
				if err = conn.Scan(item, &rm); err != nil {
					PromError("mc:解析上报")
					log.Error("item.Scan(%s) error(%v)", item.Value, err)
					continue
				}
				// mutex guards res and midmap, which are shared across batches.
				mutex.Lock()
				mid := midmap[k]
				for _, v := range rm {
					for _, r := range v {
						res[mid] = append(res[mid], r)
					}
				}
				delete(midmap, k)
				mutex.Unlock()
			}
			return nil
		})
	}
	// Workers always return nil, so Wait's error carries no information.
	group.Wait()
	missed = make([]int64, 0, len(midmap))
	for _, mid := range midmap {
		missed = append(missed, mid)
	}
	return
}

View File

@@ -0,0 +1,22 @@
package dao
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// Test_Ping checks db + memcache health (integration test — needs live
// backends from the test config).
func Test_Ping(t *testing.T) {
	Convey("ping mc ", t, func() {
		err := d.Ping(context.Background())
		So(err, ShouldBeNil)
	})
}

// Test_ReportsCacheByMids exercises the batched cache lookup; only the
// error is asserted, not the contents.
func Test_ReportsCacheByMids(t *testing.T) {
	Convey("ReportsCacheByMids", t, func() {
		_, _, err := d.ReportsCacheByMids(context.Background(), []int64{0, 1})
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,76 @@
package dao
import (
"context"
"database/sql"
"time"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
const (
	_delCallbacksSQL   = `DELETE FROM push_callbacks where ctime <= ? limit ?`
	_reportLastIDSQL   = `SELECT MAX(id) from push_reports`
	_reportsByRangeSQL = `SELECT id,app_id,platform_id,mid,buvid,device_token,build,time_zone,notify_switch,device_brand,device_model,os_version,extra FROM push_reports WHERE id>? and id<? and dtime=0`
	// for full (all-user) pushes: only reports with notifications enabled
	_reportsTaskAllByRangeSQL = `SELECT platform_id,device_token,build FROM push_reports WHERE id>? and id<=? and app_id=? and dtime=0 and notify_switch=1`
)
// BeginTx begins a database transaction.
func (d *Dao) BeginTx(c context.Context) (*xsql.Tx, error) {
	return d.db.Begin(c)
}

// DelCallbacks deletes up to limit callbacks created at or before t,
// returning the number of rows removed (used by the periodic cleanup job).
func (d *Dao) DelCallbacks(c context.Context, t time.Time, limit int) (rows int64, err error) {
	res, err := d.delCallbacksStmt.Exec(c, t, limit)
	if err != nil {
		log.Error("d.DelCallbacks(%v) error(%v)", t, err)
		PromError("mysql:DelCallbacks")
		return
	}
	rows, err = res.RowsAffected()
	return
}
// ReportLastID gets the latest (maximum) ID in the push_reports table.
// It returns id 0 with a nil error when the table is empty.
func (d *Dao) ReportLastID(c context.Context) (id int64, err error) {
	// MAX(id) on an empty table yields SQL NULL, which cannot be scanned
	// directly into int64; scan into NullInt64 so an empty table maps to
	// id=0 instead of a scan error.
	var last sql.NullInt64
	if err = d.reportLastIDStmt.QueryRow(c).Scan(&last); err != nil {
		if err == sql.ErrNoRows {
			return
		}
		log.Error("d.ReportLastID() error(%v)", err)
		PromError("mysql:ReportLastID")
		return
	}
	id = last.Int64
	return
}
// ReportsByRange gets non-deleted reports whose id lies in the open
// interval (min, max).
func (d *Dao) ReportsByRange(c context.Context, min, max int64) (rs []*pushmdl.Report, err error) {
	rows, err := d.reportsByRangeStmt.Query(c, min, max)
	if err != nil {
		// Log/count the query failure for consistency with the other dao methods.
		log.Error("d.ReportsByRange(%d,%d) error(%v)", min, max, err)
		PromError("mysql:ReportsByRange")
		return
	}
	defer rows.Close()
	for rows.Next() {
		r := &pushmdl.Report{}
		if err = rows.Scan(&r.ID, &r.APPID, &r.PlatformID, &r.Mid, &r.Buvid, &r.DeviceToken,
			&r.Build, &r.TimeZone, &r.NotifySwitch, &r.DeviceBrand, &r.DeviceModel, &r.OSVersion, &r.Extra); err != nil {
			log.Error("d.ReportsByRange Scan() error(%v)", err)
			PromError("mysql:ReportsByRange")
			return
		}
		rs = append(rs, r)
	}
	// rows.Err surfaces iteration errors (e.g. a dropped connection) that
	// Next swallows; without this check a truncated result set would be
	// returned as success.
	if err = rows.Err(); err != nil {
		log.Error("d.ReportsByRange rows.Err() error(%v)", err)
		PromError("mysql:ReportsByRange")
	}
	return
}
// ReportsTaskAll queries notify-enabled reports with id in (min, max] for
// the given app, returning the raw rows so the caller can stream very
// large result sets. The caller owns closing the rows.
func (d *Dao) ReportsTaskAll(c context.Context, min, max, app int64) (rows *xsql.Rows, err error) {
	if rows, err = d.db.Query(c, _reportsTaskAllByRangeSQL, min, max, app); err != nil {
		log.Error("ReportsTaskAll load reports start(%d) end(%d) error(%v)", min, max, err)
	}
	return
}

View File

@@ -0,0 +1,157 @@
package dao
import (
"context"
"database/sql"
"encoding/json"
"strconv"
"time"
pamdl "go-common/app/admin/main/push/model"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
const (
	_addTaskSQL = "INSERT INTO push_tasks (job,type,app_id,business_id,platform,platform_id,title,summary,link_type,link_value,build,sound,vibration,pass_through,mid_file,push_time,expire_time,status,`group`,image_url,extra) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
	_delTasksSQL = `DELETE FROM push_tasks where mtime <= ? limit ?`
	// NOTE(review): "_upadte..." is a typo but both names are referenced
	// from dao.go's New; rename in both files together if cleaned up.
	_upadteTaskStatusSQL = "UPDATE push_tasks SET status=? WHERE id=?"
	// FOR UPDATE locks the selected row so concurrent job instances cannot
	// claim the same task.
	_taskByStatusSQL = "SELECT id,job,type,app_id,business_id,platform,title,summary,link_type,link_value,build,sound,vibration,pass_through,mid_file,progress,push_time,expire_time,status,`group`,image_url,extra FROM push_tasks WHERE status=? AND dtime=0 LIMIT 1 FOR UPDATE"
	_upadteTaskSQL   = "UPDATE push_tasks SET mid_file=?,status=? WHERE id=?"
	// dataplatform condition table
	_txDpCondByStatusSQL   = `SELECT id,job,task,conditions,sql_stmt,status,status_url,file FROM push_dataplatform_conditions WHERE status=? LIMIT 1 FOR UPDATE`
	_updateDpCondSQL       = `UPDATE push_dataplatform_conditions SET job=?,task=?,conditions=?,sql_stmt=?,status=?,status_url=?,file=? WHERE id=?`
	_UpdateDpCondStatusSQL = `UPDATE push_dataplatform_conditions SET status=? WHERE id=?`
)
// DelTasks deletes up to limit tasks modified at or before t, returning
// the number of rows removed (used by the periodic cleanup job).
func (d *Dao) DelTasks(c context.Context, t time.Time, limit int) (rows int64, err error) {
	res, err := d.delTasksStmt.Exec(c, t, limit)
	if err != nil {
		log.Error("d.DelTasks(%v) error(%v)", t, err)
		PromError("mysql:DelTasks")
		return
	}
	rows, err = res.RowsAffected()
	return
}
// TxTaskByStatus claims one task with the given status inside tx (the
// SELECT ... FOR UPDATE row lock prevents other instances from taking
// it). Returns (nil, nil) when no task matches.
func (d *Dao) TxTaskByStatus(tx *xsql.Tx, status int8) (t *pushmdl.Task, err error) {
	var (
		id       int64
		platform string
		build    string
		progress string
		extra    string
		now      = time.Now()
	)
	// Pre-allocate nested structs so the JSON fields below have a target.
	t = &pushmdl.Task{Progress: &pushmdl.Progress{}, Extra: &pushmdl.TaskExtra{}}
	if err = tx.QueryRow(_taskByStatusSQL, status).Scan(&id, &t.Job, &t.Type, &t.APPID, &t.BusinessID, &platform, &t.Title, &t.Summary, &t.LinkType, &t.LinkValue, &build,
		&t.Sound, &t.Vibration, &t.PassThrough, &t.MidFile, &progress, &t.PushTime, &t.ExpireTime, &t.Status, &t.Group, &t.ImageURL, &extra); err != nil {
		t = nil
		if err == sql.ErrNoRows {
			// No claimable task: not an error for callers.
			err = nil
			return
		}
		log.Error("d.TxTaskByStatus() QueryRow(%d,%v) error(%v)", status, now, err)
		PromError("mysql:按状态查询任务")
		return
	}
	// Stored as scalar columns; decoded into the richer Task fields here.
	t.ID = strconv.FormatInt(id, 10)
	t.Platform = pushmdl.SplitInts(platform)
	t.Build = pushmdl.ParseBuild(build)
	if progress != "" {
		if err = json.Unmarshal([]byte(progress), t.Progress); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", progress, err)
			return
		}
	}
	if extra != "" {
		// NOTE(review): unlike progress, an extra decode failure is logged
		// but not returned — the task is still handed back; confirm
		// intentional.
		if err = json.Unmarshal([]byte(extra), t.Extra); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", extra, err)
		}
	}
	return
}
// TxUpdateTaskStatus updates a task's status inside tx.
// NOTE(review): a non-numeric taskID silently becomes id 0 (ParseInt
// error ignored) and the UPDATE matches no rows.
func (d *Dao) TxUpdateTaskStatus(tx *xsql.Tx, taskID string, status int8) (err error) {
	id, _ := strconv.ParseInt(taskID, 10, 64)
	if _, err = tx.Exec(_upadteTaskStatusSQL, status, id); err != nil {
		log.Error("d.TxUpdateTaskStatus() Exec(%s,%d) error(%v)", taskID, status, err)
		PromError("mysql:更新推送任务状态")
	}
	return
}

// UpdateTaskStatus updates a task's status outside a transaction.
func (d *Dao) UpdateTaskStatus(c context.Context, taskID int64, status int8) (err error) {
	if _, err = d.updateTaskStatusStmt.Exec(c, status, taskID); err != nil {
		log.Error("d.updateTaskStatusStmt.Exec(%d,%d) error(%v)", taskID, status, err)
		PromError("mysql:更新推送任务状态")
	}
	return
}
// UpdateTask sets a task's mid file and status.
// NOTE(review): as in TxUpdateTaskStatus, a non-numeric taskID is
// silently treated as id 0.
func (d *Dao) UpdateTask(c context.Context, taskID string, file string, status int8) (err error) {
	id, _ := strconv.ParseInt(taskID, 10, 64)
	if _, err = d.updateTaskStmt.Exec(c, file, status, id); err != nil {
		log.Error("d.updateTaskFileStmt.Exec(%d,%s,%d) error(%v)", id, file, status, err)
		PromError("mysql:更新推送任务file")
	}
	return
}
// AddTask inserts a new push task. Platform is stored as a joined int
// list; Build and Extra are stored as JSON (marshal errors are ignored —
// both types are plain structs, so marshaling cannot fail in practice).
func (d *Dao) AddTask(ctx context.Context, t *pushmdl.Task) (err error) {
	var (
		platform = pushmdl.JoinInts(t.Platform)
		build, _ = json.Marshal(t.Build)
		extra, _ = json.Marshal(t.Extra)
	)
	if _, err = d.db.Exec(ctx, _addTaskSQL, t.Job, t.Type, t.APPID, t.BusinessID, platform, t.PlatformID, t.Title, t.Summary, t.LinkType, t.LinkValue,
		build, t.Sound, t.Vibration, t.PassThrough, t.MidFile, t.PushTime, t.ExpireTime, t.Status, t.Group, t.ImageURL, extra); err != nil {
		log.Error("d.AddTask(%+v) error(%v)", t, err)
	}
	return
}
// TxCondByStatus claims one data-platform condition with the given status
// inside tx (FOR UPDATE row lock). Returns (nil, nil) when none matches.
func (d *Dao) TxCondByStatus(tx *xsql.Tx, status int) (cond *pamdl.DPCondition, err error) {
	cond = new(pamdl.DPCondition)
	if err = tx.QueryRow(_txDpCondByStatusSQL, status).Scan(&cond.ID, &cond.Job, &cond.Task, &cond.Condition, &cond.SQL, &cond.Status, &cond.StatusURL, &cond.File); err != nil {
		// Don't hand back a partially-scanned struct on failure; and log
		// real errors for consistency with TxTaskByStatus.
		cond = nil
		if err == sql.ErrNoRows {
			err = nil
			return
		}
		log.Error("d.TxCondByStatus(%d) error(%v)", status, err)
		PromError("mysql:TxCondByStatus")
		return
	}
	return
}
// UpdateDpCond updates all mutable fields of a data-platform condition.
func (d *Dao) UpdateDpCond(ctx context.Context, cond *pamdl.DPCondition) (err error) {
	if _, err = d.updateDpCondStmt.Exec(ctx, cond.Job, cond.Task, cond.Condition, cond.SQL, cond.Status, cond.StatusURL, cond.File, cond.ID); err != nil {
		log.Error("d.UpdateDpCond(%+v) error(%v)", cond, err)
	}
	return
}

// UpdateDpCondStatus updates only the status of a data-platform condition.
func (d *Dao) UpdateDpCondStatus(ctx context.Context, id int64, status int) (err error) {
	if _, err = d.db.Exec(ctx, _UpdateDpCondStatusSQL, status, id); err != nil {
		log.Error("d.UpdateCondStatus(%d,%d) error(%v)", id, status, err)
	}
	return
}

// TxUpdateCondStatus updates a condition's status inside tx.
func (d *Dao) TxUpdateCondStatus(tx *xsql.Tx, id int64, status int) (err error) {
	if _, err = tx.Exec(_UpdateDpCondStatusSQL, status, id); err != nil {
		log.Error("d.TxUpdateCondStatus(%d,%d) error(%v)", id, status, err)
	}
	return
}

View File

@@ -0,0 +1,56 @@
package dao
import (
"context"
"testing"
"time"
pushmdl "go-common/app/service/main/push/model"
. "github.com/smartystreets/goconvey/convey"
)
// Test_DelCallbacks deletes old callbacks up to a fixed cutoff
// (integration test — mutates the test database).
func Test_DelCallbacks(t *testing.T) {
	Convey("del callbacks", t, func() {
		loc, _ := time.LoadLocation("Local")
		tm := time.Date(2018, 1, 11, 18, 27, 03, 0, loc)
		rows, err := d.DelCallbacks(context.TODO(), tm, 1000)
		So(err, ShouldBeNil)
		t.Logf("del callback rows:%d", rows)
	})
}

// Test_DelTasks deletes old tasks up to a fixed cutoff.
func Test_DelTasks(t *testing.T) {
	Convey("del tasks", t, func() {
		loc, _ := time.LoadLocation("Local")
		tm := time.Date(2018, 4, 2, 16, 00, 00, 0, loc)
		rows, err := d.DelTasks(context.TODO(), tm, 1000)
		So(err, ShouldBeNil)
		t.Logf("del task rows:%d", rows)
	})
}

// Test_ReportLastID fetches the current max report id.
func Test_ReportLastID(t *testing.T) {
	Convey("get report latest id", t, func() {
		id, err := d.ReportLastID(context.TODO())
		So(err, ShouldBeNil)
		t.Logf("latest report id(%d)", id)
	})
}

// Test_TxTaskByStatus claims a prepared task inside a transaction and
// commits; (nil, nil) is acceptable when no task is pending.
func Test_TxTaskByStatus(t *testing.T) {
	Convey("tx task by status", t, func() {
		tx, _ := d.BeginTx(context.Background())
		_, err := d.TxTaskByStatus(tx, pushmdl.TaskStatusPrepared)
		So(err, ShouldBeNil)
		err = tx.Commit()
		So(err, ShouldBeNil)
	})
}

// Test_AddTask inserts an empty task row (mutates the test database).
func Test_AddTask(t *testing.T) {
	Convey("tx add task", t, func() {
		err := d.AddTask(context.Background(), &pushmdl.Task{})
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,79 @@
package dao
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"time"
"go-common/library/log"
)
// wechatResp is the bap message-API response; Status 0 means success.
type wechatResp struct {
	Status int    `json:"status"`
	Msg    string `json:"msg"`
}

const (
	// _url is the internal bap endpoint for sending enterprise-WeChat messages.
	_url = "http://bap.bilibili.co/api/v1/message/add"
)
// SendWechat sends an enterprise-WeChat alert message to the configured
// user via the bap message API. A non-zero response status is converted
// into an error.
func (d *Dao) SendWechat(content string) (err error) {
	params := map[string]string{
		"content":   content,
		"timestamp": strconv.FormatInt(time.Now().Unix(), 10),
		"token":     d.c.Wechat.Token,
		"type":      "wechat",
		"username":  d.c.Wechat.Username,
		"url":       "",
	}
	// Signature is computed over the sorted, url-escaped params (see sign).
	params["signature"] = d.sign(params)
	b, err := json.Marshal(params)
	if err != nil {
		log.Error("SendWechat json.Marshal error(%v)", err)
		return
	}
	req, err := http.NewRequest(http.MethodPost, _url, bytes.NewReader(b))
	if err != nil {
		log.Error("SendWechat NewRequest error(%v), params(%s)", err, string(b))
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	res := wechatResp{}
	if err = d.httpClient.Do(context.TODO(), req, &res); err != nil {
		log.Error("SendWechat Do error(%v), params(%s)", err, string(b))
		return
	}
	if res.Status != 0 {
		err = fmt.Errorf("status(%d) msg(%s)", res.Status, res.Msg)
		log.Error("SendWechat response error(%v), params(%s)", err, string(b))
	}
	return
}
// sign computes the bap message signature: the lowercase hex MD5 digest
// of the key-sorted, url-escaped "k=v&k=v..." parameter string with the
// wechat secret appended.
func (d *Dao) sign(params map[string]string) string {
	keys := make([]string, 0, len(params))
	for k := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var buf bytes.Buffer
	for i, k := range keys {
		if i > 0 {
			buf.WriteByte('&')
		}
		buf.WriteString(url.QueryEscape(k))
		buf.WriteByte('=')
		buf.WriteString(url.QueryEscape(params[k]))
	}
	h := md5.New()
	io.WriteString(h, buf.String()+d.c.Wechat.Secret)
	return fmt.Sprintf("%x", h.Sum(nil))
}

View File

@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["http.go"],
importpath = "go-common/app/job/main/push/http",
tags = ["automanaged"],
deps = [
"//app/job/main/push/conf:go_default_library",
"//app/job/main/push/service:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,45 @@
package http
import (
"net/http"
"go-common/app/job/main/push/conf"
"go-common/app/job/main/push/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
// pushSrv is the package-level service instance used by all HTTP handlers.
var pushSrv *service.Service

// Init starts the blademaster HTTP server and wires up the routes.
// It panics if the server fails to start, aborting process startup.
func Init(c *conf.Config, srv *service.Service) {
	pushSrv = srv
	engine := bm.DefaultServer(c.HTTPServer)
	route(engine)
	if err := engine.Start(); err != nil {
		log.Error("engine.Start error(%v)", err)
		panic(err)
	}
}
// route registers the health, discovery and admin endpoints.
func route(e *bm.Engine) {
	e.Ping(ping)
	e.Register(register)
	// manually trigger a full token-cache refresh
	e.GET("/refresh_token_cache", refreshTokenCache)
}
// ping answers health checks; it replies 503 when the service is unhealthy.
func ping(ctx *bm.Context) {
	err := pushSrv.Ping(ctx)
	if err == nil {
		return
	}
	log.Error("push-job ping error(%v)", err)
	ctx.AbortWithStatus(http.StatusServiceUnavailable)
}
// register answers service-discovery registration checks with an empty JSON object.
func register(ctx *bm.Context) {
	ctx.JSON(map[string]interface{}{}, nil)
}
// refreshTokenCache kicks off an asynchronous token-cache refresh and
// returns immediately; progress is only visible in the logs.
func refreshTokenCache(ctx *bm.Context) {
	go pushSrv.RefreshTokenCache()
	ctx.JSON(nil, nil)
}

View File

@@ -0,0 +1,27 @@
# Auto-managed Bazel rules for the push-job model package.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Model (pure data types) library for push-job.
go_library(
    name = "go_default_library",
    srcs = ["model.go"],
    importpath = "go-common/app/job/main/push/model",
    tags = ["automanaged"],
)

# All files in this package (consumed by the parent all-srcs aggregation).
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Aggregates this package's sources for recursive builds.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,21 @@
package model
// Data-platform check-job status codes returned by the job-status endpoint.
const (
	// CheckJobStatusOk means the job completed successfully.
	CheckJobStatusOk = 1
	// CheckJobStatusErr means the job failed.
	CheckJobStatusErr = 2
	// CheckJobStatusDoing means the job is still running.
	CheckJobStatusDoing = 3
	// CheckJobStatusPending means the job is waiting to be executed.
	CheckJobStatusPending = 4
)
// DpCheckJobResult is the data platform's response to a check-job query.
type DpCheckJobResult struct {
	Code      int    `json:"code"`
	Msg       string `json:"msg"`
	StatusID  int    `json:"statusId"`
	StatusMsg string `json:"statusMsg"`
	// Files lists the HDFS paths of the result files when the job is done.
	Files []string `json:"hdfsPath"`
}

View File

@@ -0,0 +1,61 @@
# Auto-managed Bazel rules for the push-job service package.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests for the service package.
go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/push/conf:go_default_library",
        "//app/service/main/push/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# Core business-logic library for push-job.
go_library(
    name = "go_default_library",
    srcs = [
        "callback.go",
        "dataplatform.go",
        "report.go",
        "service.go",
        "task.go",
    ],
    importpath = "go-common/app/job/main/push/service",
    tags = ["automanaged"],
    deps = [
        "//app/admin/main/push/model:go_default_library",
        "//app/job/main/push/conf:go_default_library",
        "//app/job/main/push/dao:go_default_library",
        "//app/job/main/push/model:go_default_library",
        "//app/service/main/push/api/grpc/v1:go_default_library",
        "//app/service/main/push/model:go_default_library",
        "//library/cache:go_default_library",
        "//library/conf/env:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/sync/errgroup:go_default_library",
    ],
)

# All files in this package (consumed by the parent all-srcs aggregation).
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Aggregates this package's sources for recursive builds.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,114 @@
package service
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/log"
)
const (
	// _retryCallback is the max number of attempts for AddCallback RPCs.
	_retryCallback = 5
	// _delCallbackLimit caps the rows deleted per DelCallbacks batch.
	_delCallbackLimit = 5000
)
// callbackproc drains s.callbackCh and forwards each callback to the
// push service via gRPC, retrying failed calls a few times.
func (s *Service) callbackproc() {
	defer s.waiter.Done()
	for {
		cbs, ok := <-s.callbackCh
		if !ok {
			log.Warn("s.callbackproc() closed")
			return
		}
		for _, cb := range cbs {
			if cb == nil {
				continue
			}
			req := &pb.AddCallbackRequest{
				Task:     cb.Task,
				APP:      cb.APP,
				Platform: int32(cb.Platform),
				Mid:      cb.Mid,
				Pid:      int32(cb.Pid),
				Token:    cb.Token,
				Buvid:    cb.Buvid,
				Click:    int32(cb.Click),
			}
			if cb.Extra != nil {
				req.Extra = &pb.CallbackExtra{Status: int32(cb.Extra.Status), Channel: int32(cb.Extra.Channel)}
			}
			var err error
			for attempt := 0; attempt < _retryCallback; attempt++ {
				if _, err = s.pushRPC.AddCallback(context.Background(), req); err == nil {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if err != nil {
				log.Error("s.pushRPC.AddCallback(%+v) error(%v)", req, err)
				dao.PromError("report:新增callback")
				continue
			}
			log.Info("add callback success task(%s) token(%s)", cb.Task, cb.Token)
			time.Sleep(time.Millisecond)
		}
	}
}
// consumeCallback consumes callback batches from databus, unmarshals each
// message and hands it to callbackproc via s.callbackCh. When the databus
// channel closes it also closes s.callbackCh so downstream workers exit.
func (s *Service) consumeCallback() {
	defer s.waiter.Done()
	for {
		msg, ok := <-s.callbackSub.Messages()
		if !ok {
			log.Info("databus: push-job callback consumer exit!")
			close(s.callbackCh)
			return
		}
		// counter read by checkConsumer to detect a stalled consumer
		s.callbackCnt++
		msg.Commit()
		var cbs []*pushmdl.Callback
		if err := json.Unmarshal(msg.Value, &cbs); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			dao.PromError("service:解析databus中callback消息")
			continue
		}
		log.Info("consumeCallback key(%s) partition(%d) offset(%d) msg(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
		s.callbackCh <- cbs
	}
}
// delCallbacksproc purges old callback rows once a day.
// During the 04:00 hour it deletes callbacks older than
// s.c.Job.DelCallbackInterval days, in batches of _delCallbackLimit rows.
func (s *Service) delCallbacksproc() {
	for {
		now := time.Now()
		if now.Hour() == 4 {
			var (
				err     error
				deleted int64
				b       = now.Add(time.Duration(-s.c.Job.DelCallbackInterval*24) * time.Hour)
				// end of the cutoff day in local time; time.Local replaces the
				// fallible time.LoadLocation("Local") lookup with the same zone.
				t = time.Date(b.Year(), b.Month(), b.Day(), 23, 59, 59, 0, time.Local)
			)
			for {
				if deleted, err = s.dao.DelCallbacks(context.TODO(), t, _delCallbackLimit); err != nil {
					log.Error("s.delCallbacks(%v) error(%v)", t, err)
					s.dao.SendWechat("DB操作失败:push-job删除callback数据错误")
					time.Sleep(time.Second)
					continue
				}
				// a short batch means everything old enough has been removed
				if deleted < _delCallbackLimit {
					break
				}
				time.Sleep(time.Second)
			}
			log.Info("delCallbacksproc success date(%v)", t)
			// skip past the trigger hour so we purge at most once per day
			time.Sleep(time.Hour)
		}
		time.Sleep(time.Minute)
	}
}

View File

@@ -0,0 +1,254 @@
package service
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"os"
"strconv"
"strings"
"time"
pamdl "go-common/app/admin/main/push/model"
"go-common/app/job/main/push/model"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// txCond claims one condition row whose status equals oldStatus and,
// inside a single transaction, flips it to newStatus. A nil cond with a
// nil error means no row was pending.
func (s *Service) txCond(oldStatus, newStatus int) (cond *pamdl.DPCondition, err error) {
	ctx := context.Background()
	var tx *xsql.Tx
	if tx, err = s.dao.BeginTx(ctx); err != nil {
		log.Error("tx.BeginTx() error(%v)", err)
		return
	}
	rollback := func() {
		if e := tx.Rollback(); e != nil {
			log.Error("tx.Rollback() error(%v)", e)
		}
	}
	cond, err = s.dao.TxCondByStatus(tx, oldStatus)
	if err != nil || cond == nil {
		rollback()
		return
	}
	if err = s.dao.TxUpdateCondStatus(tx, cond.ID, newStatus); err != nil {
		rollback()
		return
	}
	if err = tx.Commit(); err != nil {
		log.Error("tx.Commit() error(%v)", err)
	}
	return
}
// dpQueryproc submits prepared data-platform queries.
// It claims a condition in status Prepared (moving it to Submitting),
// submits its SQL to the data platform and records the returned status
// URL; on failure both the condition and its task are marked failed.
func (s *Service) dpQueryproc() {
	defer s.waiter.Done()
	for {
		if s.closed {
			return
		}
		cond, err := s.txCond(pushmdl.DpCondStatusPrepared, pushmdl.DpCondStatusSubmitting)
		if err != nil || cond == nil {
			time.Sleep(time.Second)
			continue
		}
		for i := 0; i < _retry; i++ {
			if cond.StatusURL, err = s.dao.DpSubmitQuery(context.Background(), cond.SQL); err == nil {
				break
			}
			time.Sleep(time.Second)
		}
		if err != nil {
			log.Error("data platform add query(%+v) error(%v)", cond, err)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
			continue
		}
		cond.Status = pushmdl.DpCondStatusSubmitted
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateDpCond(context.Background(), cond); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			// best effort: dpFileproc only picks up Submitted conditions
			log.Error("data platform update condition(%+v) error(%v)", cond, err)
		}
		time.Sleep(time.Second)
	}
}
// dpFileproc downloads the results of completed data-platform queries.
// It claims a condition in status Submitted (moving it to Polling), polls
// the job until files are ready, downloads them into one local file and
// advances both the condition and its task to the next state.
func (s *Service) dpFileproc() {
	defer s.waiter.Done()
	for {
		if s.closed {
			return
		}
		cond, err := s.txCond(pushmdl.DpCondStatusSubmitted, pushmdl.DpCondStatusPolling)
		if err != nil || cond == nil {
			time.Sleep(time.Second)
			continue
		}
		var (
			path  string
			files []string
		)
		// dpCheckJob marks cond/task failed itself when no files come back
		if files = s.dpCheckJob(cond); len(files) == 0 {
			continue
		}
		for i := 0; i < _retry; i++ {
			if path, err = s.dpDownloadFiles(cond, files); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil || path == "" {
			log.Error("data platform download query(%+v) file error(%v)", cond, err)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
			continue
		}
		cond.File = path
		cond.Status = pushmdl.DpCondStatusDone
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateDpCond(context.Background(), cond); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			log.Error("data platform UpdateDpCond(%+v) error(%v)", cond, err)
			continue
		}
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateTask(context.Background(), strconv.FormatInt(cond.Task, 10), path, pushmdl.TaskStatusPretreatmentPrepared); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			// fix: the old log call had four verbs but only three arguments,
			// dropping the error value entirely
			log.Error("s.dao.UpdateTask(%d,%s,%d) error(%v)", cond.Task, path, pushmdl.TaskStatusPretreatmentPrepared, err)
		}
		time.Sleep(time.Second)
	}
}
// dpCheckJob polls the data-platform job behind cond.StatusURL until it
// finishes, returning the result file list on success. On timeout, job
// failure, or an empty result it marks the condition failed/no-file and
// falls through to the shared exit path that fails the task.
func (s *Service) dpCheckJob(cond *pamdl.DPCondition) (files []string) {
	now := time.Now()
	for {
		if time.Since(now) > time.Duration(s.c.Job.DpPollingTime) {
			// fix: "stoped" typo in the error message
			log.Error("polling stopped, more over than dpPollingTime, give job up")
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			break
		}
		res, err := s.dao.DpCheckJob(context.Background(), cond.StatusURL)
		if err != nil {
			log.Error("s.dao.DpCheckJob(%s) error(%v)", cond.StatusURL, err)
			time.Sleep(time.Second)
			continue
		}
		if res.StatusID == model.CheckJobStatusDoing || res.StatusID == model.CheckJobStatusPending {
			log.Info("polling (%s) ing..., status(%d)", cond.StatusURL, res.StatusID)
			time.Sleep(5 * time.Second)
			continue
		}
		if res.StatusID == model.CheckJobStatusOk {
			if len(res.Files) == 0 {
				log.Info("polling (%s) success, no files found", cond.StatusURL)
				s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusNoFile)
				break
			}
			files = res.Files
			log.Info("polling (%s) success, files(%d)", cond.StatusURL, len(files))
			return
		}
		if res.StatusID == model.CheckJobStatusErr {
			log.Error("polling (%s) error, res(%+v)", cond.StatusURL, res)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			break
		}
	}
	// NOTE(review): this exit path also runs for the "no files" case, so a
	// NoFile condition still fails its task — confirm that is intended.
	log.Error("polling cond(%d) error", cond.ID)
	s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
	return
}
// dpDownloadFiles marks the condition as downloading, ensures a dated
// directory exists under the configured NAS mount, and appends every
// remote result file into one md5-named local file, returning its path.
func (s *Service) dpDownloadFiles(cond *pamdl.DPCondition, files []string) (path string, err error) {
	for attempt := 0; attempt < _retry; attempt++ {
		if err = s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusDownloading); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		return
	}
	day := time.Now().Format("20060102")
	dir := fmt.Sprintf("%s/%s", strings.TrimSuffix(s.c.Job.MountDir, "/"), day)
	if _, err = os.Stat(dir); err != nil {
		if !os.IsNotExist(err) {
			log.Error("os.IsNotExist(%s) error(%v)", dir, err)
			return
		}
		if err = os.MkdirAll(dir, 0777); err != nil {
			log.Error("os.MkdirAll(%s) error(%v)", dir, err)
			return
		}
	}
	seed := strconv.FormatInt(time.Now().UnixNano(), 10)
	path = fmt.Sprintf("%s/%x", dir, md5.Sum([]byte(seed)))
	for _, remote := range files {
		if err = s.dpDownloadFile(remote, path); err != nil {
			return
		}
	}
	return
}
// dpDownloadFile fetches one result file from the data platform, replaces
// the hive \u0001 column separator with single spaces, and appends the
// normalized lines to the local file at path.
func (s *Service) dpDownloadFile(url, path string) (err error) {
	var (
		res     []byte
		content [][]byte
	)
	for i := 0; i < _retry; i++ {
		if res, err = s.dao.DpDownloadFile(context.Background(), url); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		log.Error("s.dao.DpDownloadFile(%s) error(%v)", url, err)
		return
	}
	for _, bs := range bytes.Split(res, []byte("\n")) {
		n := bytes.Split(bs, []byte("\u0001"))
		content = append(content, bytes.Join(n, []byte(" ")))
	}
	for i := 0; i < _retry; i++ {
		if err = s.saveDpFile(path, bytes.Join(content, []byte("\n"))); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		// fix: the old message named s.saveNASFile, but saveDpFile is called
		log.Error("s.saveDpFile(%s) error(%v)", url, err)
	}
	return
}
// saveDpFile appends data to the NAS file at path, creating it if needed.
func (s *Service) saveDpFile(path string, data []byte) (err error) {
	var f *os.File
	if f, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err != nil {
		log.Error("s.saveDpFile(%s) OpenFile() error(%v)", path, err)
		return
	}
	defer f.Close()
	_, err = f.Write(data)
	if err != nil {
		log.Error("s.saveDpFile(%s) f.Write() error(%v)", path, err)
	}
	return
}

View File

@@ -0,0 +1,243 @@
package service
import (
"context"
"runtime"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/log"
)
const (
	// _dbBatch is the report-table ID range scanned per DB query.
	_dbBatch = 100000
	// _cacheBatch is how many reports are pushed to cache per RPC.
	_cacheBatch = 50
)
// delInvalidReportsproc periodically asks the push service to purge
// reports invalidated by Mi-push feedback, sleeping
// DelInvalidReportInterval between rounds. Runs for the process lifetime.
func (s *Service) delInvalidReportsproc() {
	for {
		arg := &pb.DelInvalidReportsRequest{Type: pushmdl.DelMiFeedback}
		if _, err := s.pushRPC.DelInvalidReports(context.Background(), arg); err != nil {
			log.Error("s.pushRPC.DelInvalidReports(%d) error(%v)", arg.Type, err)
			dao.PromError("report:删除mi无效上报")
		}
		// Purging Mi uninstalled tokens is currently disabled:
		// arg = &pushmdl.ArgDelInvalidReport{Type: pushmdl.DelMiUninstalled}
		// if err := s.pushRPC.DelInvalidReports(context.Background(), arg); err != nil {
		// 	log.Error("s.pushRPC.DelInvalidReports(%d) error(%v)", arg.Type, err)
		// 	dao.PromError("report:删除mi卸载token")
		// }
		time.Sleep(time.Duration(s.c.Job.DelInvalidReportInterval))
	}
}
// reportproc drains s.reportCh and forwards each report to the push
// service via gRPC, retrying failed calls a few times.
func (s *Service) reportproc() {
	defer s.waiter.Done()
	for {
		batch, ok := <-s.reportCh
		if !ok {
			log.Warn("s.reportproc() closed")
			return
		}
		for _, r := range batch {
			if r == nil {
				continue
			}
			req := &pb.AddReportRequest{
				Report: &pb.ModelReport{
					APPID:        int32(r.APPID),
					PlatformID:   int32(r.PlatformID),
					Mid:          r.Mid,
					Buvid:        r.Buvid,
					DeviceToken:  r.DeviceToken,
					Build:        int32(r.Build),
					TimeZone:     int32(r.TimeZone),
					NotifySwitch: int32(r.NotifySwitch),
					DeviceBrand:  r.DeviceBrand,
					DeviceModel:  r.DeviceModel,
					OSVersion:    r.OSVersion,
					Extra:        r.Extra,
				},
			}
			var err error
			for attempt := 0; attempt < _retry; attempt++ {
				if _, err = s.pushRPC.AddReport(context.Background(), req); err == nil {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if err != nil {
				log.Error("s.pushRPC.AddReport(%+v) error(%v)", r, err)
				dao.PromError("report:新增上报数据")
			}
			time.Sleep(time.Millisecond)
		}
	}
}
// refreshTokensproc refreshes the token cache once a week, at the
// configured weekday/hour, polling every minute until the slot is hit.
func (s *Service) refreshTokensproc() {
	for {
		now := time.Now()
		// now.Hour() already returns int; the Weekday conversion is still needed.
		if int(now.Weekday()) != s.c.Job.SyncReportCacheWeek || now.Hour() != s.c.Job.SyncReportCacheHour {
			time.Sleep(time.Minute)
			continue
		}
		s.RefreshTokenCache()
		// sleep past the trigger hour so we refresh at most once per slot
		time.Sleep(time.Hour)
	}
}
// RefreshTokenCache rebuilds the token cache from the report table.
// It scans the whole table in _dbBatch ID ranges, pushes token-level cache
// entries in _cacheBatch chunks, groups reports by mid, and finally pushes
// a per-user report cache for every mid.
func (s *Service) RefreshTokenCache() {
	var (
		err   error
		maxid int64
		ctx   = context.Background()
	)
	for i := 0; i < _retry; i++ {
		if maxid, err = s.dao.ReportLastID(ctx); err == nil {
			break
		}
		time.Sleep(time.Second)
	}
	if err != nil {
		log.Error("s.refreshTokensproc() error(%v)", err)
		return
	}
	log.Info("refresh token start, maxid(%d)", maxid)
	var (
		updatedUsers  int64
		updatedTokens int64
		sli           []*pb.ModelReport
		pool          = make(map[int64][]*pb.ModelReport)
	)
	for i := int64(0); i <= maxid; i += _dbBatch {
		var rs []*pushmdl.Report
		for j := 0; j < _retry; j++ {
			if rs, err = s.dao.ReportsByRange(ctx, i, i+_dbBatch); err == nil {
				break
			}
			time.Sleep(20 * time.Millisecond)
		}
		if err != nil {
			// skip this ID range but keep refreshing the rest
			log.Error("s.dao.ReportsByRange(%d,%d) error(%v)", i, i+_dbBatch, err)
			continue
		}
		for _, r := range rs {
			// skip devices that have notifications switched off
			if r.NotifySwitch == 0 {
				continue
			}
			nr := &pb.ModelReport{
				APPID:        int32(r.APPID),
				PlatformID:   int32(r.PlatformID),
				Mid:          r.Mid,
				Buvid:        r.Buvid,
				DeviceToken:  r.DeviceToken,
				Build:        int32(r.Build),
				TimeZone:     int32(r.TimeZone),
				NotifySwitch: int32(r.NotifySwitch),
				DeviceBrand:  r.DeviceBrand,
				DeviceModel:  r.DeviceModel,
				OSVersion:    r.OSVersion,
				Extra:        r.Extra,
			}
			sli = append(sli, nr)
			// flush token-level cache in _cacheBatch chunks
			if len(sli) >= _cacheBatch {
				s.addTokensCache(sli)
				sli = []*pb.ModelReport{}
			}
			// reports without a mid get no per-user cache entry
			if r.Mid == 0 {
				continue
			}
			pool[r.Mid] = append(pool[r.Mid], nr)
			updatedTokens++
		}
		log.Info("refresh token sovled min(%d) max(%d)", i, i+_dbBatch)
		time.Sleep(time.Millisecond)
	}
	// flush the final partial chunk
	if len(sli) > 0 {
		s.addTokensCache(sli)
	}
	log.Info("refresh token data, users(%d) tokens(%d)", len(pool), updatedTokens)
	for mid, rs := range pool {
		arg := &pb.AddUserReportCacheRequest{Mid: mid, Reports: rs}
		for i := 0; i < _retry; i++ {
			if _, err = s.pushRPC.AddUserReportCache(ctx, arg); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			log.Error("s.pushRPC.AddUserReportCache(%d) error(%v)", mid, err)
			continue
		}
		updatedUsers++
		// deleting while ranging is safe in Go and releases memory early
		delete(pool, mid)
	}
	pool = nil
	// proactively release the potentially huge map before returning
	runtime.GC()
	log.Info("refresh token end, updated users(%d) tokens(%d)", updatedUsers, updatedTokens)
}
// addTokensCache pushes a batch of reports into the token cache via the
// push service, retrying a few times before giving up.
func (s *Service) addTokensCache(rs []*pb.ModelReport) (err error) {
	req := &pb.AddTokensCacheRequest{}
	req.Reports = append(req.Reports, rs...)
	for attempt := 0; attempt < _retry; attempt++ {
		if _, err = s.pushRPC.AddTokensCache(context.Background(), req); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		log.Error("s.pushRPC.AddTokensCache tokens(%d) error(%v)", len(rs), err)
		return
	}
	log.Info("s.pushRPC.AddTokensCache tokens(%d)", len(rs))
	return
}
// tokensByMids resolves device tokens for a batch of mids from the report
// cache, filtered by the task's app, notify switch, expiry and build
// constraints. It returns tokens grouped by platform ID plus the count of
// mids that had cached reports.
func (s *Service) tokensByMids(task *pushmdl.Task, mids []int64) (res map[int][]string, valid int64, err error) {
	rs, _, err := s.dao.ReportsCacheByMids(context.Background(), mids)
	if err != nil {
		log.Error("s.dao.ReportsCacheByMids() error(%v)", err)
		return
	}
	var (
		exist = make(map[int64]bool, len(rs))
		// platformCount = len(task.Platform)
		buildCount = len(task.Build)
	)
	for mid := range rs {
		exist[mid] = true
	}
	// log mids that have no cached reports at all
	for _, mid := range mids {
		if !exist[mid] {
			log.Warn("tokens by mid, task(%s) mid(%d)", task.ID, mid)
		}
	}
	res = make(map[int][]string)
	for _, rr := range rs {
		for _, r := range rr {
			if r.APPID != task.APPID {
				continue
			}
			if r.NotifySwitch == pushmdl.SwitchOff {
				continue
			}
			// skip devices whose local time is already past the task expiry
			realTime := pushmdl.RealTime(r.TimeZone)
			if realTime.Unix() > int64(task.ExpireTime) {
				continue
			}
			// if platformCount > 0 && !validatePlatform(r.PlatformID, task.Platform) {
			// continue
			// }
			if buildCount > 0 && !pushmdl.ValidateBuild(r.PlatformID, r.Build, task.Build) {
				continue
			}
			res[r.PlatformID] = append(res[r.PlatformID], r.DeviceToken)
		}
		// NOTE(review): valid counts every mid with cached reports, even when
		// all of its tokens are filtered out — confirm that is intended.
		valid++
	}
	return
}

View File

@@ -0,0 +1,178 @@
package service
import (
"context"
"encoding/json"
"sync"
"sync/atomic"
"time"
"go-common/app/job/main/push/conf"
"go-common/app/job/main/push/dao"
pushrpc "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/cache"
"go-common/library/conf/env"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
	// _max is the report batch size flushed to reportCh.
	_max = 1024
	// _retry is the default retry count for RPC/DB calls.
	_retry = 3
)
// Service wires together the databus consumers, background workers and
// the push gRPC client that make up push-job.
type Service struct {
	c      *conf.Config
	dao    *dao.Dao
	waiter sync.WaitGroup // waits for consumer/worker goroutines
	// addTaskWg guards the addTaskproc worker, which must drain addTaskCh
	// only after every producer tracked by waiter has exited (see Close).
	addTaskWg   sync.WaitGroup
	cache       *cache.Cache // async executor for best-effort side effects
	pushRPC     pushrpc.PushClient
	reportSub   *databus.Databus // consumer for new reports
	callbackSub *databus.Databus // consumer for callback
	reportCh    chan []*pushmdl.Report
	callbackCh  chan []*pushmdl.Callback
	addTaskCh   chan *pushmdl.Task
	reportCnt   int64 // consumed report messages (liveness counter)
	callbackCnt int64 // consumed callback messages (liveness counter)
	closedCnt   int64 // CAS flag used on the consumer shutdown path
	closed      bool  // set by Close to stop polling loops
}
// New creates a Service instance and starts all background goroutines:
// databus consumers, report/callback workers, task pretreatment, data
// platform submit/poll loops, and the periodic cleanup/refresh loops.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:           c,
		dao:         dao.New(c),
		cache:       cache.New(1, 102400),
		reportSub:   databus.New(c.ReportSub),
		callbackSub: databus.New(c.CallbackSub),
		reportCh:    make(chan []*pushmdl.Report, 1024),
		callbackCh:  make(chan []*pushmdl.Callback, 1024),
		addTaskCh:   make(chan *pushmdl.Task, 10240),
	}
	var err error
	if s.pushRPC, err = pushrpc.NewClient(c.PushRPC); err != nil {
		panic(err)
	}
	if env.DeployEnv == env.DeployEnvProd {
		go s.delInvalidReportsproc() // proactively delete invalid tokens (prod only)
	}
	for i := 0; i < s.c.Job.ReportShard; i++ {
		s.waiter.Add(1)
		go s.reportproc()
	}
	for i := 0; i < s.c.Job.CallbackShard; i++ {
		s.waiter.Add(1)
		go s.callbackproc()
	}
	if s.c.Job.PretreatTask {
		for i := 0; i < s.c.Job.PretreatmentTaskShard; i++ {
			s.waiter.Add(1)
			go s.pretreatTaskproc() // pretreatment: split tasks into per-platform token tasks
		}
	}
	s.addTaskWg.Add(1)
	go s.addTaskproc()
	s.waiter.Add(1)
	go s.consumeReport()
	s.waiter.Add(1)
	go s.consumeCallback()
	go s.checkConsumer()
	// periodically delete expired data
	go s.delCallbacksproc()
	go s.delTasksproc()
	// periodically refresh the token cache
	go s.refreshTokensproc()
	// data platform submit/poll loops
	s.waiter.Add(1)
	go s.dpQueryproc()
	s.waiter.Add(1)
	go s.dpFileproc()
	return
}
// consumeReport consumes report messages from databus and batches them
// into s.reportCh, flushing when _max reports accumulate or on each tick.
func (s *Service) consumeReport() {
	defer s.waiter.Done()
	// Bug fix: the batch must start empty with capacity _max. The old
	// make([]*pushmdl.Report, _max) created 1024 nil elements, so the
	// first flush delivered a batch padded with nils.
	reports := make([]*pushmdl.Report, 0, _max)
	ticker := time.NewTicker(time.Duration(s.c.Job.ReportTicker))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-s.reportSub.Messages():
			if !ok {
				log.Info("databus: push-job report consumer exit!")
				if len(reports) > 0 {
					s.reportCh <- reports
				}
				// NOTE(review): reportCh is closed only when the CAS fails,
				// i.e. on a second exit — confirm this shutdown is intended.
				if !atomic.CompareAndSwapInt64(&s.closedCnt, 0, 1) {
					close(s.reportCh)
				}
				return
			}
			// counter read by checkConsumer to detect a stalled consumer
			s.reportCnt++
			msg.Commit()
			m := &pushmdl.Report{}
			if err := json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				dao.PromError("service:解析计数databus消息")
				continue
			}
			log.Info("consumeReport key(%s) partition(%d) offset(%d) msg(%+v)", msg.Key, msg.Partition, msg.Offset, m)
			reports = append(reports, m)
			if len(reports) < _max {
				continue
			}
		case <-ticker.C:
		}
		if len(reports) > 0 {
			// hand a copy downstream and reuse the batch's backing array
			temp := make([]*pushmdl.Report, len(reports))
			copy(temp, reports)
			reports = reports[:0]
			s.reportCh <- temp
		}
	}
}
// checkConsumer alerts via wechat when either databus consumer makes no
// progress for five minutes. Only active in production.
func (s *Service) checkConsumer() {
	if env.DeployEnv != env.DeployEnvProd {
		return
	}
	var lastReport, lastCallback int64
	for {
		time.Sleep(5 * time.Minute)
		// NOTE(review): these counters are written by other goroutines
		// without atomics, so reads here may be slightly stale.
		if s.reportCnt == lastReport {
			msg := "push-job report did not consume within 5 minute"
			s.dao.SendWechat(msg)
			log.Warn(msg)
		}
		lastReport = s.reportCnt
		if s.callbackCnt == lastCallback {
			msg := "push-job callback did not consume within 5 minute"
			s.dao.SendWechat(msg)
			log.Warn(msg)
		}
		lastCallback = s.callbackCnt
	}
}
// Ping reports the health of the service's dependencies.
func (s *Service) Ping(c context.Context) (err error) {
	err = s.dao.Ping(c)
	return
}
// Close releases resources owned by the Service instance.
// Shutdown order matters: closing the databus subscriptions ends the
// consumer goroutines, which close the worker channels; waiter.Wait then
// waits for those workers before addTaskCh is closed and drained.
func (s *Service) Close() {
	s.closed = true
	s.reportSub.Close()
	s.callbackSub.Close()
	s.dao.Close()
	s.waiter.Wait()
	close(s.addTaskCh)
	s.addTaskWg.Wait()
}

View File

@@ -0,0 +1,45 @@
package service
import (
"context"
"flag"
"path/filepath"
"testing"
"time"
"go-common/app/job/main/push/conf"
pushmdl "go-common/app/service/main/push/model"
. "github.com/smartystreets/goconvey/convey"
)
// srv is the shared Service under test, built once in init.
var srv *Service

// init loads the test config and constructs the Service before any test runs.
func init() {
	dir, _ := filepath.Abs("../cmd/push-job-test.toml")
	flag.Set("conf", dir)
	conf.Init()
	srv = New(conf.Conf)
	// give the background goroutines a moment to start
	time.Sleep(time.Second)
}
// WithService adapts a Service-consuming function into a goconvey action.
func WithService(f func(s *Service)) func() {
	return func() { f(srv) }
}
// Test_Ping verifies the service's dependencies report healthy.
func Test_Ping(t *testing.T) {
	Convey("ping", t, WithService(func(s *Service) {
		err := s.Ping(context.TODO())
		So(err, ShouldBeNil)
	}))
}
// Test_TxCond exercises the transactional condition claim; cond may be
// nil when no row is pending, which is still a passing case.
func Test_TxCond(t *testing.T) {
	Convey("query conditon by tx", t, WithService(func(s *Service) {
		cond, err := s.txCond(pushmdl.DpCondStatusPrepared, pushmdl.DpCondStatusSubmitting)
		So(err, ShouldBeNil)
		t.Logf("cond(%+v)", cond)
	}))
}

View File

@@ -0,0 +1,533 @@
package service
import (
"bufio"
"context"
"crypto/md5"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/sync/errgroup"
)
const (
	// _delTaskLimit caps the rows deleted per DelTasks batch.
	_delTaskLimit = 5000
)
// Sentinel errors returned by the mid/token line parsers below.
var (
	errEmptyLine    = errors.New("empty line")
	errInvalidMid   = errors.New("invalid mid format")
	errInvalidToken = errors.New("invalid token format")
)
// addTaskproc persists tasks arriving on s.addTaskCh, retrying the insert
// and alerting via wechat when every attempt fails.
func (s *Service) addTaskproc() {
	defer s.addTaskWg.Done()
	for {
		task, ok := <-s.addTaskCh
		if !ok {
			log.Info("add task channel exit")
			return
		}
		if task == nil {
			continue
		}
		task.Status = pushmdl.TaskStatusPrepared
		var err error
		for attempt := 0; attempt < _retry; attempt++ {
			if err = s.dao.AddTask(context.Background(), task); err == nil {
				break
			}
		}
		if err != nil {
			log.Error("add task(%+v) error(%v)", task, err)
			s.cache.Save(func() {
				s.dao.SendWechat(fmt.Sprintf("add task(%d)", task.Job))
			})
			continue
		}
		dao.PromInfo("add task")
		time.Sleep(time.Millisecond)
	}
}
// delTasksproc purges old task rows once a day.
// During the 02:00 hour it deletes tasks older than s.c.Job.DelTaskInterval
// days, in batches of _delTaskLimit rows.
func (s *Service) delTasksproc() {
	for {
		now := time.Now()
		if now.Hour() != 2 {
			time.Sleep(time.Minute)
			continue
		}
		var (
			err     error
			deleted int64
			b       = now.Add(time.Duration(-s.c.Job.DelTaskInterval*24) * time.Hour)
			// end of the cutoff day in local time; time.Local replaces the
			// fallible time.LoadLocation("Local") lookup with the same zone.
			t = time.Date(b.Year(), b.Month(), b.Day(), 23, 59, 59, 0, time.Local)
		)
		for {
			if deleted, err = s.dao.DelTasks(context.TODO(), t, _delTaskLimit); err != nil {
				log.Error("s.delTasks(%v) error(%v)", t, err)
				s.dao.SendWechat("DB操作失败:push-job删除task数据错误")
				time.Sleep(time.Second)
				continue
			}
			// a short batch means everything old enough has been removed
			if deleted < _delTaskLimit {
				break
			}
			time.Sleep(time.Second)
		}
		// skip past the trigger hour so we purge at most once per day
		time.Sleep(time.Hour)
	}
}
// pretreatTaskproc repeatedly claims a task awaiting pretreatment and
// expands it into per-platform token tasks, until the service closes.
func (s *Service) pretreatTaskproc() {
	defer s.waiter.Done()
	for !s.closed {
		task, err := s.pickPretreatmentTask()
		if err != nil {
			time.Sleep(5 * time.Second)
			continue
		}
		if task != nil {
			log.Info("pretreat task job(%d) id(%s)", task.Job, task.ID)
			if err = s.pretreatTask(task); err != nil {
				log.Error("pretreat task(%+v) error(%v)", task, err)
				s.cache.Save(func() { s.dao.SendWechat(fmt.Sprintf("pretreat task(%s) error", task.ID)) })
			}
		}
		time.Sleep(time.Duration(s.c.Job.LoadTaskInteval))
	}
}
// pickPretreatmentTask claims one task in status PretreatmentPrepared and
// transactionally moves it to PretreatmentDoing; t is nil when none pends.
func (s *Service) pickPretreatmentTask() (t *pushmdl.Task, err error) {
	c := context.Background()
	var tx *xsql.Tx
	if tx, err = s.dao.BeginTx(c); err != nil {
		log.Error("tx.BeginTx() error(%v)", err)
		return
	}
	rollback := func(prom string) {
		if e := tx.Rollback(); e != nil {
			dao.PromError(prom)
			log.Error("tx.Rollback() error(%v)", e)
		}
	}
	if t, err = s.dao.TxTaskByStatus(tx, pushmdl.TaskStatusPretreatmentPrepared); err != nil {
		rollback("task:获取新任务")
		return
	}
	if t == nil {
		rollback("task:获取新任务")
		return
	}
	if err = s.dao.TxUpdateTaskStatus(tx, t.ID, pushmdl.TaskStatusPretreatmentDoing); err != nil {
		rollback("task:更新任务状态")
		return
	}
	if err = tx.Commit(); err != nil {
		dao.PromError("task:获取新任务commit")
		log.Error("tx.Commit() error(%v)", err)
	}
	return
}
// pretreatTask dispatches a task to its type-specific pretreatment and
// records the resulting status. On pretreatment failure the task is marked
// failed and the ORIGINAL pretreatment error is returned; the old code
// returned the status-update result instead, masking the real cause.
func (s *Service) pretreatTask(t *pushmdl.Task) (err error) {
	id, _ := strconv.ParseInt(t.ID, 10, 64)
	switch t.Type {
	case pushmdl.TaskTypeAll:
		err = s.pretreatTaskAll(t)
	case pushmdl.TaskTypeMngToken, pushmdl.TaskTypeDataPlatformToken, pushmdl.TaskTypeDataPlatformMid:
		err = s.pretreatTaskToken(t)
	case pushmdl.TaskTypeStrategyMid, pushmdl.TaskTypeMngMid:
		err = s.pretreatTaskMid(t)
	default:
		log.Error("invalid task type, (%+v)", t)
	}
	if err != nil {
		if e := s.dao.UpdateTaskStatus(context.Background(), id, pushmdl.TaskStatusPretreatmentFailed); e != nil {
			log.Error("s.dao.UpdateTaskStatus(%d) error(%v)", id, e)
		}
		return
	}
	err = s.dao.UpdateTaskStatus(context.Background(), id, pushmdl.TaskStatusPretreatmentDone)
	return
}
// pretreatTaskAll expands a push-to-all task: it scans the whole report
// table in parallel ID ranges, filters tokens by build constraints, chunks
// them into files of at most LimitPerTask tokens, and enqueues one
// per-platform sub-task per file.
func (s *Service) pretreatTaskAll(t *pushmdl.Task) (err error) {
	log.Info("AddTaskAll start, task(%+v)", t)
	var (
		maxID int64
		group = errgroup.Group{}
	)
	maxID, err = s.dao.ReportLastID(context.Background())
	if err != nil || maxID <= 0 {
		log.Error("s.pretreatTaskAll() error(%v)", err)
		s.cache.Save(func() {
			s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) ReportLastID(%d) error", t.ID, maxID))
		})
		return
	}
	log.Info("AddTaskAll get last report ID(%d)", maxID)
	buildCount := len(t.Build)
	batch := maxID / int64(s.c.Job.TaskGoroutines)
	for j := 0; j < s.c.Job.TaskGoroutines; j++ {
		begin := int64(j) * batch
		end := begin + batch
		group.Go(func() (e error) {
			var (
				path   string
				rows   *xsql.Rows
				tokens = make(map[int][]string)
			)
			for begin < end {
				l := begin + int64(_dbBatch)
				if l >= end {
					l = end
				}
				log.Info("AddTaskAll load reports start(%d) end(%d)", begin, l)
				if rows, e = s.dao.ReportsTaskAll(context.Background(), begin, l, t.APPID); e != nil {
					// fix: the old log call dropped the error argument
					log.Error("s.dao.ReportsTaskAll(%d,%d,%d) error(%v)", begin, l, t.APPID, e)
					s.cache.Save(func() {
						s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) ReportsTaskAll(%d,%d,%d) error", t.ID, begin, l, t.APPID))
					})
					return
				}
				for rows.Next() {
					var (
						platformID int
						build      int
						token      string
					)
					if e = rows.Scan(&platformID, &token, &build); e != nil {
						log.Error("AddTaskAll rows.Scan() error(%v)", e)
						s.cache.Save(func() {
							s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) ReportsTaskAll(%d,%d,%d) error", t.ID, begin, l, t.APPID))
						})
						rows.Close() // fix: release the result set before bailing out
						return
					}
					if buildCount > 0 && !pushmdl.ValidateBuild(platformID, build, t.Build) {
						continue
					}
					tokens[platformID] = append(tokens[platformID], token)
					if len(tokens[platformID]) >= s.c.Job.LimitPerTask {
						if path, e = s.saveFile(tokens[platformID]); e != nil {
							log.Error("AddTaskAll s.saveTokens error(%v)", e)
							s.cache.Save(func() {
								s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) saveTokens error(%v)", t.ID, e))
							})
							rows.Close() // fix: release the result set before bailing out
							return
						}
						tokens[platformID] = []string{}
						task := *t
						task.MidFile = path
						task.PlatformID = platformID
						s.addTaskCh <- &task
					}
				}
				rows.Close() // fix: the old code leaked the result set per batch
				begin = l
			}
			// flush the remaining partial chunks
			for p, v := range tokens {
				if len(v) == 0 {
					continue
				}
				if path, e = s.saveFile(v); e == nil {
					task := *t
					task.MidFile = path
					task.PlatformID = p
					s.addTaskCh <- &task
				}
			}
			return
		})
	}
	if err = group.Wait(); err != nil {
		log.Error("add task all, task(%+v) error(%v)", t, err)
		s.cache.Save(func() {
			s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) error(%v)", t.ID, err))
		})
		return
	}
	log.Info("AddTaskAll end, task(%+v)", t)
	s.cache.Save(func() {
		s.dao.SendWechat(fmt.Sprintf("add task all success, job(%d)", t.Job))
	})
	return
}
// pretreatTaskMid expands a mid-list task: it reads mids from t.MidFile,
// resolves their device tokens in parallel batches, chunks tokens into
// files of at most LimitPerTask entries, and enqueues one per-platform
// sub-task per file. It also reports mid-total/mid-valid progress.
func (s *Service) pretreatTaskMid(t *pushmdl.Task) (err error) {
	f, err := os.Open(t.MidFile)
	if err != nil {
		log.Error("pretreatTaskMid(%+v) open file error(%v)", t, err)
		return
	}
	defer f.Close()
	var (
		exit     bool
		line     string
		path     string
		mid      int64
		counter  int
		midTotal int64
		midValid int64
		mu       sync.Mutex
		mids     []int64
		tokens   = make(map[int][]string)
		group    = errgroup.Group{}
		reader   = bufio.NewReader(f)
	)
	for !exit {
		if line, err = reader.ReadString('\n'); err != nil {
			if err == io.EOF {
				exit = true // still process the final line without '\n'
			} else {
				log.Error("read file error(%v)", err)
				continue
			}
		}
		if mid, err = parseMidLine(line); err != nil {
			log.Error("parse mid line(%s) error(%v)", line, err)
			continue
		}
		midTotal++
		mids = append(mids, mid)
		if len(mids) >= s.c.Job.PushPartSize {
			midsCp := make([]int64, len(mids))
			copy(midsCp, mids)
			mids = []int64{}
			group.Go(func() (e error) {
				ts, valid, e := s.tokensByMids(t, midsCp)
				if e != nil {
					log.Error("s.tokensByMids(%v) error(%v)", t.ID, e)
					return
				}
				tcopy := make(map[int][]string)
				mu.Lock()
				midValid += valid
				for p, v := range ts {
					tokens[p] = append(tokens[p], v...)
					if len(tokens[p]) >= s.c.Job.LimitPerTask {
						tcopy[p] = append(tcopy[p], tokens[p]...)
						tokens[p] = []string{}
					}
				}
				mu.Unlock()
				for p, v := range tcopy {
					// fix: use goroutine-local variables here; the old code
					// assigned the shared outer `path` and `err`, racing with
					// the reader loop and sibling goroutines.
					fpath, fe := s.saveFile(v)
					if fe != nil {
						e = fe
						log.Error("pretreatTaskMid s.saveFild error(%v)", e)
						s.cache.Save(func() {
							s.dao.SendWechat(fmt.Sprintf("pretreatTaskMid(%v) saveTokens error(%v)", t.ID, e))
						})
						return
					}
					task := *t
					task.MidFile = fpath
					task.PlatformID = p
					s.addTaskCh <- &task
				}
				return
			})
			counter++
			// bound in-flight goroutines to PushPartChanSize
			if counter == s.c.Job.PushPartChanSize {
				group.Wait()
				counter = 0
			}
		}
	}
	if counter > 0 {
		group.Wait()
	}
	// resolve the final partial mid batch synchronously
	if len(mids) > 0 {
		var (
			valid int64
			ts    map[int][]string
		)
		if ts, valid, err = s.tokensByMids(t, mids); err == nil {
			midValid += valid
			for p, v := range ts {
				tokens[p] = append(tokens[p], v...)
			}
		} else {
			log.Error("s.tokensByMids(%+v) error(%v)", t, err)
		}
	}
	s.cache.Save(func() {
		arg := &pb.AddMidProgressRequest{Task: t.ID, MidTotal: midTotal, MidValid: midValid}
		if _, e := s.pushRPC.AddMidProgress(context.Background(), arg); e != nil {
			log.Error("s.pushRPC.AddMidProgress(%+v) error(%v)", arg, e)
		}
	})
	// flush the remaining partial chunks
	for p, v := range tokens {
		if len(v) == 0 {
			continue
		}
		if path, err = s.saveFile(v); err != nil {
			log.Error("pretreatTaskMid s.saveFild error(%v)", err)
			return
		}
		task := *t
		task.MidFile = path
		task.PlatformID = p
		s.addTaskCh <- &task
	}
	log.Info("pretreatTaskMid task(%+v)", t)
	return
}
// pretreatTaskToken expands a token-list task: it reads "platform\ttoken"
// lines from t.MidFile, chunks tokens into files of at most LimitPerTask
// entries per platform, and enqueues one per-platform sub-task per file.
func (s *Service) pretreatTaskToken(t *pushmdl.Task) (err error) {
	f, err := os.Open(t.MidFile)
	if err != nil {
		log.Error("pretreatTaskToken(%+v) open file error(%v)", t, err)
		return
	}
	defer f.Close()
	var (
		exit   bool
		plat   int
		line   string
		token  string
		path   string
		tokens = make(map[int][]string)
		reader = bufio.NewReader(f)
	)
	for {
		if exit {
			break
		}
		if line, err = reader.ReadString('\n'); err != nil {
			if err == io.EOF {
				exit = true // no 'continue', so the last line without '\n' is still processed
			} else {
				log.Error("read file error(%v)", err)
				continue
			}
		}
		if plat, token, err = parseTokenLine(line); err != nil {
			log.Error("parse token line(%s) error(%v)", line, err)
			continue
		}
		tokens[plat] = append(tokens[plat], token)
		// flush a full chunk as its own sub-task file
		if len(tokens[plat]) >= s.c.Job.LimitPerTask {
			if path, err = s.saveFile(tokens[plat]); err != nil {
				log.Error("pretreatTaskToken s.saveFile error(%v)", err)
				s.cache.Save(func() {
					s.dao.SendWechat(fmt.Sprintf("pretreatTaskToken(%v) saveTokens error(%v)", t.ID, err))
				})
				return
			}
			tokens[plat] = []string{}
			task := *t
			task.MidFile = path
			task.PlatformID = plat
			s.addTaskCh <- &task
		}
	}
	// flush the remaining partial chunks
	for p, v := range tokens {
		if len(v) == 0 {
			continue
		}
		if path, err = s.saveFile(v); err == nil {
			task := *t
			task.MidFile = path
			task.PlatformID = p
			s.addTaskCh <- &task
		}
	}
	log.Info("pretreatTaskToken task(%+v)", t)
	return
}
// parseTokenLine splits one "platform\ttoken" record into its parts.
// Blank lines yield errEmptyLine; records that are not exactly a
// positive integer platform id and a non-empty token separated by a
// single tab yield errInvalidToken.
func parseTokenLine(line string) (plat int, token string, err error) {
	trimmed := strings.Trim(line, " \r\n")
	if trimmed == "" {
		err = errEmptyLine
		return
	}
	fields := strings.Split(trimmed, "\t")
	if len(fields) != 2 || fields[0] == "" || fields[1] == "" {
		err = errInvalidToken
		return
	}
	if plat, err = strconv.Atoi(fields[0]); err != nil || plat <= 0 {
		err = errInvalidToken
		return
	}
	token = fields[1]
	return
}
// parseMidLine parses a single line of a mid file into a member id.
// Blank lines map to errEmptyLine; anything that is not a positive
// base-10 int64 maps to errInvalidMid.
func parseMidLine(line string) (mid int64, err error) {
	trimmed := strings.Trim(line, " \r\t\n")
	if trimmed == "" {
		err = errEmptyLine
		return
	}
	mid, err = strconv.ParseInt(trimmed, 10, 64)
	if err != nil || mid <= 0 {
		err = errInvalidMid
	}
	return
}
// saveFile persists the given tokens to a NAS file, one token per line,
// retrying up to _retry times with a short backoff. It returns the path
// of the written file. An empty token slice is rejected instead of
// panicking on the tokens[0] access below.
func (s *Service) saveFile(tokens []string) (path string, err error) {
	if len(tokens) == 0 {
		err = fmt.Errorf("saveFile: empty tokens")
		return
	}
	// mix the first token into the name so files created within the
	// same nanosecond do not collide
	name := strconv.FormatInt(time.Now().UnixNano(), 10) + tokens[0]
	data := []byte(strings.Join(tokens, "\n"))
	for i := 0; i < _retry; i++ {
		if path, err = s.saveNASFile(name, data); err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	return
}
// saveNASFile writes data into NAS and returns the full path of the
// file. The file name is the hex md5 of name; the date plus the first
// two hex chars shard the directory so no single directory accumulates
// too many files.
func (s *Service) saveNASFile(name string, data []byte) (path string, err error) {
	name = fmt.Sprintf("%x", md5.Sum([]byte(name)))
	dir := fmt.Sprintf("%s/%s/%s", strings.TrimSuffix(s.c.Job.MountDir, "/"), time.Now().Format("20060102"), name[:2])
	if _, err = os.Stat(dir); err != nil {
		if !os.IsNotExist(err) {
			log.Error("os.IsNotExist(%s) error(%v)", dir, err)
			return
		}
		// directory is missing: create the whole dated/sharded path
		if err = os.MkdirAll(dir, 0777); err != nil {
			log.Error("os.MkdirAll(%s) error(%v)", dir, err)
			return
		}
	}
	path = fmt.Sprintf("%s/%s", dir, name)
	f, err := os.OpenFile(path, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Error("s.saveNASFile(%s) OpenFile() error(%v)", path, err)
		return
	}
	defer f.Close()
	if _, err = f.Write(data); err != nil {
		// fix: the original log call passed only err for two format
		// verbs, producing %!v(MISSING) output; path was missing
		log.Error("s.saveNASFile(%s) f.Write() error(%v)", path, err)
	}
	return
}