Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,61 @@
# Bazel BUILD for go-common/app/job/main/push/service.
# Rules are tool-managed (tags = ["automanaged"]); edit with care.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)
# Unit tests for the push-job service package.
go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/push/conf:go_default_library",
        "//app/service/main/push/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)
# The push-job service library itself.
go_library(
    name = "go_default_library",
    srcs = [
        "callback.go",
        "dataplatform.go",
        "report.go",
        "service.go",
        "task.go",
    ],
    importpath = "go-common/app/job/main/push/service",
    tags = ["automanaged"],
    deps = [
        "//app/admin/main/push/model:go_default_library",
        "//app/job/main/push/conf:go_default_library",
        "//app/job/main/push/dao:go_default_library",
        "//app/job/main/push/model:go_default_library",
        "//app/service/main/push/api/grpc/v1:go_default_library",
        "//app/service/main/push/model:go_default_library",
        "//library/cache:go_default_library",
        "//library/conf/env:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/sync/errgroup:go_default_library",
    ],
)
# Source globs used by the repo-wide srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,114 @@
package service
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/log"
)
const (
	// _retryCallback is how many times AddCallback is retried per callback.
	_retryCallback = 5
	// _delCallbackLimit caps rows removed per DelCallbacks batch.
	_delCallbackLimit = 5000
)
// callbackproc drains s.callbackCh and forwards every callback to the push
// service over gRPC, retrying each item up to _retryCallback times. It exits
// when the channel is closed (by consumeCallback) and releases the waiter.
func (s *Service) callbackproc() {
	defer s.waiter.Done()
	var err error
	for {
		msg, ok := <-s.callbackCh
		if !ok {
			log.Warn("s.callbackproc() closed")
			return
		}
		for _, v := range msg {
			// Batches may contain nil entries; skip them.
			if v == nil {
				continue
			}
			// Map the databus callback model onto the gRPC request.
			arg := &pb.AddCallbackRequest{
				Task:     v.Task,
				APP:      v.APP,
				Platform: int32(v.Platform),
				Mid:      v.Mid,
				Pid:      int32(v.Pid),
				Token:    v.Token,
				Buvid:    v.Buvid,
				Click:    int32(v.Click),
			}
			if v.Extra != nil {
				arg.Extra = &pb.CallbackExtra{Status: int32(v.Extra.Status), Channel: int32(v.Extra.Channel)}
			}
			for i := 0; i < _retryCallback; i++ {
				if _, err = s.pushRPC.AddCallback(context.Background(), arg); err == nil {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if err != nil {
				log.Error("s.pushRPC.AddCallback(%+v) error(%v)", arg, err)
				dao.PromError("report:新增callback")
				continue
			}
			log.Info("add callback success task(%s) token(%s)", v.Task, v.Token)
			// Light pacing between RPCs.
			time.Sleep(time.Millisecond)
		}
	}
}
// consumeCallback consumes callback batches from databus, unmarshals each
// message into []*pushmdl.Callback, and hands them to the callbackproc
// workers via s.callbackCh. When the databus channel closes it closes
// s.callbackCh so the workers exit too.
func (s *Service) consumeCallback() {
	defer s.waiter.Done()
	for {
		msg, ok := <-s.callbackSub.Messages()
		if !ok {
			log.Info("databus: push-job callback consumer exit!")
			close(s.callbackCh)
			return
		}
		// Counter feeds the liveness watchdog in checkConsumer.
		s.callbackCnt++
		msg.Commit()
		var cbs []*pushmdl.Callback
		if err := json.Unmarshal(msg.Value, &cbs); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			dao.PromError("service:解析databus中callback消息")
			continue
		}
		log.Info("consumeCallback key(%s) partition(%d) offset(%d) msg(%v)", msg.Key, msg.Partition, msg.Offset, string(msg.Value))
		s.callbackCh <- cbs
	}
}
// delCallbacksproc is a daily janitor: at 04:00 it deletes callback rows
// older than Job.DelCallbackInterval days, in batches of _delCallbackLimit,
// then sleeps an hour so the same day does not trigger twice.
func (s *Service) delCallbacksproc() {
	for {
		now := time.Now()
		// At 04:00 each day, purge callbacks past the retention window.
		if now.Hour() == 4 {
			var (
				err     error
				deleted int64
				b       = now.Add(time.Duration(-s.c.Job.DelCallbackInterval*24) * time.Hour)
				// Use time.Local directly; the original called
				// time.LoadLocation("Local") and ignored its error.
				t = time.Date(b.Year(), b.Month(), b.Day(), 23, 59, 59, 0, time.Local)
			)
			for {
				if deleted, err = s.dao.DelCallbacks(context.TODO(), t, _delCallbackLimit); err != nil {
					log.Error("s.delCallbacks(%v) error(%v)", t, err)
					s.dao.SendWechat("DB操作失败:push-job删除callback数据错误")
					time.Sleep(time.Second)
					continue
				}
				// A short batch means everything old enough is gone.
				if deleted < _delCallbackLimit {
					break
				}
				time.Sleep(time.Second)
			}
			log.Info("delCallbacksproc success date(%v)", t)
			time.Sleep(time.Hour)
		}
		time.Sleep(time.Minute)
	}
}

View File

@@ -0,0 +1,254 @@
package service
import (
"bytes"
"context"
"crypto/md5"
"fmt"
"os"
"strconv"
"strings"
"time"
pamdl "go-common/app/admin/main/push/model"
"go-common/app/job/main/push/model"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
)
// txCond atomically claims one data-platform condition: in a single
// transaction it fetches a condition in oldStatus and flips it to newStatus,
// so concurrent workers never pick up the same row. Returns (nil, nil) when
// no condition is pending.
func (s *Service) txCond(oldStatus, newStatus int) (cond *pamdl.DPCondition, err error) {
	ctx := context.Background()
	var tx *xsql.Tx
	if tx, err = s.dao.BeginTx(ctx); err != nil {
		log.Error("tx.BeginTx() error(%v)", err)
		return
	}
	// Query failure or no matching row: roll back and return.
	if cond, err = s.dao.TxCondByStatus(tx, oldStatus); err != nil || cond == nil {
		if e := tx.Rollback(); e != nil {
			log.Error("tx.Rollback() error(%v)", e)
		}
		return
	}
	if err = s.dao.TxUpdateCondStatus(tx, cond.ID, newStatus); err != nil {
		if e := tx.Rollback(); e != nil {
			log.Error("tx.Rollback() error(%v)", e)
		}
		return
	}
	if err = tx.Commit(); err != nil {
		log.Error("tx.Commit() error(%v)", err)
	}
	return
}
// dpQueryproc is a polling worker that claims prepared data-platform
// conditions, submits their SQL to the data platform, and records the
// returned status URL. Runs until Close sets s.closed.
func (s *Service) dpQueryproc() {
	defer s.waiter.Done()
	for {
		if s.closed {
			return
		}
		// Claim one prepared condition (transactional, so shards don't race).
		cond, err := s.txCond(pushmdl.DpCondStatusPrepared, pushmdl.DpCondStatusSubmitting)
		if err != nil || cond == nil {
			time.Sleep(time.Second)
			continue
		}
		for i := 0; i < _retry; i++ {
			if cond.StatusURL, err = s.dao.DpSubmitQuery(context.Background(), cond.SQL); err == nil {
				break
			}
			time.Sleep(time.Second)
		}
		if err != nil {
			// Submission failed after retries: fail both condition and task.
			log.Error("data platform add query(%+v) error(%v)", cond, err)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
			continue
		}
		cond.Status = pushmdl.DpCondStatusSubmitted
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateDpCond(context.Background(), cond); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			log.Error("data platform update condition(%+v) error(%v)", cond, err)
		}
		time.Sleep(time.Second)
	}
}
// dpFileproc is a polling worker that claims submitted data-platform
// conditions, waits for the remote job to finish, downloads the result
// files onto NAS, and advances the owning task to "pretreatment prepared".
// Runs until Close sets s.closed.
func (s *Service) dpFileproc() {
	defer s.waiter.Done()
	for {
		if s.closed {
			return
		}
		cond, err := s.txCond(pushmdl.DpCondStatusSubmitted, pushmdl.DpCondStatusPolling)
		if err != nil || cond == nil {
			time.Sleep(time.Second)
			continue
		}
		var (
			path  string
			files []string
		)
		// dpCheckJob handles its own failure bookkeeping when empty.
		if files = s.dpCheckJob(cond); len(files) == 0 {
			continue
		}
		for i := 0; i < _retry; i++ {
			if path, err = s.dpDownloadFiles(cond, files); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil || path == "" {
			log.Error("data platform download query(%+v) file error(%v)", cond, err)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
			continue
		}
		cond.File = path
		cond.Status = pushmdl.DpCondStatusDone
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateDpCond(context.Background(), cond); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			log.Error("data platform UpdateDpCond(%+v) error(%v)", cond, err)
			continue
		}
		for i := 0; i < _retry; i++ {
			if err = s.dao.UpdateTask(context.Background(), strconv.FormatInt(cond.Task, 10), path, pushmdl.TaskStatusPretreatmentPrepared); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			// Bug fix: the format string has four verbs but the original
			// omitted the trailing err argument.
			log.Error("s.dao.UpdateTask(%d,%s,%d) error(%v)", cond.Task, path, pushmdl.TaskStatusPretreatmentPrepared, err)
		}
		time.Sleep(time.Second)
	}
}
// dpCheckJob polls the data-platform job at cond.StatusURL until it
// succeeds, errors, produces no files, or exceeds Job.DpPollingTime.
// On success it returns the result file URLs; every break path falls
// through to mark the owning task failed.
func (s *Service) dpCheckJob(cond *pamdl.DPCondition) (files []string) {
	now := time.Now()
	for {
		// Give up once we have polled longer than the configured window.
		if time.Since(now) > time.Duration(s.c.Job.DpPollingTime) {
			log.Error("polling stoped, more over than dpPollingTime, give job up")
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			break
		}
		res, err := s.dao.DpCheckJob(context.Background(), cond.StatusURL)
		if err != nil {
			log.Error("s.dao.DpCheckJob(%s) error(%v)", cond.StatusURL, err)
			time.Sleep(time.Second)
			continue
		}
		// Job still running: keep polling at a 5s cadence.
		if res.StatusID == model.CheckJobStatusDoing || res.StatusID == model.CheckJobStatusPending {
			log.Info("polling (%s) ing..., status(%d)", cond.StatusURL, res.StatusID)
			time.Sleep(5 * time.Second)
			continue
		}
		if res.StatusID == model.CheckJobStatusOk {
			if res.Files == nil || len(res.Files) == 0 {
				log.Info("polling (%s) success, no files found", cond.StatusURL)
				s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusNoFile)
				break
			}
			files = res.Files
			log.Info("polling (%s) success, files(%d)", cond.StatusURL, len(files))
			// Early return: success skips the failure bookkeeping below.
			return
		}
		if res.StatusID == model.CheckJobStatusErr {
			log.Error("polling (%s) error, res(%+v)", cond.StatusURL, res)
			s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusFailed)
			break
		}
	}
	// Reached only via break (timeout / no files / job error).
	log.Error("polling cond(%d) error", cond.ID)
	s.dao.UpdateTaskStatus(context.Background(), cond.Task, pushmdl.TaskStatusFailed)
	return
}
// dpDownloadFiles marks the condition as downloading, then appends every
// result file into a single NAS file under MountDir/<yyyymmdd>/<md5(nanos)>
// and returns that path.
func (s *Service) dpDownloadFiles(cond *pamdl.DPCondition, files []string) (path string, err error) {
	for i := 0; i < _retry; i++ {
		if err = s.dao.UpdateDpCondStatus(context.Background(), cond.ID, pushmdl.DpCondStatusDownloading); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		return
	}
	// Date-based directory under the NAS mount point.
	dir := fmt.Sprintf("%s/%s", strings.TrimSuffix(s.c.Job.MountDir, "/"), time.Now().Format("20060102"))
	if _, err = os.Stat(dir); err != nil {
		if !os.IsNotExist(err) {
			log.Error("os.IsNotExist(%s) error(%v)", dir, err)
			return
		}
		if err = os.MkdirAll(dir, 0777); err != nil {
			log.Error("os.MkdirAll(%s) error(%v)", dir, err)
			return
		}
	}
	// File name: md5 of the current nanosecond timestamp.
	name := strconv.FormatInt(time.Now().UnixNano(), 10)
	path = fmt.Sprintf("%s/%x", dir, md5.Sum([]byte(name)))
	// All partial result files are appended into the one target file.
	for _, f := range files {
		if err = s.dpDownloadFile(f, path); err != nil {
			return
		}
	}
	return
}
// dpDownloadFile downloads one data-platform result file, rewrites the
// \u0001 column separators as spaces line by line, and appends the
// normalized content to the file at path.
func (s *Service) dpDownloadFile(url, path string) (err error) {
	var (
		res     []byte
		content [][]byte
	)
	for i := 0; i < _retry; i++ {
		if res, err = s.dao.DpDownloadFile(context.Background(), url); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		log.Error("s.dao.DpDownloadFile(%s) error(%v)", url, err)
		return
	}
	// Replace the data platform's \u0001 column separator with spaces.
	for _, bs := range bytes.Split(res, []byte("\n")) {
		n := bytes.Split(bs, []byte("\u0001"))
		content = append(content, bytes.Join(n, []byte(" ")))
	}
	for i := 0; i < _retry; i++ {
		if err = s.saveDpFile(path, bytes.Join(content, []byte("\n"))); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		// Bug fix: the original log named saveNASFile and passed url, but
		// the failing call is saveDpFile writing to path.
		log.Error("s.saveDpFile(%s) error(%v)", path, err)
	}
	return
}
// saveDpFile appends data-platform bytes to the NAS file at path,
// creating the file when it does not exist yet.
func (s *Service) saveDpFile(path string, data []byte) (err error) {
	var f *os.File
	if f, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err != nil {
		log.Error("s.saveDpFile(%s) OpenFile() error(%v)", path, err)
		return
	}
	defer f.Close()
	_, err = f.Write(data)
	if err != nil {
		log.Error("s.saveDpFile(%s) f.Write() error(%v)", path, err)
	}
	return
}

View File

@@ -0,0 +1,243 @@
package service
import (
"context"
"runtime"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/log"
)
const (
	// _dbBatch is the id-range width per report-table scan query.
	_dbBatch = 100000
	// _cacheBatch is how many reports are pushed to the token cache per RPC.
	_cacheBatch = 50
)
// delInvalidReportsproc periodically asks the push service to remove
// reports invalidated by Mi feedback, sleeping DelInvalidReportInterval
// between rounds. (The "uninstalled" variant is intentionally disabled.)
func (s *Service) delInvalidReportsproc() {
	for {
		arg := &pb.DelInvalidReportsRequest{Type: pushmdl.DelMiFeedback}
		if _, err := s.pushRPC.DelInvalidReports(context.Background(), arg); err != nil {
			log.Error("s.pushRPC.DelInvalidReports(%d) error(%v)", arg.Type, err)
			dao.PromError("report:删除mi无效上报")
		}
		// arg = &pushmdl.ArgDelInvalidReport{Type: pushmdl.DelMiUninstalled}
		// if err := s.pushRPC.DelInvalidReports(context.Background(), arg); err != nil {
		// log.Error("s.pushRPC.DelInvalidReports(%d) error(%v)", arg.Type, err)
		// dao.PromError("report:删除mi卸载token")
		// }
		time.Sleep(time.Duration(s.c.Job.DelInvalidReportInterval))
	}
}
// reportproc drains s.reportCh and forwards each report to the push service
// over gRPC, retrying up to _retry times per report. Exits when the channel
// is closed and releases the waiter.
func (s *Service) reportproc() {
	defer s.waiter.Done()
	var err error
	for {
		msg, ok := <-s.reportCh
		if !ok {
			log.Warn("s.reportproc() closed")
			return
		}
		for _, v := range msg {
			// Batches may contain nil entries; skip them.
			if v == nil {
				continue
			}
			// Map the databus report model onto the gRPC request.
			arg := &pb.AddReportRequest{
				Report: &pb.ModelReport{
					APPID:        int32(v.APPID),
					PlatformID:   int32(v.PlatformID),
					Mid:          v.Mid,
					Buvid:        v.Buvid,
					DeviceToken:  v.DeviceToken,
					Build:        int32(v.Build),
					TimeZone:     int32(v.TimeZone),
					NotifySwitch: int32(v.NotifySwitch),
					DeviceBrand:  v.DeviceBrand,
					DeviceModel:  v.DeviceModel,
					OSVersion:    v.OSVersion,
					Extra:        v.Extra,
				},
			}
			for i := 0; i < _retry; i++ {
				if _, err = s.pushRPC.AddReport(context.Background(), arg); err == nil {
					break
				}
				time.Sleep(20 * time.Millisecond)
			}
			if err != nil {
				log.Error("s.pushRPC.AddReport(%+v) error(%v)", v, err)
				dao.PromError("report:新增上报数据")
			}
			// Light pacing between RPCs.
			time.Sleep(time.Millisecond)
		}
	}
}
// refreshTokensproc triggers a full token-cache refresh once a week, at the
// configured weekday and hour, polling the clock every minute otherwise.
func (s *Service) refreshTokensproc() {
	for {
		n := time.Now()
		if int(n.Weekday()) == s.c.Job.SyncReportCacheWeek && n.Hour() == s.c.Job.SyncReportCacheHour {
			s.RefreshTokenCache()
			// Sleep past the trigger hour so it fires once per week.
			time.Sleep(time.Hour)
			continue
		}
		time.Sleep(time.Minute)
	}
}
// RefreshTokenCache rebuilds the push-service token caches from the report
// table: it scans all reports in _dbBatch id ranges, pushes token batches of
// _cacheBatch via AddTokensCache, and pushes per-mid report lists via
// AddUserReportCache. Reports with NotifySwitch == 0 are skipped.
func (s *Service) RefreshTokenCache() {
	var (
		err   error
		maxid int64
		ctx   = context.Background()
	)
	for i := 0; i < _retry; i++ {
		if maxid, err = s.dao.ReportLastID(ctx); err == nil {
			break
		}
		time.Sleep(time.Second)
	}
	if err != nil {
		log.Error("s.refreshTokensproc() error(%v)", err)
		return
	}
	log.Info("refresh token start, maxid(%d)", maxid)
	var (
		updatedUsers  int64
		updatedTokens int64
		sli           []*pb.ModelReport
		// pool groups reports by mid for the per-user cache refresh below.
		pool = make(map[int64][]*pb.ModelReport)
	)
	for i := int64(0); i <= maxid; i += _dbBatch {
		var rs []*pushmdl.Report
		for j := 0; j < _retry; j++ {
			if rs, err = s.dao.ReportsByRange(ctx, i, i+_dbBatch); err == nil {
				break
			}
			time.Sleep(20 * time.Millisecond)
		}
		if err != nil {
			log.Error("s.dao.ReportsByRange(%d,%d) error(%v)", i, i+_dbBatch, err)
			continue
		}
		for _, r := range rs {
			// Notifications disabled: do not cache this report.
			if r.NotifySwitch == 0 {
				continue
			}
			nr := &pb.ModelReport{
				APPID:        int32(r.APPID),
				PlatformID:   int32(r.PlatformID),
				Mid:          r.Mid,
				Buvid:        r.Buvid,
				DeviceToken:  r.DeviceToken,
				Build:        int32(r.Build),
				TimeZone:     int32(r.TimeZone),
				NotifySwitch: int32(r.NotifySwitch),
				DeviceBrand:  r.DeviceBrand,
				DeviceModel:  r.DeviceModel,
				OSVersion:    r.OSVersion,
				Extra:        r.Extra,
			}
			sli = append(sli, nr)
			// Flush full token batches as we go.
			if len(sli) >= _cacheBatch {
				s.addTokensCache(sli)
				sli = []*pb.ModelReport{}
			}
			// Anonymous reports cannot be grouped per user.
			if r.Mid == 0 {
				continue
			}
			pool[r.Mid] = append(pool[r.Mid], nr)
			updatedTokens++
		}
		log.Info("refresh token sovled min(%d) max(%d)", i, i+_dbBatch)
		time.Sleep(time.Millisecond)
	}
	// Flush the trailing partial batch.
	if len(sli) > 0 {
		s.addTokensCache(sli)
	}
	log.Info("refresh token data, users(%d) tokens(%d)", len(pool), updatedTokens)
	for mid, rs := range pool {
		arg := &pb.AddUserReportCacheRequest{Mid: mid, Reports: rs}
		for i := 0; i < _retry; i++ {
			if _, err = s.pushRPC.AddUserReportCache(ctx, arg); err == nil {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			log.Error("s.pushRPC.AddUserReportCache(%d) error(%v)", mid, err)
			continue
		}
		updatedUsers++
		// Deleting while ranging is safe in Go; shrinks the map as we go.
		delete(pool, mid)
	}
	pool = nil
	runtime.GC()
	log.Info("refresh token end, updated users(%d) tokens(%d)", updatedUsers, updatedTokens)
}
// addTokensCache pushes one batch of reports into the push-service token
// cache, retrying up to _retry times before giving up.
func (s *Service) addTokensCache(rs []*pb.ModelReport) (err error) {
	req := &pb.AddTokensCacheRequest{Reports: append([]*pb.ModelReport(nil), rs...)}
	for attempt := 0; attempt < _retry; attempt++ {
		if _, err = s.pushRPC.AddTokensCache(context.Background(), req); err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond)
	}
	if err != nil {
		log.Error("s.pushRPC.AddTokensCache tokens(%d) error(%v)", len(rs), err)
		return
	}
	log.Info("s.pushRPC.AddTokensCache tokens(%d)", len(rs))
	return
}
// tokensByMids resolves device tokens for the given mids from the report
// cache and filters them against the task's app, notify switch, expire time
// and build constraints. It returns a platformID -> tokens map and `valid`,
// the number of cache entries iterated (NOTE(review): valid is incremented
// once per cached entry regardless of whether any token passed the filters —
// confirm that is the intended meaning of "valid").
func (s *Service) tokensByMids(task *pushmdl.Task, mids []int64) (res map[int][]string, valid int64, err error) {
	rs, _, err := s.dao.ReportsCacheByMids(context.Background(), mids)
	if err != nil {
		log.Error("s.dao.ReportsCacheByMids() error(%v)", err)
		return
	}
	var (
		exist = make(map[int64]bool, len(rs))
		// platformCount = len(task.Platform)
		buildCount = len(task.Build)
	)
	for mid := range rs {
		exist[mid] = true
	}
	// Log mids with no cached reports so coverage gaps are visible.
	for _, mid := range mids {
		if !exist[mid] {
			log.Warn("tokens by mid, task(%s) mid(%d)", task.ID, mid)
		}
	}
	res = make(map[int][]string)
	for _, rr := range rs {
		for _, r := range rr {
			if r.APPID != task.APPID {
				continue
			}
			if r.NotifySwitch == pushmdl.SwitchOff {
				continue
			}
			// Skip devices whose local time is already past the expiry.
			realTime := pushmdl.RealTime(r.TimeZone)
			if realTime.Unix() > int64(task.ExpireTime) {
				continue
			}
			// if platformCount > 0 && !validatePlatform(r.PlatformID, task.Platform) {
			// continue
			// }
			if buildCount > 0 && !pushmdl.ValidateBuild(r.PlatformID, r.Build, task.Build) {
				continue
			}
			res[r.PlatformID] = append(res[r.PlatformID], r.DeviceToken)
		}
		valid++
	}
	return
}

View File

@@ -0,0 +1,178 @@
package service
import (
"context"
"encoding/json"
"sync"
"sync/atomic"
"time"
"go-common/app/job/main/push/conf"
"go-common/app/job/main/push/dao"
pushrpc "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
"go-common/library/cache"
"go-common/library/conf/env"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
	// _max is the report batch size flushed to reportCh.
	_max = 1024
	// _retry is the default retry count for RPC/DB calls.
	_retry = 3
)
// Service wires the databus consumers, background workers and the push
// service gRPC client that make up the push-job.
type Service struct {
	c           *conf.Config
	dao         *dao.Dao
	waiter      sync.WaitGroup // tracks consumer/worker goroutines
	addTaskWg   sync.WaitGroup // tracks the addTaskproc writer
	cache       *cache.Cache   // async side-job queue (wechat alerts, progress reporting)
	pushRPC     pushrpc.PushClient
	reportSub   *databus.Databus         // consumer for new reports
	callbackSub *databus.Databus         // consumer for callback
	reportCh    chan []*pushmdl.Report   // batched reports -> reportproc workers
	callbackCh  chan []*pushmdl.Callback // callback batches -> callbackproc workers
	addTaskCh   chan *pushmdl.Task       // pretreated sub-tasks -> addTaskproc
	reportCnt   int64                    // consumed report count, read by checkConsumer
	callbackCnt int64                    // consumed callback count, read by checkConsumer
	closedCnt   int64                    // CAS guard around closing reportCh
	closed      bool                     // set by Close to stop polling loops
}
// New creates a Service instance: it builds the dao, databus consumers and
// gRPC client, then starts every background worker. Panics if the push RPC
// client cannot be created.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:           c,
		dao:         dao.New(c),
		cache:       cache.New(1, 102400),
		reportSub:   databus.New(c.ReportSub),
		callbackSub: databus.New(c.CallbackSub),
		reportCh:    make(chan []*pushmdl.Report, 1024),
		callbackCh:  make(chan []*pushmdl.Callback, 1024),
		addTaskCh:   make(chan *pushmdl.Task, 10240),
	}
	var err error
	if s.pushRPC, err = pushrpc.NewClient(c.PushRPC); err != nil {
		panic(err)
	}
	if env.DeployEnv == env.DeployEnvProd {
		go s.delInvalidReportsproc() // proactively remove invalid tokens (prod only)
	}
	// Sharded workers draining reportCh / callbackCh.
	for i := 0; i < s.c.Job.ReportShard; i++ {
		s.waiter.Add(1)
		go s.reportproc()
	}
	for i := 0; i < s.c.Job.CallbackShard; i++ {
		s.waiter.Add(1)
		go s.callbackproc()
	}
	if s.c.Job.PretreatTask {
		for i := 0; i < s.c.Job.PretreatmentTaskShard; i++ {
			s.waiter.Add(1)
			go s.pretreatTaskproc() // pretreatment: split tasks into per-platform token tasks
		}
	}
	s.addTaskWg.Add(1)
	go s.addTaskproc()
	s.waiter.Add(1)
	go s.consumeReport()
	s.waiter.Add(1)
	go s.consumeCallback()
	go s.checkConsumer()
	// purge expired data
	go s.delCallbacksproc()
	go s.delTasksproc()
	// periodically refresh the token cache
	go s.refreshTokensproc()
	// data platform polling workers
	s.waiter.Add(1)
	go s.dpQueryproc()
	s.waiter.Add(1)
	go s.dpFileproc()
	return
}
// consumeReport consumes report messages from databus, batches them, and
// flushes to s.reportCh either when _max reports accumulate or on every
// ReportTicker tick. On consumer shutdown it flushes the remainder and
// closes reportCh exactly once so the reportproc workers can exit.
func (s *Service) consumeReport() {
	defer s.waiter.Done()
	// Bug fix: the original used make([]*pushmdl.Report, _max), which
	// pre-filled the first batch with _max nil entries and made len()
	// start at _max (immediate flush of garbage). Allocate empty with
	// capacity instead.
	reports := make([]*pushmdl.Report, 0, _max)
	ticker := time.NewTicker(time.Duration(s.c.Job.ReportTicker))
	defer ticker.Stop()
	for {
		select {
		case msg, ok := <-s.reportSub.Messages():
			if !ok {
				log.Info("databus: push-job report consumer exit!")
				if len(reports) > 0 {
					s.reportCh <- reports
				}
				// Bug fix: the original closed reportCh only when the CAS
				// FAILED, so with a single consumer the channel was never
				// closed and reportproc/Close could hang. Close on the
				// first (successful) transition instead.
				if atomic.CompareAndSwapInt64(&s.closedCnt, 0, 1) {
					close(s.reportCh)
				}
				return
			}
			s.reportCnt++
			msg.Commit()
			m := &pushmdl.Report{}
			if err := json.Unmarshal(msg.Value, m); err != nil {
				log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
				dao.PromError("service:解析计数databus消息")
				continue
			}
			log.Info("consumeReport key(%s) partition(%d) offset(%d) msg(%+v)", msg.Key, msg.Partition, msg.Offset, m)
			reports = append(reports, m)
			// Keep accumulating until the batch is full.
			if len(reports) < _max {
				continue
			}
		case <-ticker.C:
		}
		// Flush the current batch (full batch, or ticker fired).
		if len(reports) > 0 {
			temp := make([]*pushmdl.Report, len(reports))
			copy(temp, reports)
			reports = reports[:0]
			s.reportCh <- temp
		}
	}
}
// checkConsumer is a liveness watchdog (prod only): every 5 minutes it
// compares the report/callback consume counters against their previous
// values and sends a wechat alert if nothing was consumed.
// NOTE(review): reportCnt/callbackCnt are incremented by the consumer
// goroutines without synchronization, so these reads race — consider
// atomic counters.
func (s *Service) checkConsumer() {
	if env.DeployEnv != env.DeployEnvProd {
		return
	}
	// c1/c2 hold the counter values seen on the previous round.
	var c1, c2 int64
	for {
		time.Sleep(5 * time.Minute)
		if s.reportCnt-c1 == 0 {
			msg := "push-job report did not consume within 5 minute"
			s.dao.SendWechat(msg)
			log.Warn(msg)
		}
		c1 = s.reportCnt
		if s.callbackCnt-c2 == 0 {
			msg := "push-job callback did not consume within 5 minute"
			s.dao.SendWechat(msg)
			log.Warn(msg)
		}
		c2 = s.callbackCnt
	}
}
// Ping reports the health of the service by pinging its dao dependencies.
func (s *Service) Ping(c context.Context) (err error) {
	return s.dao.Ping(c)
}
// Close shuts the service down in dependency order: stop the polling loops
// (closed flag), close the databus subscriptions so the consume goroutines
// exit and close their downstream channels, close the dao, wait for the
// worker pool, then close addTaskCh and wait for the task writer.
func (s *Service) Close() {
	s.closed = true
	s.reportSub.Close()
	s.callbackSub.Close()
	s.dao.Close()
	s.waiter.Wait()
	close(s.addTaskCh)
	s.addTaskWg.Wait()
}

View File

@@ -0,0 +1,45 @@
package service
import (
"context"
"flag"
"path/filepath"
"testing"
"time"
"go-common/app/job/main/push/conf"
pushmdl "go-common/app/service/main/push/model"
. "github.com/smartystreets/goconvey/convey"
)
// srv is the package-wide Service under test, built once in init.
var srv *Service
// init loads the test config and boots a real Service instance for the
// goconvey tests below.
func init() {
	dir, _ := filepath.Abs("../cmd/push-job-test.toml")
	flag.Set("conf", dir)
	conf.Init()
	srv = New(conf.Conf)
	// Give the background goroutines a moment to start.
	time.Sleep(time.Second)
}
// WithService adapts a test body so it receives the shared test Service.
func WithService(f func(s *Service)) func() {
	return func() { f(srv) }
}
// Test_Ping verifies the service health check succeeds against the test deps.
func Test_Ping(t *testing.T) {
	Convey("ping", t, WithService(func(s *Service) {
		err := s.Ping(context.TODO())
		So(err, ShouldBeNil)
	}))
}
// Test_TxCond exercises the transactional condition claim; cond may be nil
// when no prepared condition exists in the test DB.
func Test_TxCond(t *testing.T) {
	Convey("query conditon by tx", t, WithService(func(s *Service) {
		cond, err := s.txCond(pushmdl.DpCondStatusPrepared, pushmdl.DpCondStatusSubmitting)
		So(err, ShouldBeNil)
		t.Logf("cond(%+v)", cond)
	}))
}

View File

@@ -0,0 +1,533 @@
package service
import (
"bufio"
"context"
"crypto/md5"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
"go-common/app/job/main/push/dao"
pb "go-common/app/service/main/push/api/grpc/v1"
pushmdl "go-common/app/service/main/push/model"
xsql "go-common/library/database/sql"
"go-common/library/log"
"go-common/library/sync/errgroup"
)
const (
	// _delTaskLimit caps how many expired task rows one DELETE batch touches.
	_delTaskLimit = 5000
)
// Sentinel errors for mid/token file line parsing.
var (
	errEmptyLine    = errors.New("empty line")
	errInvalidMid   = errors.New("invalid mid format")
	errInvalidToken = errors.New("invalid token format")
)
// addTaskproc drains s.addTaskCh, marking each pretreated sub-task prepared
// and persisting it with up to _retry attempts. Failures trigger an async
// wechat alert. Exits when the channel is closed (see Close).
func (s *Service) addTaskproc() {
	defer s.addTaskWg.Done()
	var err error
	for {
		task, ok := <-s.addTaskCh
		if !ok {
			log.Info("add task channel exit")
			return
		}
		if task == nil {
			continue
		}
		task.Status = pushmdl.TaskStatusPrepared
		// NOTE(review): unlike the other retry loops there is no sleep
		// between attempts here.
		for i := 0; i < _retry; i++ {
			if err = s.dao.AddTask(context.Background(), task); err == nil {
				break
			}
		}
		if err != nil {
			log.Error("add task(%+v) error(%v)", task, err)
			s.cache.Save(func() {
				s.dao.SendWechat(fmt.Sprintf("add task(%d)", task.Job))
			})
			continue
		}
		dao.PromInfo("add task")
		time.Sleep(time.Millisecond)
	}
}
// delTasksproc is a daily janitor: at 02:00 it deletes task rows older than
// Job.DelTaskInterval days, in batches of _delTaskLimit, then sleeps an
// hour so the same day does not trigger twice.
func (s *Service) delTasksproc() {
	for {
		now := time.Now()
		// At 02:00 each day, purge tasks past the retention window.
		if now.Hour() != 2 {
			time.Sleep(time.Minute)
			continue
		}
		var (
			err     error
			deleted int64
			b       = now.Add(time.Duration(-s.c.Job.DelTaskInterval*24) * time.Hour)
			loc, _  = time.LoadLocation("Local")
			t       = time.Date(b.Year(), b.Month(), b.Day(), 23, 59, 59, 0, loc)
		)
		for {
			if deleted, err = s.dao.DelTasks(context.TODO(), t, _delTaskLimit); err != nil {
				log.Error("s.delTasks(%v) error(%v)", t, err)
				s.dao.SendWechat("DB操作失败:push-job删除task数据错误")
				time.Sleep(time.Second)
				continue
			}
			// A short batch means everything old enough is gone.
			if deleted < _delTaskLimit {
				break
			}
			time.Sleep(time.Second)
		}
		time.Sleep(time.Hour)
	}
}
// pretreatTaskproc is a polling worker: it claims one task awaiting
// pretreatment, converts it into per-platform token tasks, and alerts on
// failure. Runs until Close sets s.closed.
func (s *Service) pretreatTaskproc() {
	defer s.waiter.Done()
	for {
		if s.closed {
			return
		}
		task, err := s.pickPretreatmentTask()
		if err != nil {
			time.Sleep(5 * time.Second)
			continue
		}
		// task may be nil when nothing is pending.
		if task != nil {
			log.Info("pretreat task job(%d) id(%s)", task.Job, task.ID)
			if err = s.pretreatTask(task); err != nil {
				log.Error("pretreat task(%+v) error(%v)", task, err)
				s.cache.Save(func() { s.dao.SendWechat(fmt.Sprintf("pretreat task(%s) error", task.ID)) })
			}
		}
		time.Sleep(time.Duration(s.c.Job.LoadTaskInteval))
	}
}
// pickPretreatmentTask atomically claims one task in "pretreatment prepared"
// state and moves it to "pretreatment doing" inside a transaction, so
// concurrent pretreat workers never pick the same task. Returns (nil, nil)
// when nothing is pending.
func (s *Service) pickPretreatmentTask() (t *pushmdl.Task, err error) {
	c := context.Background()
	var tx *xsql.Tx
	if tx, err = s.dao.BeginTx(c); err != nil {
		log.Error("tx.BeginTx() error(%v)", err)
		return
	}
	if t, err = s.dao.TxTaskByStatus(tx, pushmdl.TaskStatusPretreatmentPrepared); err != nil {
		if e := tx.Rollback(); e != nil {
			dao.PromError("task:获取新任务")
			log.Error("tx.Rollback() error(%v)", e)
		}
		return
	}
	// No pending task: release the transaction and return nil.
	if t == nil {
		if e := tx.Rollback(); e != nil {
			dao.PromError("task:获取新任务")
			log.Error("tx.Rollback() error(%v)", e)
		}
		return
	}
	if err = s.dao.TxUpdateTaskStatus(tx, t.ID, pushmdl.TaskStatusPretreatmentDoing); err != nil {
		if e := tx.Rollback(); e != nil {
			dao.PromError("task:更新任务状态")
			log.Error("tx.Rollback() error(%v)", e)
		}
		return
	}
	if err = tx.Commit(); err != nil {
		dao.PromError("task:获取新任务commit")
		log.Error("tx.Commit() error(%v)", err)
	}
	return
}
// pretreatTask dispatches a claimed task to the pretreatment routine for
// its type and records the resulting status (done/failed).
func (s *Service) pretreatTask(t *pushmdl.Task) (err error) {
	// t.ID is numeric text; on a parse failure id stays 0 and the status
	// updates below are effectively no-ops. NOTE(review): consider
	// surfacing the parse error.
	id, _ := strconv.ParseInt(t.ID, 10, 64)
	switch t.Type {
	case pushmdl.TaskTypeAll:
		err = s.pretreatTaskAll(t)
	case pushmdl.TaskTypeMngToken, pushmdl.TaskTypeDataPlatformToken, pushmdl.TaskTypeDataPlatformMid:
		err = s.pretreatTaskToken(t)
	case pushmdl.TaskTypeStrategyMid, pushmdl.TaskTypeMngMid:
		err = s.pretreatTaskMid(t)
	default:
		log.Error("invalid task type, (%+v)", t)
	}
	if err != nil {
		// Bug fix: the original assigned the UpdateTaskStatus result to err,
		// so a successful status write returned nil and silently swallowed
		// the pretreatment failure. Keep the original error for the caller.
		if e := s.dao.UpdateTaskStatus(context.Background(), id, pushmdl.TaskStatusPretreatmentFailed); e != nil {
			log.Error("s.dao.UpdateTaskStatus(%d) error(%v)", id, e)
		}
		return
	}
	err = s.dao.UpdateTaskStatus(context.Background(), id, pushmdl.TaskStatusPretreatmentDone)
	return
}
// pretreatTaskAll splits a broadcast ("all users") task into per-platform
// token-file sub-tasks by scanning the whole report table in parallel id
// shards; each shard streams rows, buckets tokens by platform, and flushes
// a sub-task whenever a platform reaches Job.LimitPerTask tokens.
func (s *Service) pretreatTaskAll(t *pushmdl.Task) (err error) {
	log.Info("AddTaskAll start, task(%+v)", t)
	var (
		maxID int64
		group = errgroup.Group{}
	)
	maxID, err = s.dao.ReportLastID(context.Background())
	if err != nil || maxID <= 0 {
		log.Error("s.pretreatTaskAll() error(%v)", err)
		s.cache.Save(func() {
			s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) ReportLastID(%d) error", t.ID, maxID))
		})
		return
	}
	log.Info("AddTaskAll get last report ID(%d)", maxID)
	buildCount := len(t.Build)
	// Bug fix: round the shard size up so [0, maxID] is fully covered; the
	// original floor division dropped the last maxID%TaskGoroutines ids.
	g := int64(s.c.Job.TaskGoroutines)
	batch := (maxID + g - 1) / g
	for j := 0; j < s.c.Job.TaskGoroutines; j++ {
		begin := int64(j) * batch
		end := begin + batch
		group.Go(func() (e error) {
			var (
				path   string
				rows   *xsql.Rows
				tokens = make(map[int][]string)
			)
			for begin < end {
				l := begin + int64(_dbBatch)
				if l >= end {
					l = end
				}
				log.Info("AddTaskAll load reports start(%d) end(%d)", begin, l)
				if rows, e = s.dao.ReportsTaskAll(context.Background(), begin, l, t.APPID); e != nil {
					// Bug fix: the format string has four verbs but the
					// original omitted the trailing error argument.
					log.Error("s.dao.ReportsTaskAll(%d,%d,%d) error(%v)", begin, l, t.APPID, e)
					// Capture the message now; begin/l mutate after Save.
					msg := fmt.Sprintf("pretreatTaskAll(%v) ReportsTaskAll(%d,%d,%d) error", t.ID, begin, l, t.APPID)
					s.cache.Save(func() { s.dao.SendWechat(msg) })
					return
				}
				for rows.Next() {
					var (
						platformID int
						build      int
						token      string
					)
					if e = rows.Scan(&platformID, &token, &build); e != nil {
						log.Error("AddTaskAll rows.Scan() error(%v)", e)
						msg := fmt.Sprintf("pretreatTaskAll(%v) ReportsTaskAll(%d,%d,%d) error", t.ID, begin, l, t.APPID)
						s.cache.Save(func() { s.dao.SendWechat(msg) })
						rows.Close() // bug fix: release the result set on early return
						return
					}
					if buildCount > 0 && !pushmdl.ValidateBuild(platformID, build, t.Build) {
						continue
					}
					tokens[platformID] = append(tokens[platformID], token)
					// Flush a full per-platform token file as a sub-task.
					if len(tokens[platformID]) >= s.c.Job.LimitPerTask {
						if path, e = s.saveFile(tokens[platformID]); e != nil {
							log.Error("AddTaskAll s.saveTokens error(%v)", e)
							msg := fmt.Sprintf("pretreatTaskAll(%v) saveTokens error(%v)", t.ID, e)
							s.cache.Save(func() { s.dao.SendWechat(msg) })
							rows.Close() // bug fix: release the result set on early return
							return
						}
						tokens[platformID] = []string{}
						task := *t
						task.MidFile = path
						task.PlatformID = platformID
						s.addTaskCh <- &task
					}
				}
				// Bug fix: the original never closed rows, leaking a DB
				// result set (and its connection) per scanned batch.
				rows.Close()
				begin = l
			}
			// Flush the remaining partial buckets.
			for p, v := range tokens {
				if len(v) == 0 {
					continue
				}
				if path, e = s.saveFile(v); e == nil {
					task := *t
					task.MidFile = path
					task.PlatformID = p
					s.addTaskCh <- &task
				}
			}
			return
		})
	}
	if err = group.Wait(); err != nil {
		log.Error("add task all, task(%+v) error(%v)", t, err)
		s.cache.Save(func() {
			s.dao.SendWechat(fmt.Sprintf("pretreatTaskAll(%v) error(%v)", t.ID, err))
		})
		return
	}
	log.Info("AddTaskAll end, task(%+v)", t)
	s.cache.Save(func() {
		s.dao.SendWechat(fmt.Sprintf("add task all success, job(%d)", t.Job))
	})
	return
}
// pretreatTaskMid converts a mid-list task into per-platform token-file
// sub-tasks: it reads mids line by line from t.MidFile, resolves their
// device tokens in parallel PushPartSize batches, buckets the tokens by
// platform (mutex-guarded), and enqueues a sub-task whenever a platform
// bucket reaches Job.LimitPerTask. Mid totals are reported via
// AddMidProgress at the end.
func (s *Service) pretreatTaskMid(t *pushmdl.Task) (err error) {
	f, err := os.Open(t.MidFile)
	if err != nil {
		log.Error("pretreatTaskMid(%+v) open file error(%v)", t, err)
		return
	}
	defer f.Close()
	var (
		exit     bool
		line     string
		path     string
		mid      int64
		counter  int
		midTotal int64
		midValid int64
		mu       sync.Mutex // guards tokens and midValid across goroutines
		mids     []int64
		tokens   = make(map[int][]string)
		group    = errgroup.Group{}
		reader   = bufio.NewReader(f)
	)
	for {
		if exit {
			break
		}
		if line, err = reader.ReadString('\n'); err != nil {
			if err == io.EOF {
				// No 'continue': still parse the last line without '\n'.
				exit = true
			} else {
				log.Error("read file error(%v)", err)
				continue
			}
		}
		if mid, err = parseMidLine(line); err != nil {
			log.Error("parse mid line(%s) error(%v)", line, err)
			continue
		}
		midTotal++
		mids = append(mids, mid)
		if len(mids) >= s.c.Job.PushPartSize {
			midsCp := make([]int64, len(mids))
			copy(midsCp, mids)
			mids = []int64{}
			group.Go(func() (e error) {
				ts, valid, e := s.tokensByMids(t, midsCp)
				if e != nil {
					log.Error("s.tokensByMids(%v) error(%v)", t.ID, e)
					return
				}
				tcopy := make(map[int][]string)
				mu.Lock()
				midValid += valid
				for p, v := range ts {
					tokens[p] = append(tokens[p], v...)
					// Full bucket: move it out under the lock, flush after.
					if len(tokens[p]) >= s.c.Job.LimitPerTask {
						tcopy[p] = append(tcopy[p], tokens[p]...)
						tokens[p] = []string{}
					}
				}
				mu.Unlock()
				// Bug fix: the original wrote the outer `path`/`err` from
				// concurrent goroutines (data race) and returned nil e on a
				// save failure, swallowing the error. Use locals and
				// propagate e.
				for p, v := range tcopy {
					var fp string
					if fp, e = s.saveFile(v); e != nil {
						log.Error("pretreatTaskMid s.saveFile error(%v)", e)
						msg := fmt.Sprintf("pretreatTaskMid(%v) saveTokens error(%v)", t.ID, e)
						s.cache.Save(func() { s.dao.SendWechat(msg) })
						return
					}
					task := *t
					task.MidFile = fp
					task.PlatformID = p
					s.addTaskCh <- &task
				}
				return
			})
			counter++
			if counter == s.c.Job.PushPartChanSize {
				// Bug fix: surface batch errors instead of discarding Wait().
				if e := group.Wait(); e != nil {
					log.Error("pretreatTaskMid group.Wait() error(%v)", e)
				}
				counter = 0
			}
		}
	}
	if counter > 0 {
		if e := group.Wait(); e != nil {
			log.Error("pretreatTaskMid group.Wait() error(%v)", e)
		}
	}
	// Resolve the trailing partial mid batch sequentially.
	if len(mids) > 0 {
		var (
			valid int64
			ts    map[int][]string
		)
		if ts, valid, err = s.tokensByMids(t, mids); err == nil {
			midValid += valid
			for p, v := range ts {
				tokens[p] = append(tokens[p], v...)
			}
		} else {
			log.Error("s.tokensByMids(%+v) error(%v)", t, err)
		}
	}
	// Report mid totals asynchronously.
	s.cache.Save(func() {
		arg := &pb.AddMidProgressRequest{Task: t.ID, MidTotal: midTotal, MidValid: midValid}
		if _, e := s.pushRPC.AddMidProgress(context.Background(), arg); e != nil {
			log.Error("s.pushRPC.AddMidProgress(%+v) error(%v)", arg, e)
		}
	})
	// Flush the remaining partial buckets (single-threaded from here on).
	for p, v := range tokens {
		if len(v) == 0 {
			continue
		}
		if path, err = s.saveFile(v); err != nil {
			log.Error("pretreatTaskMid s.saveFile error(%v)", err)
			return
		}
		task := *t
		task.MidFile = path
		task.PlatformID = p
		s.addTaskCh <- &task
	}
	log.Info("pretreatTaskMid task(%+v)", t)
	return
}
// pretreatTaskToken converts a token-list task into per-platform token-file
// sub-tasks: it reads "<platform>\t<token>" lines from t.MidFile, buckets
// tokens by platform, and enqueues a sub-task whenever a bucket reaches
// Job.LimitPerTask, flushing partial buckets at the end.
func (s *Service) pretreatTaskToken(t *pushmdl.Task) (err error) {
	f, err := os.Open(t.MidFile)
	if err != nil {
		log.Error("pretreatTaskToken(%+v) open file error(%v)", t, err)
		return
	}
	defer f.Close()
	var (
		exit   bool
		plat   int
		line   string
		token  string
		path   string
		tokens = make(map[int][]string)
		reader = bufio.NewReader(f)
	)
	for {
		if exit {
			break
		}
		if line, err = reader.ReadString('\n'); err != nil {
			if err == io.EOF {
				exit = true // no 'continue', solve the last line whitout '\n'
			} else {
				log.Error("read file error(%v)", err)
				continue
			}
		}
		if plat, token, err = parseTokenLine(line); err != nil {
			log.Error("parse token line(%s) error(%v)", line, err)
			continue
		}
		tokens[plat] = append(tokens[plat], token)
		// Full bucket: persist it as a sub-task file and reset.
		if len(tokens[plat]) >= s.c.Job.LimitPerTask {
			if path, err = s.saveFile(tokens[plat]); err != nil {
				log.Error("pretreatTaskToken s.saveFile error(%v)", err)
				s.cache.Save(func() {
					s.dao.SendWechat(fmt.Sprintf("pretreatTaskToken(%v) saveTokens error(%v)", t.ID, err))
				})
				return
			}
			tokens[plat] = []string{}
			task := *t
			task.MidFile = path
			task.PlatformID = plat
			s.addTaskCh <- &task
		}
	}
	// Flush the remaining partial buckets.
	for p, v := range tokens {
		if len(v) == 0 {
			continue
		}
		if path, err = s.saveFile(v); err == nil {
			task := *t
			task.MidFile = path
			task.PlatformID = p
			s.addTaskCh <- &task
		}
	}
	log.Info("pretreatTaskToken task(%+v)", t)
	return
}
// parseTokenLine parses a "<platform>\t<token>" record, returning the
// platform id and token. Blank lines yield errEmptyLine; malformed fields
// or a non-positive platform yield errInvalidToken.
func parseTokenLine(line string) (plat int, token string, err error) {
	trimmed := strings.Trim(line, " \r\n")
	if trimmed == "" {
		return 0, "", errEmptyLine
	}
	parts := strings.Split(trimmed, "\t")
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return 0, "", errInvalidToken
	}
	p, convErr := strconv.Atoi(parts[0])
	if convErr != nil || p <= 0 {
		return p, "", errInvalidToken
	}
	return p, parts[1], nil
}
// parseMidLine parses one mid per line. Blank lines yield errEmptyLine;
// non-numeric or non-positive values yield errInvalidMid.
func parseMidLine(line string) (mid int64, err error) {
	trimmed := strings.Trim(line, " \r\t\n")
	if len(trimmed) == 0 {
		return 0, errEmptyLine
	}
	v, perr := strconv.ParseInt(trimmed, 10, 64)
	if perr != nil || v <= 0 {
		return v, errInvalidMid
	}
	return v, nil
}
// saveFile persists a token list (one token per line) to NAS with up to
// _retry attempts and returns the resulting path. The file name seed mixes
// the current nanosecond timestamp with the first token.
// NOTE(review): tokens must be non-empty (tokens[0] panics otherwise);
// callers guard against empty slices.
func (s *Service) saveFile(tokens []string) (path string, err error) {
	name := strconv.FormatInt(time.Now().UnixNano(), 10) + tokens[0]
	data := []byte(strings.Join(tokens, "\n"))
	for i := 0; i < _retry; i++ {
		if path, err = s.saveNASFile(name, data); err == nil {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	return
}
// saveNASFile writes data to NAS at MountDir/<yyyymmdd>/<md5[:2]>/<md5>,
// truncating any existing file, and returns the full path. Files are
// sharded into two-level directories by the first hex chars of the hash.
func (s *Service) saveNASFile(name string, data []byte) (path string, err error) {
	name = fmt.Sprintf("%x", md5.Sum([]byte(name)))
	dir := fmt.Sprintf("%s/%s/%s", strings.TrimSuffix(s.c.Job.MountDir, "/"), time.Now().Format("20060102"), name[:2])
	if _, err = os.Stat(dir); err != nil {
		if !os.IsNotExist(err) {
			log.Error("os.IsNotExist(%s) error(%v)", dir, err)
			return
		}
		if err = os.MkdirAll(dir, 0777); err != nil {
			log.Error("os.MkdirAll(%s) error(%v)", dir, err)
			return
		}
	}
	path = fmt.Sprintf("%s/%s", dir, name)
	f, err := os.OpenFile(path, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Error("s.saveNASFile(%s) OpenFile() error(%v)", path, err)
		return
	}
	defer f.Close()
	if _, err = f.Write(data); err != nil {
		// Bug fix: the format string has two verbs but the original passed
		// only err, dropping the path argument.
		log.Error("s.saveNASFile(%s) f.Write() error(%v)", path, err)
	}
	return
}