Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,72 @@
# Bazel build definitions for the archive-job service package.
# Rules are tagged "automanaged": they are maintained by a generator
# (gazelle-style) and srcs/deps are rewritten automatically.
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
# Unit tests for the service package; embeds the library under test.
go_test(
name = "go_default_test",
srcs = ["service_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/archive/conf:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
# The service library itself: archive result-DB sync, caches, dm counts,
# retry queue and service wiring.
go_library(
name = "go_default_library",
srcs = [
"archive.go",
"archive_result.go",
"cache.go",
"dm.go",
"retry.go",
"service.go",
"video.go",
],
importpath = "go-common/app/job/main/archive/service",
tags = ["automanaged"],
deps = [
"//app/interface/main/dm2/model:go_default_library",
"//app/interface/main/dm2/rpc/client:go_default_library",
"//app/job/main/archive/conf:go_default_library",
"//app/job/main/archive/dao/archive:go_default_library",
"//app/job/main/archive/dao/email:go_default_library",
"//app/job/main/archive/dao/monitor:go_default_library",
"//app/job/main/archive/dao/reply:go_default_library",
"//app/job/main/archive/dao/result:go_default_library",
"//app/job/main/archive/model/archive:go_default_library",
"//app/job/main/archive/model/databus:go_default_library",
"//app/job/main/archive/model/dm:go_default_library",
"//app/job/main/archive/model/result:go_default_library",
"//app/job/main/archive/model/retry:go_default_library",
"//app/service/main/account/api:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/api/gorpc:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//library/cache/redis:go_default_library",
"//library/database/sql:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/queue/databus:go_default_library",
],
)
# Source globs used by the repository-wide //...:all-srcs aggregation.
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,131 @@
package service
import (
"context"
"go-common/app/job/main/archive/model/archive"
"go-common/app/job/main/archive/model/result"
"go-common/app/job/main/archive/model/retry"
arcmdl "go-common/app/service/main/archive/model/archive"
"go-common/library/log"
)
// isPGC reports whether archive aid was uploaded from a PGC source,
// judged by its addit record's UpFrom field. A lookup failure is
// treated as "not PGC" (best-effort check).
func (s *Service) isPGC(aid int64) bool {
	addit, _ := s.archiveDao.Addit(context.TODO(), aid)
	if addit == nil {
		return false
	}
	return addit.UpFrom == archive.UpFromPGC || addit.UpFrom == archive.UpFromPGCSecret
}
// consumerVideoup drains the i-th videoup aid channel until it is
// closed. PGC archives are either queued for async retry (PGCAsync=1)
// or handed to the serial PGC channel; UGC archives are either queued
// for async retry (UGCAsync=1) or synced inline via arcUpdate.
func (s *Service) consumerVideoup(i int) {
	defer s.waiter.Done()
	for {
		aid, ok := <-s.videoupAids[i]
		if !ok {
			log.Error("s.videoupAids chan closed")
			return
		}
		// best-effort lookup: a nil arc is simply treated as UGC
		arc, _ := s.arcServices[0].Archive3(context.TODO(), &arcmdl.ArgAid2{Aid: aid})
		pgc := arc != nil && (arc.AttrVal(arcmdl.AttrBitIsPGC) == arcmdl.AttrYes || arc.AttrVal(arcmdl.AttrBitIsBangumi) == arcmdl.AttrYes)
		if pgc {
			if s.c.PGCAsync == 1 {
				rt := &retry.Info{Action: retry.FailResultAdd}
				rt.Data.Aid = aid
				s.PushFail(context.TODO(), rt)
				log.Warn("async PGC archive(%d)", aid)
				continue
			}
			s.pgcAids <- aid
			log.Info("aid(%d) title(%s) is PGC", aid, arc.Title)
			continue
		}
		if s.c.UGCAsync == 1 {
			rt := &retry.Info{Action: retry.FailResultAdd}
			rt.Data.Aid = aid
			s.PushFail(context.TODO(), rt)
			log.Warn("async UGC archive(%d)", aid)
			continue
		}
		s.arcUpdate(aid)
	}
}
// pgcConsumer syncs PGC archives serially from the pgcAids channel
// until the channel is closed.
func (s *Service) pgcConsumer() {
	defer s.waiter.Done()
	for aid := range s.pgcAids {
		s.arcUpdate(aid)
	}
	log.Error("s.pgcAids closed")
}
// arcUpdate syncs a single archive from the source DB into the result
// DB (via tranResult) and refreshes all related caches. On failure the
// aid is re-queued: PGC/bangumi archives go back to the serial pgcAids
// channel, everything else to its sharded videoup channel.
func (s *Service) arcUpdate(aid int64) {
	var (
		oldResult *result.Archive
		newResult *result.Archive
		c         = context.TODO()
		upCids    []int64
		delCids   []int64
		err       error
		changed   bool
	)
	log.Info("sync resultDB archive(%d) start", aid)
	defer func() {
		if err != nil {
			// re-dispatch on failure; the PGC check uses the pre-update
			// snapshot (oldResult), so a brand-new archive always goes
			// back to its UGC shard
			if oldResult != nil && (oldResult.AttrVal(result.AttrBitIsBangumi) == result.AttrYes || oldResult.AttrVal(result.AttrBitIsPGC) == result.AttrYes) {
				s.pgcAids <- aid
			} else {
				s.videoupAids[aid%int64(s.c.ChanSize)] <- aid
			}
			log.Error("s.arcUpdate(%d) error(%v)", aid, err)
		}
	}()
	// snapshot the result row before the transaction; nil on first
	// insert, and a read error here is deliberately non-fatal
	if oldResult, err = s.resultDao.Archive(c, aid); err != nil {
		log.Error("s.resultDao.Archive(%d) error(%v)", aid, err)
	}
	if changed, upCids, delCids, err = s.tranResult(c, aid); err != nil || !changed {
		log.Error("aid(%d) nothing changed err(%+v)", aid, err)
		// clear err so the defer above does not re-queue: on error
		// tranResult has already pushed its own retry item
		err = nil
		return
	}
	s.upVideoCache(aid, upCids)
	s.delVideoCache(aid, delCids)
	// re-read to get the post-transaction state for cache/notify
	if newResult, err = s.resultDao.Archive(c, aid); err != nil {
		log.Error("s.resultDao.Archive(%d) error(%v)", aid, err)
		return
	}
	err = s.updateResultCache(newResult, oldResult)
	// field/mid/mail updates need both sides of the diff, so they are
	// skipped for first-time inserts
	if oldResult != nil {
		s.updateResultField(newResult, oldResult)
		s.updateSubjectMid(newResult, oldResult)
		s.sendMail(newResult, oldResult)
	}
	action := "update"
	if oldResult == nil {
		action = "insert"
	}
	s.sendNotify(&result.ArchiveUpInfo{Table: "archive", Action: action, Nw: newResult, Old: oldResult})
	if oldResult != nil {
		log.Info("sync resultDB archive(%d) sync old(%+v) new(%+v) updated", aid, oldResult, newResult)
		return
	}
	log.Info("sync resultDB archive(%d) new(%+v) inserted", aid, newResult)
}
// hadPassed reports whether archive aid has ever been passed, i.e. a
// first-pass record exists for it. A lookup error yields false.
func (s *Service) hadPassed(c context.Context, aid int64) (had bool) {
	id, err := s.archiveDao.GetFirstPassByAID(c, aid)
	if err != nil {
		log.Error("hadPassed s.arc.GetFirstPassByAID error(%v) aid(%d)", err, aid)
		return false
	}
	return id > 0
}

View File

@@ -0,0 +1,237 @@
package service
import (
"context"
"go-common/app/job/main/archive/model/archive"
"go-common/app/job/main/archive/model/result"
"go-common/app/job/main/archive/model/retry"
arcmdl "go-common/app/service/main/archive/model/archive"
"go-common/library/database/sql"
"go-common/library/log"
)
// updateSubjectMid propagates an owner (mid) change of an archive to
// the reply subsystem's subject record. No-op when the mid is unchanged.
func (s *Service) updateSubjectMid(nw *result.Archive, old *result.Archive) {
	if nw.Mid == old.Mid {
		return
	}
	err := s.replyDao.ChangeSubjectMid(nw.AID, nw.Mid)
	if err != nil {
		return
	}
	log.Info("/x/v2/reply/admin/subject/mid called")
}
// sendMail sends a notification e-mail to PGC operators when a
// bangumi/movie archive's visible fields change; mtime-only updates
// and plain UGC archives are skipped.
func (s *Service) sendMail(nw *result.Archive, old *result.Archive) {
	// nw.Attribute == old.Attribute tmp remove
	unchanged := nw.Mid == old.Mid && nw.TypeID == old.TypeID && nw.Duration == old.Duration &&
		nw.Title == old.Title && nw.Cover == old.Cover && nw.Content == old.Content &&
		nw.Copyright == old.Copyright && nw.State == old.State && nw.Access == old.Access &&
		nw.Forward == old.Forward && nw.PubTime == old.PubTime && nw.CTime == old.CTime
	if unchanged { // all field
		log.Info("archive(%d) change only mtime no email", nw.ID)
		return
	}
	a, err := s.arcServices[0].Archive3(context.TODO(), &arcmdl.ArgAid2{Aid: nw.AID})
	if err != nil || a == nil {
		log.Error("s.arcRPC.Archive3(%d) error(%v) or arc is nil", nw.AID, err)
		return
	}
	// not bangumi or movie
	if a.AttrVal(arcmdl.AttrBitIsBangumi) == arcmdl.AttrNo && a.AttrVal(arcmdl.AttrBitIsMovie) == arcmdl.AttrNo {
		return
	}
	s.emailDao.PGCNotifyMail(a, nw, old)
	log.Info("pgc notify(%d) mail send success", nw.AID)
}
// updateResultCache refreshes the archive cache on every archive RPC
// instance after a result-DB change. Archives with state >= 0 or the
// special state -6 are (re)added to the cache; all other states delete
// the entry. Any RPC failure queues a FailUpCache retry item.
func (s *Service) updateResultCache(nw *result.Archive, old *result.Archive) (err error) {
	var (
		c      = context.TODO()
		oldMid int64
	)
	// pass the previous owner only when ownership changed, so the RPC
	// can move the archive between upper caches
	if old != nil && old.Mid != nw.Mid {
		oldMid = old.Mid
	}
	for k, rpc := range s.arcServices {
		if err = rpc.ArcCache2(c, &arcmdl.ArgCache2{Aid: nw.AID, Tp: arcmdl.CacheUpdate}); err != nil {
			log.Error("s.arcRPC(%d).ArcCache2(%d, %s) error(%v)", k, nw.AID, arcmdl.CacheUpdate, err)
		}
		// NOTE(review): -6 appears to be a visible state despite being
		// negative — confirm its meaning against the archive state enum
		if nw.State >= 0 || nw.State == -6 {
			if err = rpc.ArcCache2(c, &arcmdl.ArgCache2{Aid: nw.AID, OldMid: oldMid, Tp: arcmdl.CacheAdd}); err != nil {
				log.Error("s.arcRpc(%d).ArcCache2(%d, %s) error(%v)", k, nw.AID, arcmdl.CacheAdd, err)
			}
		} else {
			if err = rpc.ArcCache2(c, &arcmdl.ArgCache2{Aid: nw.AID, OldMid: oldMid, Tp: arcmdl.CacheDelete}); err != nil {
				log.Error("s.arcRpc(%d).ArcCache2(%d, %s) error(%v)", k, nw.AID, arcmdl.CacheDelete, err)
			}
		}
		// err still holds the result of the last ArcCache2 call for this
		// instance; a failure on any instance enqueues one retry item
		if err != nil {
			rt := &retry.Info{Action: retry.FailUpCache}
			rt.Data.Aid = nw.AID
			rt.Data.State = nw.State
			s.PushFail(c, rt)
			log.Error("updateResultCache error(%v)", err)
		}
	}
	return
}
// updateResultField refreshes the per-field archive cache on every
// archive RPC instance when the archive's type changed.
func (s *Service) updateResultField(nw *result.Archive, old *result.Archive) {
	if nw.TypeID == old.TypeID {
		return
	}
	c := context.TODO()
	for k, rpc := range s.arcServices {
		arg := &arcmdl.ArgFieldCache2{Aid: nw.AID, TypeID: nw.TypeID, OldTypeID: old.TypeID}
		if err := rpc.ArcFieldCache2(c, arg); err != nil {
			log.Error("s.arcRPC(%d).ArcFieldCache2(%d, %d, %d) error(%v)", k, nw.AID, nw.TypeID, old.TypeID, err)
		}
	}
}
// tranResult synchronizes one archive, its videos and its staff list
// from the source archive DB into the result DB inside a single
// transaction. It returns whether anything changed plus the cids whose
// video caches must be updated (upCids) or deleted (delCids). Any
// failure enqueues a FailResultAdd retry item via the deferred handler.
//
// Fixed: three log.Error calls had printf verbs but no arguments
// (TxDelVideoByCid, TxUpArchive and tx.Commit paths), producing
// "%!v(MISSING)" output and go vet errors.
func (s *Service) tranResult(c context.Context, aid int64) (changed bool, upCids []int64, delCids []int64, err error) {
	var (
		tx        *sql.Tx
		rows      int64
		a         *archive.Archive
		aResult   *result.Archive
		ad        *archive.Addit
		vs        []*archive.Video
		videosCnt int
		staff     []*archive.Staff
	)
	defer func() {
		if err != nil {
			rt := &retry.Info{Action: retry.FailResultAdd}
			rt.Data.Aid = aid
			s.PushFail(c, rt)
			log.Error("tranResult error(%v)", err)
		}
	}()
	if a, err = s.archiveDao.Archive(c, aid); err != nil || a == nil {
		log.Error("s.arc.Archive(%d) error(%v)", aid, err)
		return
	}
	if !a.IsSyncState() {
		log.Info("archive(%d) state(%d) cant change", aid, a.State)
		// FIXME: eeeee
		// a PGC archive that has never been passed is still allowed
		// through on its first change
		if s.isPGC(aid) && !s.hadPassed(c, aid) {
			log.Info("archive(%d) is PGC first change", aid)
		} else {
			return
		}
	}
	if ad, err = s.archiveDao.Addit(c, aid); err != nil {
		log.Error("s.arc.Addit(%d) error(%v)", aid, err)
		return
	}
	if ad == nil {
		ad = &archive.Addit{Aid: aid}
	}
	// if aid%10 == 0 || aid%10 == 1 || aid%10 == 2 {
	if vs, err = s.archiveDao.Videos2(c, aid); err != nil {
		log.Error("s.arc.Videos2(%d) error(%v)", aid, err)
		return
	}
	// } else {
	// if vs, err = s.archiveDao.Videos(c, aid); err != nil {
	// log.Error("s.arc.Videos(%d) error(%v)", aid, err)
	// return
	// }
	// }
	// count videos that are open/accessible and not deleted
	for _, v := range vs {
		if (v.Status == archive.VideoStatusAccess || v.Status == archive.VideoStatusOpen) && v.State >= 0 {
			videosCnt++
		}
	}
	// legacy dede data: a freshly-submitted video may not have a cid
	// yet — skip the whole sync rather than write an incomplete row
	for _, v := range vs {
		if v.Cid == 0 && v.Status == archive.VideoStatusSubmit {
			log.Error("aid(%d) vid(%d) cid(%d) videoStatus(%d) return", v.Aid, v.ID, v.Cid, v.Status)
			return
		}
	}
	if aResult, err = s.resultDao.Archive(c, aid); err != nil {
		log.Error("s.resultDao.Archive error(%+v)", err)
		return
	}
	if tx, err = s.resultDao.BeginTran(c); err != nil {
		log.Error("s.result.BeginTran error(%v)", err)
		return
	}
	var (
		duration   int
		firstCid   int64
		dimensions string
	)
	for _, v := range vs {
		if (v.Status == archive.VideoStatusAccess || v.Status == archive.VideoStatusOpen) && v.State == archive.VideoRelationBind {
			if _, err = s.resultDao.TxAddVideo(c, tx, v); err != nil {
				tx.Rollback()
				log.Error("s.result.TxAddVideo error(%v)", err)
				break
			}
			duration += int(v.Duration)
			upCids = append(upCids, v.Cid)
			if v.Index == 1 && v.SrcType == "vupload" {
				firstCid = v.Cid
				dimensions = v.Dimensions
			}
		} else {
			if _, err = s.resultDao.TxDelVideoByCid(c, tx, aid, v.Cid); err != nil {
				tx.Rollback()
				log.Error("s.result.TxDelVideoByCid error(%v)", err)
				break
			}
			delCids = append(delCids, v.Cid)
		}
	}
	a.Duration = duration
	// insert-or-update: rows == 0 means the row already existed
	if rows, err = s.resultDao.TxAddArchive(c, tx, a, ad, videosCnt, firstCid, dimensions); err != nil {
		tx.Rollback()
		log.Error("s.result.TxAddArchive error(%v)", err)
		return
	}
	if rows == 0 {
		if _, err = s.resultDao.TxUpArchive(c, tx, a, ad, videosCnt, firstCid, dimensions); err != nil {
			tx.Rollback()
			log.Error("s.result.TxUpArchive error(%v)", err)
			return
		}
	}
	// refresh co-operation staff
	if a.AttrVal(archive.AttrBitIsCooperation) == archive.AttrYes {
		if staff, err = s.archiveDao.Staff(c, aid); err != nil {
			tx.Rollback()
			log.Error("s.archiveDao.Staff aid(%d) error(%v)", aid, err)
			return
		}
		if err = s.resultDao.TxDelStaff(c, tx, aid); err != nil {
			tx.Rollback()
			log.Error("s.result.TxDelStaff aid(%d) error(%v)", aid, err)
			return
		}
		if staff != nil {
			if err = s.resultDao.TxAddStaff(c, tx, aid, staff); err != nil {
				tx.Rollback()
				log.Error("s.result.TxAddStaff aid(%d) error(%v)", aid, err)
				return
			}
		}
	} else { // archive switched from co-operation to normal: drop its staff rows
		if aResult != nil && aResult.AttrVal(archive.AttrBitIsCooperation) == archive.AttrYes {
			if err = s.resultDao.TxDelStaff(c, tx, aid); err != nil {
				tx.Rollback()
				log.Error("s.result.TxDelStaff aid(%d) error(%v)", aid, err)
				return
			}
		}
	}
	if err = tx.Commit(); err != nil {
		log.Error("tx.Commit error(%v)", err)
		return
	}
	log.Info("aid(%d) upCids(%d) delCids(%d) db updated", aid, len(upCids), len(delCids))
	changed = true
	return
}

View File

@@ -0,0 +1,203 @@
package service
import (
"context"
"encoding/json"
"time"
jobmdl "go-common/app/job/main/archive/model/databus"
"go-common/app/job/main/archive/model/result"
accgrpc "go-common/app/service/main/account/api"
"go-common/app/service/main/archive/api"
arcmdl "go-common/app/service/main/archive/model/archive"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
_actForUname = "updateUname"
_actForFace = "updateFace"
_actForAdmin = "updateByAdmin"
)
// cachesubproc consumes cache-rebuild messages and, for each aid,
// retries forever (with a short backoff) until the archive cache has
// been refreshed on every archive RPC instance, the archive does not
// exist, or its owner account is gone. The message is committed only
// after the retry loop finishes.
func (s *Service) cachesubproc() {
	defer s.waiter.Done()
	var msgs = s.cacheSub.Messages()
	for {
		var (
			msg *databus.Message
			ok  bool
			err error
		)
		if msg, ok = <-msgs; !ok {
			log.Error("s.cachesub.messages closed")
			return
		}
		if s.closeSub {
			return
		}
		m := &jobmdl.Rebuild{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			// NOTE(review): %v on []byte prints raw byte numbers — %s
			// would be more readable here; confirm before changing
			log.Error("json.Unmarshal(%v) error(%v)", msg.Value, err)
			continue
		}
		log.Info("cacheSub key(%s) value(%s) start", msg.Key, msg.Value)
		// retry loop: a non-nil retryError sleeps briefly, then the
		// whole sequence (DB read, account lookup, cache RPC) reruns
		var retryError error
		for {
			var (
				a         *result.Archive
				infoReply *accgrpc.InfoReply
				c         = context.TODO()
			)
			if retryError != nil {
				time.Sleep(10 * time.Millisecond)
			}
			a, retryError = s.resultDao.Archive(c, m.Aid)
			if retryError != nil {
				log.Error("s.resultDao.Archive(%d) error(%v)", m.Aid, retryError)
				continue
			}
			// missing archive or ownerless archive: nothing to rebuild
			if a == nil || a.Mid == 0 {
				log.Info("cache break archive(%d) not exist or mid==0", m.Aid)
				break
			}
			infoReply, retryError = s.accGRPC.Info3(c, &accgrpc.MidReq{Mid: a.Mid})
			if retryError != nil {
				// a vanished member is terminal, not retryable
				if ecode.Cause(retryError).Equal(ecode.MemberNotExist) {
					log.Info("archive(%d) mid(%d) not exist", m.Aid, a.Mid)
					break
				}
				log.Error("s.acc.RPC.Info3(%d) error(%v)", m.Aid, retryError)
				continue
			}
			if infoReply == nil {
				log.Error("infoReply mid(%d) err is nil,but info is nil too", a.Mid)
				break
			}
			if infoReply.Info.Name == "" || infoReply.Info.Face == "" {
				log.Error("empty info mid(%d) info(%+v)", infoReply.Info.Mid, infoReply.Info)
				break
			}
			for k, arcRPC := range s.arcServices {
				if retryError = arcRPC.ArcCache2(c, &arcmdl.ArgCache2{Aid: m.Aid, Tp: arcmdl.CacheUpdate}); retryError != nil {
					log.Error("s.arcRPC(%d).ArcCache2(%d) error(%v)", k, m.Aid, retryError)
					// NOTE(review): this continue only advances the inner
					// range loop; the unconditional break below still ends
					// the retry loop, so a cache RPC failure is logged but
					// never retried — confirm this is intended
					continue
				}
			}
			log.Info("archive(%d) mid(%d) uname(%s) update success", m.Aid, infoReply.Info.Mid, infoReply.Info.Name)
			break
		}
		msg.Commit()
	}
}
// accountNotifyproc consumes account-change notifications (uname, face
// or admin edits) and collects the mids whose archives need their
// upper caches rebuilt; clearMidCache flushes the set periodically.
// Messages are committed immediately (at-most-once processing).
func (s *Service) accountNotifyproc() {
	defer s.waiter.Done()
	var msgs = s.accountNotifySub.Messages()
	for {
		var (
			msg *databus.Message
			ok  bool
			err error
			c   = context.TODO()
		)
		if msg, ok = <-msgs; !ok {
			log.Error("s.cachesub.messages closed")
			return
		}
		if s.closeSub {
			return
		}
		msg.Commit()
		m := &jobmdl.AccountNotify{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		log.Info("accountNotify got key(%s) value(%s)", msg.Key, msg.Value)
		// only uname/face/admin edits affect the cached author info
		if m.Action != _actForAdmin && m.Action != _actForFace && m.Action != _actForUname {
			log.Warn("accountNotify skip action(%s) values(%s)", m.Action, msg.Value)
			continue
		}
		// skip uppers with no passed archives — nothing to rebuild
		var count int
		if count, err = s.arcServices[0].UpCount2(c, &arcmdl.ArgUpCount2{Mid: m.Mid}); err != nil {
			log.Error("s.arcRPC.UpCount2(%d) error(%v)", m.Mid, err)
			continue
		}
		if count == 0 {
			log.Info("accountNotify mid(%d) passed(%d)", m.Mid, count)
			continue
		}
		if m.Action == _actForAdmin {
			// admin edits may not touch uname/face at all: compare the
			// cached author info of a recent archive against the live
			// account record and skip if nothing changed
			var am []*api.Arc
			if am, err = s.arcServices[0].UpArcs3(c, &arcmdl.ArgUpArcs2{Mid: m.Mid, Ps: 2, Pn: 1}); err != nil {
				if ecode.Cause(err).Equal(ecode.NothingFound) {
					err = nil
					log.Info("accountNotify mid(%d) no passed archive", m.Mid)
					continue
				}
				log.Error("accountNotify mid(%d) error(%v)", m.Mid, err)
				continue
			}
			if len(am) == 0 {
				log.Info("accountNotify mid(%d) no passed archive", m.Mid)
				continue
			}
			var reply *accgrpc.InfoReply
			if reply, err = s.accGRPC.Info3(c, &accgrpc.MidReq{Mid: m.Mid}); err != nil || reply == nil {
				log.Error("accountNotify accRPC.info3(%d) error(%v)", m.Mid, err)
				continue
			}
			if reply.Info.Name == am[0].Author.Name && reply.Info.Face == am[0].Author.Face {
				log.Info("accountNotify face(%s) name(%s) not change", reply.Info.Face, reply.Info.Name)
				continue
			}
		}
		// record the mid for the periodic flush in clearMidCache
		s.notifyMu.Lock()
		s.notifyMid[m.Mid] = struct{}{}
		s.notifyMu.Unlock()
	}
}
// clearMidCache periodically swaps out the set of mids whose account
// info changed and rebuilds the archive caches of each flushed upper.
// It exits once shutdown is signalled and no pending mids remain.
//
// Fixed: the shutdown check read len(s.notifyMid) without holding
// notifyMu, racing with accountNotifyproc which writes the map under
// the lock; the length is now read inside the critical section.
func (s *Service) clearMidCache() {
	defer s.waiter.Done()
	for {
		time.Sleep(5 * time.Second)
		s.notifyMu.Lock()
		mids := s.notifyMid
		s.notifyMid = make(map[int64]struct{})
		s.notifyMu.Unlock()
		for mid := range mids {
			s.updateUpperCache(context.TODO(), mid)
		}
		s.notifyMu.Lock()
		pending := len(s.notifyMid)
		s.notifyMu.Unlock()
		if s.closeSub && pending == 0 {
			return
		}
	}
}
// updateUpperCache rebuilds the archive cache of every passed archive
// belonging to mid on every archive RPC instance. It returns the last
// error encountered, logging a summary either way.
//
// Fixed: the success log tag was misspelled "accountNofity".
func (s *Service) updateUpperCache(c context.Context, mid int64) (err error) {
	// update archive cache
	var aids []int64
	if aids, err = s.resultDao.UpPassed(c, mid); err != nil {
		log.Error("s.resultDao.UpPassed(%d) error(%v)", mid, err)
		return
	}
	failedCnt := 0
	for _, aid := range aids {
		for k, rpc := range s.arcServices {
			// NOTE(review): no Tp is set here, unlike other ArcCache2
			// call sites — confirm the zero value is the intended op
			if err = rpc.ArcCache2(c, &arcmdl.ArgCache2{Aid: aid}); err != nil {
				log.Error("s.arcRPC(%d).ArcCache2(%d) mid(%d) error(%v)", k, aid, mid, err)
				failedCnt++
			}
		}
	}
	if failedCnt > 0 {
		log.Error("accountNotify updateUpperCache mid(%d) failed(%d)", mid, failedCnt)
		return
	}
	log.Info("accountNotify updateUpperCache mid(%d) successed(%d)", mid, len(aids))
	return
}

View File

@@ -0,0 +1,155 @@
package service
import (
"context"
"encoding/json"
"strconv"
"strings"
"time"
dmmdl "go-common/app/interface/main/dm2/model"
"go-common/app/job/main/archive/model/dm"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/library/log"
"go-common/library/queue/databus"
)
const (
_type = "archive"
_table = "dm_subject_"
_update = "update"
_subjectTypeForAv = 1
)
// dmConsumer consumes dm_subject_* binlog messages and records the
// cids whose danmaku count changed; dmCounter aggregates and publishes
// them periodically.
//
// Fixed: the unmarshal-failure log printed the half-decoded canal
// struct instead of the error, and the subject-type warning used %s on
// a numeric field (garbled "%!s(...)" output).
func (s *Service) dmConsumer() {
	defer s.waiter.Done()
	for {
		var (
			msg   *databus.Message
			ok    bool
			err   error
			canal = &dm.Canal{}
		)
		if msg, ok = <-s.dmSub.Messages(); !ok || s.closeSub {
			log.Error("s.dmSub Closed")
			return
		}
		msg.Commit()
		if err = json.Unmarshal(msg.Value, canal); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		// not dm_subject_
		if !strings.HasPrefix(canal.Table, _table) {
			log.Warn("table(%s) message(%s) skiped", canal.Table, msg.Value)
			continue
		}
		// not update
		if canal.Action != _update {
			log.Warn("action(%s) message(%s) skiped", canal.Action, msg.Value)
			continue
		}
		var subject *dm.Subject
		if err = json.Unmarshal(canal.New, &subject); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", canal.New, err)
			continue
		}
		// type must be av
		if subject.Type != _subjectTypeForAv {
			log.Warn("subject type(%v) is not av message(%+v)", subject.Type, subject)
			continue
		}
		s.dmMu.Lock()
		s.dmCids[subject.CID] = struct{}{}
		s.dmMu.Unlock()
	}
}
// dmCounter runs every 5 seconds: it swaps out the set of changed cids,
// expands each cid to its owning archives and all of their pages, reads
// per-page dm counts in batches of 100, sums them per archive and
// publishes the totals to the dm databus.
//
// Fixed: the Aids-lookup failure log had a %d verb but was only passed
// the error, producing "%!d(...)" output (go vet failure).
func (s *Service) dmCounter() {
	defer s.waiter.Done()
	for {
		time.Sleep(5 * time.Second)
		s.dmMu.Lock()
		cm := s.dmCids
		s.dmCids = make(map[int64]struct{})
		s.dmMu.Unlock()
		var (
			aids    []int64
			err     error
			c       = context.TODO()
			am      = make(map[int64][]int64)
			allCids []int64
		)
		// expand each changed cid into every page cid of every archive
		// that contains it
		for cid := range cm {
			if aids, err = s.archiveDao.Aids(c, cid); err != nil {
				log.Error("s.archiveDao.Aids(%d) error(%v)", cid, err)
				continue
			}
			for _, aid := range aids {
				if _, ok := am[aid]; ok {
					continue
				}
				var pages []*api.Page
				if pages, err = s.arcServices[0].Page3(c, &archive.ArgAid2{Aid: aid}); err != nil {
					log.Error("s.arcServices[0].Page3(%d) error(%v)", aid, err)
					continue
				}
				for _, p := range pages {
					am[aid] = append(am[aid], p.Cid)
					allCids = append(allCids, p.Cid)
				}
			}
		}
		// fetch dm counts in batches of argCount cids
		var (
			times    int
			argCount = 100
			cids     []int64
			cDmCount = make(map[int64]int64)
		)
		if len(allCids)%argCount == 0 {
			times = len(allCids) / argCount
		} else {
			times = len(allCids)/argCount + 1
		}
		for i := 0; i < times; i++ {
			if i == times-1 {
				cids = allCids[i*argCount:]
			} else {
				cids = allCids[i*argCount : (i+1)*argCount]
			}
			var sm map[int64]*dmmdl.SubjectInfo
			if sm, err = s.dm2RPC.SubjectInfos(c, &dmmdl.ArgOids{Type: 1, Oids: cids}); err != nil {
				log.Error("s.dm2RPC.SubjectInfos(%v) error(%v)", cids, err)
				continue
			}
			for cid, s := range sm {
				cDmCount[cid] = int64(s.Count)
			}
		}
	L:
		// sum per archive; a single missing cid count aborts the whole
		// publish round (labelled break)
		for aid, cids := range am {
			var sum int64
			for _, cid := range cids {
				var (
					cnt int64
					ok  bool
				)
				if cnt, ok = cDmCount[cid]; !ok {
					log.Error("dm cid(%d) no count", cid)
					break L
				}
				sum += cnt
			}
			dMsg := &dm.Count{ID: aid, Count: sum, Type: _type, Timestamp: time.Now().Unix()}
			if err = s.dmPub.Send(c, strconv.FormatInt(aid, 10), dMsg); err != nil {
				log.Error("s.dmPub.Send error(%v)", err)
				continue
			}
			log.Info("s.dmPub.Send(%+v) success", dMsg)
		}
		if s.closeSub {
			return
		}
	}
}

View File

@@ -0,0 +1,80 @@
package service
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/archive/model/result"
"go-common/app/job/main/archive/model/retry"
"go-common/library/cache/redis"
"go-common/library/log"
)
// retryproc pops failed items from the redis retry list and replays
// each one according to its recorded action; an empty list sleeps 5s.
//
// Fixed: the unmarshal-failure message was garbled ("json.Unretry
// dedeSyncmarshal"); the retry log always printed retry.FailUpCache
// instead of the item's actual action; and the FailDatabus branch
// populated ArchiveUpInfo.Action from .Table instead of .Action.
func (s *Service) retryproc() {
	defer s.waiter.Done()
	for {
		if s.closeRetry {
			return
		}
		var (
			c   = context.TODO()
			bs  []byte
			err error
		)
		bs, err = s.PopFail(c)
		if err != nil || bs == nil {
			time.Sleep(5 * time.Second)
			continue
		}
		msg := &retry.Info{}
		if err = json.Unmarshal(bs, msg); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", bs, err)
			continue
		}
		log.Info("retry %s %s", msg.Action, bs)
		switch msg.Action {
		case retry.FailUpCache:
			s.updateResultCache(&result.Archive{AID: msg.Data.Aid, State: msg.Data.State}, nil)
		case retry.FailDatabus:
			var upInfo = &result.ArchiveUpInfo{Table: msg.Data.DatabusMsg.Table, Action: msg.Data.DatabusMsg.Action, Nw: msg.Data.DatabusMsg.Nw, Old: msg.Data.DatabusMsg.Old}
			s.sendNotify(upInfo)
		case retry.FailUpVideoCache:
			s.upVideoCache(msg.Data.Aid, msg.Data.Cids)
		case retry.FailDelVideoCache:
			s.delVideoCache(msg.Data.Aid, msg.Data.Cids)
		case retry.FailResultAdd:
			s.arcUpdate(msg.Data.Aid)
		default:
			continue
		}
	}
}
// PushFail marshals a and RPUSHes it onto the redis retry list so that
// retryproc can replay it later. Marshal and redis errors are logged
// and returned.
//
// Fixed: the RPUSH failure log had %s/%s/%v verbs but no arguments.
func (s *Service) PushFail(c context.Context, a interface{}) (err error) {
	var (
		conn = s.redis.Get(c)
		bs   []byte
	)
	defer conn.Close()
	if bs, err = json.Marshal(a); err != nil {
		log.Error("json.Marshal(%v) error(%v)", a, err)
		return
	}
	if _, err = conn.Do("RPUSH", retry.FailList, bs); err != nil {
		log.Error("conn.Do(RPUSH, %s, %s) error(%v)", retry.FailList, bs, err)
	}
	return
}
// PopFail LPOPs one fail item from the redis retry list. An empty list
// yields nil bytes together with redis.ErrNil (not logged); any other
// redis error is logged and returned.
func (s *Service) PopFail(c context.Context) (bs []byte, err error) {
	conn := s.redis.Get(c)
	defer conn.Close()
	bs, err = redis.Bytes(conn.Do("LPOP", retry.FailList))
	if err != nil && err != redis.ErrNil {
		log.Error("redis.Bytes(conn.Do(LPOP, %s)) error(%v)", retry.FailList, err)
	}
	return
}

View File

@@ -0,0 +1,198 @@
package service
import (
"context"
"encoding/json"
"fmt"
"strconv"
"sync"
"time"
dm2rpc "go-common/app/interface/main/dm2/rpc/client"
"go-common/app/job/main/archive/conf"
"go-common/app/job/main/archive/dao/archive"
"go-common/app/job/main/archive/dao/email"
"go-common/app/job/main/archive/dao/monitor"
"go-common/app/job/main/archive/dao/reply"
"go-common/app/job/main/archive/dao/result"
dbusmdl "go-common/app/job/main/archive/model/databus"
resmdl "go-common/app/job/main/archive/model/result"
"go-common/app/job/main/archive/model/retry"
accgrpc "go-common/app/service/main/account/api"
arcrpc "go-common/app/service/main/archive/api/gorpc"
xredis "go-common/library/cache/redis"
"go-common/library/log"
"go-common/library/queue/databus"
)
// Service is the archive-job worker: it consumes videoup/dm/cache/
// account databus streams, syncs archives into the result DB, keeps
// the archive RPC caches fresh and replays failures via a redis list.
type Service struct {
	c          *conf.Config
	closeRetry bool // set by Close; stops retryproc
	closeSub   bool // set by Close; stops all databus consumers
	archiveDao *archive.Dao
	emailDao   *email.Dao
	monitorDao *monitor.Dao
	replyDao   *reply.Dao
	resultDao  *result.Dao
	redis      *xredis.Pool // backs the PushFail/PopFail retry list
	waiter     sync.WaitGroup
	// databus subscriptions / publishers
	videoupSub       *databus.Databus
	archiveResultPub *databus.Databus
	dmPub            *databus.Databus
	dmSub            *databus.Databus
	cacheSub         *databus.Databus
	accountNotifySub *databus.Databus
	sfTpsCache       map[int16]int16    // first-level type mapping, refreshed by loadType
	adtTpsCache      map[int16]struct{} // audit types, refreshed by loadType
	arcServices      []*arcrpc.Service2 // one RPC client per configured archive-service instance
	accGRPC          accgrpc.AccountClient
	dm2RPC           *dm2rpc.Service
	// databus channel
	videoupAids []chan int64 // sharded by aid % ChanSize
	pgcAids     chan int64   // serial queue for PGC archives
	// dm count
	dmCids map[int64]struct{} // cids with changed dm counts, guarded by dmMu
	dmMu   sync.Mutex
	// mids needing upper-cache rebuild, guarded by notifyMu
	notifyMid map[int64]struct{}
	notifyMu  sync.Mutex
}
// New is archive service implementation. It wires every dao, databus
// and RPC client from the config, then starts all worker goroutines;
// those registered on s.waiter are joined by Close.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:                c,
		archiveDao:       archive.New(c),
		emailDao:         email.New(c),
		monitorDao:       monitor.New(c),
		replyDao:         reply.New(c),
		resultDao:        result.New(c),
		dm2RPC:           dm2rpc.New(c.Dm2RPC),
		videoupSub:       databus.New(c.VideoupSub),
		dmSub:            databus.New(c.DmSub),
		dmPub:            databus.New(c.DmPub),
		archiveResultPub: databus.New(c.ArchiveResultPub),
		cacheSub:         databus.New(c.CacheSub),
		accountNotifySub: databus.New(c.AccountNotifySub),
		redis:            xredis.NewPool(c.Redis),
		pgcAids:          make(chan int64, 1024),
		dmCids:           make(map[int64]struct{}),
		notifyMid:        make(map[int64]struct{}),
		arcServices:      make([]*arcrpc.Service2, 0),
	}
	var err error
	// the account client is mandatory; fail fast at startup
	if s.accGRPC, err = accgrpc.NewClient(nil); err != nil {
		panic(fmt.Sprintf("account.service grpc not found!!!!!!!!!!!! error(%v)", err))
	}
	// one RPC client per configured archive-service instance
	for _, sc := range s.c.ArchiveServices {
		s.arcServices = append(s.arcServices, arcrpc.New2(sc))
	}
	// one sharded UGC consumer per channel
	// NOTE(review): pgcConsumer is also started once per iteration, so
	// ChanSize PGC workers drain the single pgcAids channel — confirm
	// this is intended rather than one worker outside the loop
	for i := 0; i < s.c.ChanSize; i++ {
		s.videoupAids = append(s.videoupAids, make(chan int64, 1024))
		s.waiter.Add(1)
		go s.consumerVideoup(i)
		s.waiter.Add(1)
		go s.pgcConsumer()
	}
	s.loadType()
	go s.cacheproc()
	// sync archive_result db!!!!!!!
	s.waiter.Add(1)
	go s.videoupConsumer()
	s.waiter.Add(1)
	go s.dmConsumer()
	// check consumer
	go s.checkConsume()
	s.waiter.Add(1)
	go s.retryproc()
	s.waiter.Add(1)
	go s.dmCounter()
	s.waiter.Add(1)
	go s.cachesubproc()
	s.waiter.Add(1)
	go s.accountNotifyproc()
	s.waiter.Add(1)
	go s.clearMidCache()
	return s
}
// sendNotify publishes an archive change event to the archive-result
// databus, keyed by aid; a publish failure queues the event for retry.
func (s *Service) sendNotify(upInfo *resmdl.ArchiveUpInfo) {
	c := context.TODO()
	nw, err := json.Marshal(upInfo.Nw)
	if err != nil {
		log.Error("json.Marshal(%+v) error(%v)", upInfo.Nw, err)
		return
	}
	old, err := json.Marshal(upInfo.Old)
	if err != nil {
		log.Error("json.Marshal(%+v) error(%v)", upInfo.Old, err)
		return
	}
	msg := &dbusmdl.Message{Action: upInfo.Action, Table: upInfo.Table, New: nw, Old: old}
	if err = s.archiveResultPub.Send(c, strconv.FormatInt(upInfo.Nw.AID, 10), msg); err != nil {
		log.Error("s.archiveResultPub.Send(%+v) error(%v)", msg, err)
		rt := &retry.Info{Action: retry.FailDatabus}
		rt.Data.Aid = upInfo.Nw.AID
		rt.Data.DatabusMsg = upInfo
		s.PushFail(c, rt)
		return
	}
	msgStr, _ := json.Marshal(msg)
	log.Info("sendNotify(%s) successed", msgStr)
}
// loadType refreshes the first-level type mapping and the audit-types
// cache from the archive DB; on error the previous cache is kept.
func (s *Service) loadType() {
	c := context.TODO()
	tpm, err := s.archiveDao.TypeMapping(c)
	if err != nil {
		log.Error("s.dede.TypeMapping error(%v)", err)
		return
	}
	s.sfTpsCache = tpm
	// audit types
	adt, err := s.archiveDao.AuditTypesConf(c)
	if err != nil {
		log.Error("s.dede.AuditTypesConf error(%v)", err)
		return
	}
	s.adtTpsCache = adt
}
// cacheproc reloads the type caches once a minute, forever.
func (s *Service) cacheproc() {
	for {
		time.Sleep(time.Minute)
		s.loadType()
	}
}
// checkConsume monitors consumer backlog once a minute and sends a
// wechat alert when any channel exceeds MonitorSize; it only runs in
// the production environment.
func (s *Service) checkConsume() {
	if s.c.Env != "pro" {
		return
	}
	for {
		time.Sleep(1 * time.Minute)
		for i := 0; i < s.c.ChanSize; i++ {
			l := len(s.videoupAids[i])
			if l <= s.c.MonitorSize {
				continue
			}
			s.monitorDao.Send(context.TODO(), s.c.WeChantUsers, fmt.Sprintf("archive-job报警了啊\n UGC的chan太大了\n s.videoupAids[%d] size(%d) is too large\n 是不是有人在刷数据!!!!", i, l), s.c.WeChatToken, s.c.WeChatSecret)
		}
		if l := len(s.pgcAids); l > s.c.MonitorSize {
			s.monitorDao.Send(context.TODO(), s.c.WeChantUsers, fmt.Sprintf("archive-job报警了啊\n PGC的chan太大了\n chan size(%d) is too large \n 是不是有人在刷数据!!!!", l), s.c.WeChatToken, s.c.WeChatSecret)
		}
	}
}
// Close shuts the service down: it signals all databus consumers to
// stop, waits briefly so in-flight messages can drain, closes the
// videoup subscription, stops the retry loop and blocks until every
// waiter-registered goroutine has exited.
func (s *Service) Close() (err error) {
	s.closeSub = true
	// give message loops a moment to observe closeSub before the
	// subscription is torn down
	time.Sleep(2 * time.Second)
	s.videoupSub.Close()
	s.closeRetry = true
	s.waiter.Wait()
	return
}

View File

@@ -0,0 +1,42 @@
package service
import (
"context"
"flag"
"path/filepath"
"testing"
"go-common/app/job/main/archive/conf"
. "github.com/smartystreets/goconvey/convey"
)
var (
s *Service
)
// init loads the test configuration and constructs the service under
// test once, before any test in this package runs.
func init() {
	dir, _ := filepath.Abs("../cmd/archive-job-test.toml")
	flag.Set("conf", dir)
	conf.Init()
	s = New(conf.Conf)
}
// Test_loadType smoke-tests the type-cache reload against the test DB.
func Test_loadType(t *testing.T) {
	Convey("loadType", t, func() {
		s.loadType()
	})
}
// Test_PopFail smoke-tests popping from the redis retry list.
func Test_PopFail(t *testing.T) {
	Convey("PopFail", t, func() {
		s.PopFail(context.TODO())
	})
}
// Test_TranResult runs the result-DB sync for a fixed aid and expects
// it to complete without error against the test databases.
func Test_TranResult(t *testing.T) {
	Convey("tranResult", t, func() {
		_, _, _, err := s.tranResult(context.TODO(), 10098500)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,91 @@
package service
import (
"context"
"encoding/json"
jobmdl "go-common/app/job/main/archive/model/databus"
"go-common/app/job/main/archive/model/retry"
"go-common/app/service/main/archive/model/archive"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/queue/databus"
)
// videoupConsumer consumes videoup events and dispatches each aid to
// its shard channel (aid % ChanSize); if the shard is full the aid is
// queued on the redis retry list instead of blocking. Messages are
// committed immediately (at-most-once processing).
func (s *Service) videoupConsumer() {
	defer s.waiter.Done()
	var msgs = s.videoupSub.Messages()
	for {
		var (
			msg *databus.Message
			ok  bool
			err error
		)
		if msg, ok = <-msgs; !ok {
			log.Error("s.videoupSub.messages closed")
			return
		}
		if s.closeSub {
			return
		}
		msg.Commit()
		m := &jobmdl.Videoup{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", msg.Value, err)
			continue
		}
		log.Info("videoupMessage key(%s) value(%s) start", msg.Key, msg.Value)
		if m.Aid <= 0 {
			log.Warn("aid(%d) <= 0 WTF(%s)", m.Aid, msg.Value)
			continue
		}
		// only these routes trigger a result-DB sync; everything else
		// falls through and is only logged
		switch m.Route {
		case jobmdl.RouteAutoOpen, jobmdl.RouteDelayOpen, jobmdl.RouteDeleteArchive, jobmdl.RouteSecondRound, jobmdl.RouteFirstRoundForbid, jobmdl.RouteForceSync:
			// non-blocking send: overflow goes to the retry list
			select {
			case s.videoupAids[m.Aid%int64(s.c.ChanSize)] <- m.Aid:
			default:
				rt := &retry.Info{Action: retry.FailResultAdd}
				rt.Data.Aid = m.Aid
				s.PushFail(context.TODO(), rt)
				log.Warn("s.videoupAids is full!!! async databus archive(%d)", m.Aid)
			}
		}
		log.Info("videoupMessage key(%s) value(%s) finish", msg.Key, msg.Value)
	}
}
// delVideoCache removes the video cache entry for every given cid on
// every archive RPC instance. Failures other than NothingFound are
// queued on the retry list; the last error is returned.
func (s *Service) delVideoCache(aid int64, cids []int64) (err error) {
	c := context.TODO()
	for _, cid := range cids {
		for k, rpc := range s.arcServices {
			err = rpc.DelVideo2(c, &archive.ArgVideo2{Aid: aid, Cid: cid})
			if err == nil {
				continue
			}
			log.Error("s.arcRpc(%d).DelVideo2(%d, %d) error(%v)", k, aid, cid, err)
			if ecode.Cause(err) != ecode.NothingFound {
				rt := &retry.Info{Action: retry.FailDelVideoCache}
				rt.Data.Aid = aid
				rt.Data.Cids = []int64{cid}
				s.PushFail(c, rt)
				log.Error("delVideoCache error(%v)", err)
			}
		}
	}
	return
}
// upVideoCache refreshes the video cache entry for every given cid on
// every archive RPC instance. Failures other than NothingFound are
// queued on the retry list; the last error is returned.
func (s *Service) upVideoCache(aid int64, cids []int64) (err error) {
	c := context.TODO()
	for _, cid := range cids {
		for k, rpc := range s.arcServices {
			err = rpc.UpVideo2(c, &archive.ArgVideo2{Aid: aid, Cid: cid})
			if err == nil {
				continue
			}
			log.Error("s.arcRpc(%d).UpVideo2(%d, %d) error(%v)", k, aid, cid, err)
			if ecode.Cause(err) != ecode.NothingFound {
				rt := &retry.Info{Action: retry.FailUpVideoCache}
				rt.Data.Aid = aid
				rt.Data.Cids = []int64{cid}
				s.PushFail(c, rt)
				log.Error("upVideoCache error(%v)", err)
			}
		}
	}
	return
}