Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,90 @@
# Bazel targets for go-common/app/job/main/dm2/service.
# NOTE: everything here is tagged "automanaged" — the file is maintained by
# generation tooling and manual edits may be overwritten.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Package unit tests, embedded into the library under test.
go_test(
    name = "go_default_test",
    srcs = [
        "action_test.go",
        "archive_test.go",
        "dm_seg_test.go",
        "dm_sepcial_test.go",
        "dm_test.go",
        "mask_test.go",
        "service_test.go",
        "subject_test.go",
        "subtitle_test.go",
        "task_test.go",
        "track_test.go",
        "transfer_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/dm2/conf:go_default_library",
        "//app/job/main/dm2/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# The dm2 job service library itself.
go_library(
    name = "go_default_library",
    srcs = [
        "action.go",
        "archive.go",
        "bnj.go",
        "dm.go",
        "dm_seg.go",
        "dm_special.go",
        "mask.go",
        "service.go",
        "subject.go",
        "subtitle.go",
        "task.go",
        "track.go",
        "transfer.go",
    ],
    importpath = "go-common/app/job/main/dm2/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/dm2/conf:go_default_library",
        "//app/job/main/dm2/dao:go_default_library",
        "//app/job/main/dm2/model:go_default_library",
        "//app/job/main/dm2/model/oplog:go_default_library",
        "//app/service/main/archive/api:go_default_library",
        "//app/service/main/archive/api/gorpc:go_default_library",
        "//app/service/main/archive/model/archive:go_default_library",
        "//app/service/main/filter/api/grpc/v1:go_default_library",
        "//app/service/main/seq-server/model:go_default_library",
        "//app/service/main/seq-server/rpc/client:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/log/infoc:go_default_library",
        "//library/net/metadata:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/sync/pipeline/fanout:go_default_library",
        "//library/time:go_default_library",
        "//library/xstr:go_default_library",
    ],
)

# All files in this package, consumed by the aggregation target below.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Repository-wide source aggregation entry point for this package.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,210 @@
package service
import (
"context"
"encoding/json"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// actionAct dispatches a databus action message to its handler:
// flush the full-list cache, flush a segment cache, or persist a new danmu
// and refresh every cache it affects.
func (s *Service) actionAct(c context.Context, act *model.Action) (err error) {
	switch act.Action {
	case model.ActFlushDM:
		// full-list flush request.
		fc := new(model.Flush)
		if err = json.Unmarshal(act.Data, &fc); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		s.asyncAddFlushDM(c, fc)
	case model.ActFlushDMSeg:
		fc := new(model.FlushDMSeg)
		if err = json.Unmarshal(act.Data, &fc); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		// a segment flush without page info is not actionable.
		if fc.Page == nil {
			log.Error("s.ActFlushDMSeg(+%v) error page nil", fc)
			return
		}
		// async flush cache
		s.asyncAddFlushDMSeg(c, fc)
	case model.ActAddDM:
		var (
			dm  = &model.DM{}
			sub *model.Subject
		)
		if err = json.Unmarshal(act.Data, &dm); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		if sub, err = s.subject(c, dm.Type, dm.Oid); err != nil {
			return
		}
		// persist the dm transactionally before touching any cache.
		if err = s.actionAddDM(c, sub, dm); err != nil {
			log.Error("s.actionAddDM(+%v) error(%v)", dm, err)
			return
		}
		if dm.State == model.StateNormal || dm.State == model.StateMonitorAfter {
			// 1. latest 1000 danmu list for the creator center.
			s.asyncAddRecent(c, dm)
			// 2. refresh the full-list danmu cache; NOTE redis errors ignored.
			if ok, _ := s.dao.ExpireDMCache(c, dm.Type, dm.Oid); ok {
				s.dao.AddDMCache(c, dm)
			}
			s.asyncAddFlushDM(c, &model.Flush{
				Type:  dm.Type,
				Oid:   dm.Oid,
				Force: false,
			})
			// 3. refresh the segment danmu caches; NOTE redis errors ignored.
			var p *model.Page
			if p, err = s.pageinfo(c, sub.Pid, dm); err != nil {
				return
			}
			switch dm.Pool {
			case model.PoolNormal:
				if ok, _ := s.dao.ExpireDMID(c, dm.Type, dm.Oid, p.Total, p.Num); ok {
					s.dao.AddDMIDCache(c, dm.Type, dm.Oid, p.Total, p.Num, dm.ID)
				}
			case model.PoolSubtitle:
				if ok, _ := s.dao.ExpireDMIDSubtitle(c, dm.Type, dm.Oid); ok {
					s.dao.AddDMIDSubtitleCache(c, dm.Type, dm.Oid, dm)
				}
			case model.PoolSpecial:
				// special dm keep a separate location blob; see dm_special.go.
				if err = s.specialLocationUpdate(c, dm.Type, dm.Oid); err != nil {
					return
				}
				// TODO add cache
			default:
				// unknown pool: nothing further to refresh.
				return
			}
			s.dao.AddIdxContentCaches(c, dm.Type, dm.Oid, dm)
			s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
				Type:  dm.Type,
				Oid:   dm.Oid,
				Force: false,
				Page:  p,
			})
		}
		// bnj event bookkeeping; see bnj.go.
		s.bnjDmCount(c, sub, dm)
	}
	return
}
// actionFlushDM regenerates the full-list danmu XML for one subject and
// stores the gzflate-compressed result in cache. force=true additionally
// drops the redis DM cache first so the XML is rebuilt from the database.
//
// Fix: the success log previously printed "fore:%v" instead of "force:%v".
func (s *Service) actionFlushDM(c context.Context, tp int32, oid int64, force bool) (err error) {
	sub, err := s.subject(c, tp, oid)
	if err != nil {
		return
	}
	if force {
		s.dao.DelDMCache(c, tp, oid) // delete redis cache,ignore error
	}
	xml, err := s.genXML(c, sub) // generate xml from redis or database
	if err != nil {
		log.Error("s.genXML(%d) error(%v)", oid, err)
		return
	}
	data, err := s.gzflate(xml, 4)
	if err != nil {
		log.Error("s.gzflate(type:%d,oid:%d) error(%v)", tp, oid, err)
		return
	}
	if err = s.dao.AddXMLCache(c, sub.Oid, data); err != nil {
		return
	}
	log.Info("actionFlushDM type:%d,oid:%d force:%v", tp, oid, force)
	return
}
// actionAddDM add dm index and content to db by transaction.
// It persists the danmu (index + content, plus special content when
// present) and updates the subject counters atomically.
//
// Bug fix: the original `return tx.Rollback()` returned the ROLLBACK error
// instead of the failure that triggered it — when the rollback succeeded
// the caller saw err == nil and the insert failure was silently swallowed.
// We now roll back (logging any rollback failure) and propagate the
// original error.
func (s *Service) actionAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	tx, err := s.dao.BeginTran(c)
	if err != nil {
		return
	}
	// rollback aborts the transaction and preserves the triggering error.
	rollback := func(opErr error) error {
		if rbErr := tx.Rollback(); rbErr != nil {
			log.Error("tx.Rollback() error(%v)", rbErr)
		}
		return opErr
	}
	// special dm carry an extra content row.
	if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
		if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
			return rollback(err)
		}
	}
	if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
		return rollback(err)
	}
	// monitored dm also bump the subject's moderation counter.
	if dm.State == model.StateMonitorBefore || dm.State == model.StateMonitorAfter {
		if _, err = s.dao.TxIncrSubMCount(tx, dm.Type, dm.Oid); err != nil {
			return rollback(err)
		}
	}
	var count int64
	// only these states contribute to the subject's dm count.
	if dm.State == model.StateNormal || dm.State == model.StateMonitorAfter || dm.State == model.StateHide {
		count = 1
		if sub.Childpool == model.PoolNormal && dm.Pool != model.PoolNormal {
			sub.Childpool = 1 // subject now contains non-normal-pool dm
		}
	}
	if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, count, sub.Childpool); err != nil {
		return rollback(err)
	}
	return tx.Commit()
}
// actionFlushXMLDmSeg flush xml dm seg: it rebuilds the gzip-compressed
// XML cache for one danmu segment of a subject. force=true first drops the
// segment's dm-id caches so ids are re-read from the database.
func (s *Service) actionFlushXMLDmSeg(c context.Context, tp int32, oid int64, p *model.Page, force bool) (err error) {
	var (
		sub      *model.Subject
		duration int64
		seg      *model.Segment
	)
	if sub, err = s.subject(c, tp, oid); err != nil {
		return
	}
	if force {
		if err = s.dao.DelDMIDCache(c, tp, oid, p.Total, p.Num); err != nil {
			return
		}
		// a non-zero Childpool means subtitle dm may exist for the subject.
		if sub.Childpool > 0 {
			s.dao.DelDMIDSubtitleCache(c, tp, oid) // best-effort, error ignored
		}
	}
	if duration, err = s.videoDuration(c, sub.Pid, sub.Oid); err != nil {
		return
	}
	// map the page number back to a playback position, then to a segment.
	ps, _ := model.SegmentPoint(p.Num, duration)
	if seg, err = s.segmentInfo(c, sub.Pid, sub.Oid, ps, duration); err != nil {
		return
	}
	res, err := s.dmSegXML(c, sub, seg)
	if err != nil {
		return
	}
	if err = s.dao.SetXMLSegCache(c, tp, oid, seg.Cnt, seg.Num, res); err != nil {
		return
	}
	log.Info("actionFlushXMLDmSeg type:%d,oid:%d,seg:%+v", tp, oid, seg)
	return
}
// flushDmSegCache refreshes the segment danmu cache described by fc.
// A flush request that carries no page information is silently ignored.
func (s *Service) flushDmSegCache(c context.Context, fc *model.FlushDMSeg) (err error) {
	if fc.Page == nil {
		return nil
	}
	return s.actionFlushXMLDmSeg(c, fc.Type, fc.Oid, fc.Page, fc.Force)
}
// flushDmCache rebuilds the full-list XML cache for the subject described
// by fc and then invalidates the corresponding ajax danmu cache.
func (s *Service) flushDmCache(c context.Context, fc *model.Flush) (err error) {
	if err = s.actionFlushDM(c, fc.Type, fc.Oid, fc.Force); err != nil {
		return err
	}
	return s.dao.DelAjaxDMCache(c, fc.Oid)
}

View File

@@ -0,0 +1,45 @@
package service
import (
"context"
"encoding/json"
"testing"
"time"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestActionAddDM exercises actionAct with an ActAddDM payload built from
// a freshly generated danmu id.
func TestActionAddDM(t *testing.T) {
	dmid := time.Now().UnixNano()
	dm := &model.DM{
		ID:       dmid,
		Type:     1,
		Oid:      1221,
		Mid:      4780461,
		Progress: 111,
		State:    0,
		Pool:     0,
		Ctime:    1533804859,
		Content: &model.Content{
			ID:       dmid,
			Mode:     4,
			IP:       123,
			FontSize: 25,
			Color:    12345,
			Msg:      "testtddddddddddddd",
			Ctime:    1533804859,
		},
	}
	Convey("", t, func() {
		data, err := json.Marshal(dm)
		So(err, ShouldBeNil)
		err = svr.actionAct(context.TODO(), &model.Action{Action: model.ActAddDM, Data: data})
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,42 @@
package service
import (
"context"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/model/archive"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/net/metadata"
)
// videoDuration return video duration cid.
// It reads the duration (milliseconds, per the *1000 conversion below) of
// video cid in archive aid through a cache, falling back to the archive RPC.
// NOTE(review): when the cache read errors we fall back to RPC and skip the
// write-back; when the RPC fails with a non-NothingFound error, the
// cache-miss sentinel value read earlier may still be written back below —
// confirm this is intended.
func (s *Service) videoDuration(c context.Context, aid, cid int64) (duration int64, err error) {
	var cache = true
	if duration, err = s.dao.DurationCache(c, cid); err != nil {
		log.Error("dao.Duration(cid:%d) error(%v)", cid, err)
		err = nil
		cache = false
	} else if duration != model.NotFound {
		// cache hit with a real value.
		return
	}
	arg := &archive.ArgVideo2{Aid: aid, Cid: cid, RealIP: metadata.String(c, metadata.RemoteIP)}
	page, err := s.arcRPC.Video3(c, arg)
	if err != nil {
		if ecode.Cause(err).Code() == ecode.NothingFound.Code() {
			// missing video: report zero duration, not an error.
			duration = 0
			err = nil
			log.Warn("acvSvc.Video3(%v) error(duration not exist)", arg)
		} else {
			log.Error("acvSvc.Video3(%v) error(%v)", arg, err)
		}
	} else {
		duration = page.Duration * 1000 // seconds -> milliseconds
	}
	if cache {
		// write back off the request path.
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.SetDurationCache(ctx, cid, duration)
		})
	}
	return
}

View File

@@ -0,0 +1,20 @@
package service
import (
"context"
"testing"
)
// TestVideoDuration fetches the duration of a known video and logs it.
func TestVideoDuration(t *testing.T) {
	const (
		aid int64 = 10097265
		oid int64 = 1508
	)
	d, err := svr.videoDuration(context.TODO(), aid, oid)
	if err != nil {
		t.Fatalf("s.videoDuration(%d %d) error(%v)", aid, oid, err)
	}
	t.Logf("oid:%d duration:%d", oid, d)
}

View File

@@ -0,0 +1,419 @@
package service
import (
"context"
"encoding/json"
"math/rand"
"regexp"
"strings"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/api"
arcMdl "go-common/app/service/main/archive/model/archive"
filterMdl "go-common/app/service/main/filter/api/grpc/v1"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/net/metadata"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
var (
	// msgRegex matches messages consisting only of whitespace or the
	// ideographic space U+3000 (UTF-8 \xE3\x80\x80) — i.e. blank danmu.
	msgRegex = regexp.MustCompile(`^(\s|\xE3\x80\x80)*$`)
	// _bnjDmMsgLen caps bnj danmu length, counted in runes.
	_bnjDmMsgLen = 100
	// _dateFormat is the layout used to parse the bnj start-time config.
	_dateFormat = "2006-01-02 15:04:05"
)
// init seeds math/rand, used for the sampling in bnjLiveDanmu and the
// sub-second jitter in pickBnjVideo.
func init() {
	rand.Seed(time.Now().Unix())
}
// initBnj wires up the bnj (Bilibili New Year event) machinery: it loads
// the configured event archive, keeps its video list and live config
// refreshed every 30s, and starts the live-danmu consumer goroutine.
// No-op unless BNJ.Aid is configured; panics on a malformed start time.
func (s *Service) initBnj() {
	var err error
	if s.conf.BNJ.Aid <= 0 {
		return
	}
	s.bnjAid = s.conf.BNJ.Aid
	// bnj count: archives whose dm are mirrored into the event archive.
	if s.conf.BNJ.BnjCounter != nil {
		bnjSubAids := make(map[int64]struct{})
		for _, aid := range s.conf.BNJ.BnjCounter.SubAids {
			bnjSubAids[aid] = struct{}{}
		}
		s.bnjSubAids = bnjSubAids
	}
	// bnj danmu: initial load plus a 30s refresh loop.
	s.bnjVideos(context.TODO())
	s.bnjLiveConfig(context.TODO())
	go func() {
		ticker := time.NewTicker(time.Second * 30)
		for range ticker.C {
			s.bnjVideos(context.TODO())
			s.bnjLiveConfig(context.TODO())
		}
	}()
	s.bnjIgnoreRate = s.conf.BNJ.BnjLiveDanmu.IgnoreRate
	s.bnjIgnoreBeginTime = time.Duration(s.conf.BNJ.BnjLiveDanmu.IgnoreBegin)
	s.bnjIgnoreEndTime = time.Duration(s.conf.BNJ.BnjLiveDanmu.IgnoreEnd)
	s.bnjliveRoomID = s.conf.BNJ.BnjLiveDanmu.RoomID
	s.bnjUserLevel = s.conf.BNJ.BnjLiveDanmu.Level
	// a malformed start time is a fatal misconfiguration.
	if s.bnjStart, err = time.ParseInLocation(_dateFormat, s.conf.BNJ.BnjLiveDanmu.Start, time.Now().Location()); err != nil {
		panic(err)
	}
	s.bnjCsmr = databus.New(s.conf.Databus.BnjCsmr)
	log.Info("bnj init start:%v room_id:%v", s.bnjStart.String(), s.conf.BNJ.BnjLiveDanmu.RoomID)
	go s.bnjProc()
}
// bnjProc consumes live-danmu messages from databus and hands each one to
// bnjLiveDanmu; offsets are committed only after successful handling, and
// the loop exits when the consumer channel closes.
func (s *Service) bnjProc() {
	var (
		err error
		c   = context.Background()
	)
	for {
		msg, ok := <-s.bnjCsmr.Messages()
		if !ok {
			log.Error("bnj bnjProc consumer exit")
			return
		}
		log.Info("bnj partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.LiveDanmu{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		// a failed message is skipped without committing its offset.
		if err = s.bnjLiveDanmu(c, m); err != nil {
			log.Error("bnj bnjLiveDanmu(msg:%+v),error(%v)", m, err)
			continue
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// bnjVideos reloads the event archive's video list (capped at the first 4
// parts), ensures each video has a danmu subject row, then publishes the
// list to s.bnjArcVideos for pickBnjVideo.
func (s *Service) bnjVideos(c context.Context) (err error) {
	var (
		videos []*model.Video
	)
	if videos, err = s.dao.Videos(c, s.bnjAid); err != nil {
		log.Error("bnj bnjVideos(aid:%v) error(%v)", s.bnjAid, err)
		return
	}
	// only the first 4 parts participate in the event.
	if len(videos) >= 4 {
		videos = videos[:4]
	}
	for _, video := range videos {
		if err = s.syncBnjVideo(c, model.SubTypeVideo, video); err != nil {
			log.Error("bnj syncBnjVideo(video:%+v) error(%v)", video, err)
			return
		}
	}
	s.bnjArcVideos = videos
	return
}
// syncBnjVideo makes sure the danmu subject row for a bnj video exists and
// carries the current video owner's mid.
func (s *Service) syncBnjVideo(c context.Context, tp int32, v *model.Video) (err error) {
	var sub *model.Subject
	if sub, err = s.dao.Subject(c, tp, v.Cid); err != nil {
		return
	}
	switch {
	case sub == nil:
		// create the subject once the video is transcoded far enough.
		if v.XCodeState >= model.VideoXcodeHDFinish {
			_, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), 0)
		}
	case sub.Mid != v.Mid:
		// owner changed; keep the subject row in sync.
		_, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid)
	}
	return
}
// bnjDmCount laji bnj count.
// It mirrors a danmu posted on one of the configured source archives
// (s.bnjSubAids) into a pseudo-randomly chosen part of the bnj event
// archive, stored as an admin-deleted dm positioned past the end of the
// video so it only bumps counters and never renders.
//
// Fix: the Page3 failure log previously printed sub.Pid although the call
// is made with s.bnjAid; it now logs the aid actually requested.
func (s *Service) bnjDmCount(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	var (
		dmid     int64
		pages    []*api.Page
		chosen   *api.Page
		choseSub *model.Subject
	)
	// only dm on configured source archives are mirrored.
	if _, ok := s.bnjSubAids[sub.Pid]; !ok {
		return
	}
	if pages, err = s.arcRPC.Page3(c, &arcMdl.ArgAid2{
		Aid:    s.bnjAid,
		RealIP: metadata.String(c, metadata.RemoteIP),
	}); err != nil {
		log.Error("bnjDmCount Page3(aid:%v) error(%v)", s.bnjAid, err)
		return
	}
	if len(pages) <= 0 {
		return
	}
	// pseudo-random part selection keyed on the wall clock.
	idx := time.Now().Unix() % int64(len(pages))
	if chosen = pages[idx]; chosen == nil {
		return
	}
	if choseSub, err = s.subject(c, model.SubTypeVideo, chosen.Cid); err != nil {
		return
	}
	if dmid, err = s.genDMID(c); err != nil {
		log.Error("bnjDmCount genDMID() error(%v)", err)
		return
	}
	forkDM := &model.DM{
		ID:   dmid,
		Type: model.SubTypeVideo,
		Oid:  chosen.Cid,
		Mid:  dm.Mid,
		// 1s past the end of the part, so the mirror can never display.
		Progress: int32((chosen.Duration + 1) * 1000),
		Pool:     dm.Pool,
		State:    model.StateAdminDelete,
		Ctime:    dm.Ctime,
		Mtime:    dm.Mtime,
		Content: &model.Content{
			ID:       dmid,
			FontSize: dm.Content.FontSize,
			Color:    dm.Content.Color,
			Mode:     dm.Content.Mode,
			IP:       dm.Content.IP,
			Plat:     dm.Content.Plat,
			Msg:      dm.Content.Msg,
			Ctime:    dm.Content.Ctime,
			Mtime:    dm.Content.Mtime,
		},
	}
	// special dm also carry their special-content row.
	if dm.Pool == model.PoolSpecial {
		forkDM.ContentSpe = &model.ContentSpecial{
			ID:    dmid,
			Msg:   dm.ContentSpe.Msg,
			Ctime: dm.ContentSpe.Ctime,
			Mtime: dm.ContentSpe.Mtime,
		}
	}
	if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
		return
	}
	return
}
// bnjAddDM add dm index and content to db by transaction.
// Only admin-deleted dm — the marker state used by bnjDmCount — are
// accepted; the subject's visible dm count delta is 0 so mirrored dm never
// inflate it.
//
// Bug fix: `return tx.Rollback()` used to replace the triggering error
// with the rollback result, so a successful rollback silently hid the
// insert failure from the caller. The original error is now propagated.
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	if dm.State != model.StateAdminDelete {
		return
	}
	tx, err := s.dao.BeginTran(c)
	if err != nil {
		return
	}
	// rollback aborts the transaction and preserves the triggering error.
	rollback := func(opErr error) error {
		if rbErr := tx.Rollback(); rbErr != nil {
			log.Error("tx.Rollback() error(%v)", rbErr)
		}
		return opErr
	}
	// special dm carry an extra content row.
	if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
		if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
			return rollback(err)
		}
	}
	if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
		return rollback(err)
	}
	return tx.Commit()
}
// genDMID allocates a globally unique danmu id from the seq-server.
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
	dmid, err = s.seqRPC.ID(c, s.seqArg)
	if err != nil {
		log.Error("seqRPC.ID() error(%v)", err)
	}
	return
}
// bnjLiveDanmu laji live to video: it forks a qualifying live-room danmu
// into the bnj event archive by sampling messages from the configured room
// (roughly 1 in s.bnjIgnoreRate), mapping the live timestamp to a video
// position, validating/filtering the text, and replaying it through
// actionAct as a regular ActAddDM.
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
	var (
		cid, dmid int64
		progress  float64
	)
	// ignore time before the configured event start.
	if time.Since(s.bnjStart) < 0 {
		return
	}
	// limit to normal messages from the configured live room.
	if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
		return
	}
	if liveDanmu.UserLevel < s.bnjUserLevel {
		return
	}
	// sample: keep roughly 1 out of every bnjIgnoreRate messages.
	if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
		return
	}
	if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
		return
	}
	// ignore illegal progress
	if progress <= 0 {
		return
	}
	if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
		log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
		return
	}
	if dmid, err = s.genDMID(c); err != nil {
		log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
		return
	}
	now := time.Now().Unix()
	// fork as a white (0xFFFFFF) rolling dm in monitor-after state.
	forkDM := &model.DM{
		ID:       dmid,
		Type:     model.SubTypeVideo,
		Oid:      cid,
		Mid:      liveDanmu.UID,
		Progress: int32(progress * 1000), // seconds -> milliseconds
		Pool:     model.PoolNormal,
		State:    model.StateMonitorAfter,
		Ctime:    model.ConvertStime(time.Now()),
		Mtime:    model.ConvertStime(time.Now()),
		Content: &model.Content{
			ID:       dmid,
			FontSize: 25,
			Color:    16777215,
			Mode:     model.ModeRolling,
			Plat:     0,
			Msg:      liveDanmu.Content,
			Ctime:    xtime.Time(now),
			Mtime:    xtime.Time(now),
		},
	}
	// the filter service may flip the state to StateFilter.
	if err = s.bnjCheckFilterService(c, forkDM); err != nil {
		log.Error("s.bnjCheckFilterService(%+v) error(%v)", forkDM, err)
		return
	}
	var (
		bs []byte
	)
	if bs, err = json.Marshal(forkDM); err != nil {
		log.Error("json.Marshal(%+v) error(%v)", forkDM, err)
		return
	}
	act := &model.Action{
		Action: model.ActAddDM,
		Data:   bs,
	}
	// reuse the normal add-dm pipeline (persistence + cache refresh).
	if err = s.actionAct(c, act); err != nil {
		log.Error("s.actionAddDM(%+v) error(%v)", liveDanmu, err)
		return
	}
	return
}
// pickBnjVideo maps a live wall-clock timestamp onto the bnj video list:
// it walks the parts, subtracting each part's duration until the offset
// falls inside one, and returns that part's cid plus the progress in
// seconds. Positions inside the ignore window at a part's start (except
// part 1) or end are rejected with ecode.DMProgressTooBig, as is any
// timestamp beyond the last part.
//
// Bug fix: the jitter term was `float64(rand.Int31n(1000)/1000)` — an
// integer division that always evaluates to 0, so no jitter was ever
// applied. It now divides in floating point, adding the intended uniform
// [0,1)-second offset to spread simultaneous messages.
func (s *Service) pickBnjVideo(c context.Context, timestamp int64) (cid int64, progress float64, err error) {
	var (
		idx   int
		video *model.Video
	)
	progress = float64(timestamp - s.bnjStart.Unix())
	for idx, video = range s.bnjArcVideos {
		if progress > float64(video.Duration) {
			// timestamp falls in a later part; skip this one.
			progress = progress - float64(video.Duration)
			continue
		}
		// ignore p1 start
		if idx != 0 && progress < s.bnjIgnoreBeginTime.Seconds() {
			err = ecode.DMProgressTooBig
			return
		}
		if float64(video.Duration)-progress < s.bnjIgnoreEndTime.Seconds() {
			err = ecode.DMProgressTooBig
			return
		}
		if progress >= 0 {
			// sub-second jitter so simultaneous messages don't stack.
			progress = progress + float64(rand.Int31n(1000))/1000
		}
		cid = video.Cid
		return
	}
	err = ecode.DMProgressTooBig
	return
}
// bnjCheckFilterService runs the dm text through the filter service and
// downgrades the dm to StateFilter when it is flagged (level > 0) or
// spam-limited; filter RPC failures are returned to the caller unchanged.
func (s *Service) bnjCheckFilterService(c context.Context, dm *model.DM) (err error) {
	var (
		filterReply *filterMdl.FilterReply
	)
	if filterReply, err = s.filterRPC.Filter(c, &filterMdl.FilterReq{
		Area:    "danmu",
		Message: dm.Content.Msg,
		Id:      dm.ID,
		Oid:     dm.Oid,
		Mid:     dm.Mid,
	}); err != nil {
		log.Error("checkFilterService(dm:%+v),err(%v)", dm, err)
		return
	}
	// mutate the dm in place; the caller persists the adjusted state.
	if filterReply.Level > 0 || filterReply.Limit == model.SpamBlack || filterReply.Limit == model.SpamOverflow {
		dm.State = model.StateFilter
		log.Info("bnj filter service delete(dmid:%d,data:+%v)", dm.ID, filterReply)
	}
	return
}
// checkBnjDmMsg validates a live danmu message before it is forked into a
// video danmu: it rejects whitespace-only text, text longer than
// _bnjDmMsgLen runes, and text containing the literal sequences `\n`/`/n`.
// NOTE(review): the `\n` check matches a backslash+n character pair, not a
// real newline — presumably the payload arrives escaped; confirm upstream.
func (s *Service) checkBnjDmMsg(c context.Context, msg string) (err error) {
	switch {
	case msgRegex.MatchString(msg): // blank message
		return ecode.DMMsgIlleagel
	case len([]rune(msg)) > _bnjDmMsgLen:
		return ecode.DMMsgTooLong
	case strings.Contains(msg, `\n`), strings.Contains(msg, `/n`):
		return ecode.DMMsgIlleagel
	}
	return
}
// bnjLiveConfig re-reads the bnj live event config (target archive, live
// room id and danmu start time) and applies it only when complete; missing,
// unparsable or partial configs are logged and ignored.
// NOTE(review): the config field is spelled DanmuDtarTime upstream —
// presumably "StartTime"; it is declared elsewhere and cannot be fixed here.
func (s *Service) bnjLiveConfig(c context.Context) (err error) {
	var (
		bnjConfig *model.BnjLiveConfig
		start     time.Time
	)
	if bnjConfig, err = s.dao.BnjConfig(c); err != nil {
		log.Error("bnjLiveConfig error current:%v err:%+v", time.Now().String(), err)
		return
	}
	if bnjConfig == nil {
		log.Error("bnjLiveConfig error current:%v bnjConfig nil", time.Now().String())
		return
	}
	if start, err = time.ParseInLocation(_dateFormat, bnjConfig.DanmuDtarTime, time.Now().Location()); err != nil {
		log.Error("bnjLiveConfig start time error current:%v config:%+v", time.Now().String(), bnjConfig)
		return
	}
	// both the target archive and the live room must be configured.
	if bnjConfig.CommentID <= 0 || bnjConfig.RoomID <= 0 {
		log.Info("bnjLiveConfig illegal current:%v config:%+v", time.Now().String(), bnjConfig)
		return
	}
	s.bnjAid = bnjConfig.CommentID
	s.bnjliveRoomID = bnjConfig.RoomID
	s.bnjStart = start
	log.Info("bnjLiveConfig ok current:%v config:%+v", time.Now().String(), bnjConfig)
	return
}

View File

@@ -0,0 +1,271 @@
package service
import (
"bytes"
"compress/flate"
"context"
"fmt"
"math"
"sort"
"go-common/app/job/main/dm2/model"
arcMdl "go-common/app/service/main/archive/model/archive"
"go-common/library/log"
)
// gzflate compresses in with raw DEFLATE at the given level; empty input
// yields a nil output without touching the compressor.
func (s *Service) gzflate(in []byte, level int) (out []byte, err error) {
	if len(in) == 0 {
		return
	}
	var (
		buf bytes.Buffer
		w   *flate.Writer
	)
	if w, err = flate.NewWriter(&buf, level); err != nil {
		return
	}
	if _, err = w.Write(in); err != nil {
		return
	}
	if err = w.Close(); err != nil {
		return
	}
	return buf.Bytes(), nil
}
// dmsCache reads the full dm list of a subject from redis, partitions it
// into protected / normal / special pools and enforces the maxlimit quota:
// at most maxlimit protected dm, protected+normal together capped at
// maxlimit (oldest entries dropped), subtitle/special dm appended without
// a cap. Entries dropped here are also trimmed from the redis list.
// A missing cache key yields an empty result without error.
func (s *Service) dmsCache(c context.Context, tp int32, oid, maxlimit int64) (dms []*model.DM, err error) {
	ok, err := s.dao.ExpireDMCache(c, tp, oid)
	if err != nil || !ok {
		return
	}
	values, err := s.dao.DMCache(c, tp, oid)
	if err != nil || len(values) == 0 {
		return
	}
	var (
		start, trimCnt           int
		normal, protect, special []*model.DM
	)
	for _, value := range values {
		dm := &model.DM{}
		if err = dm.Unmarshal(value); err != nil {
			log.Error("proto.Unmarshal(%s) error(%v)", value, err)
			return
		}
		if dm.Pool == model.PoolNormal {
			if dm.AttrVal(model.AttrProtect) == model.AttrYes {
				protect = append(protect, dm)
			} else {
				normal = append(normal, dm)
			}
		} else {
			special = append(special, dm)
		}
	}
	// protected dm: keep only the last maxlimit entries.
	if start = len(protect) - int(maxlimit); start > 0 {
		trimCnt += start
		protect = protect[start:]
	}
	dms = append(dms, protect...)
	// normal dm: protected + normal together stay within maxlimit.
	if start = len(normal) + len(protect) - int(maxlimit); start > 0 {
		trimCnt += start
		normal = normal[start:]
	}
	dms = append(dms, normal...)
	// append subtitle and special dm without a quota.
	dms = append(dms, special...)
	if trimCnt > 0 {
		err = s.dao.TrimDMCache(c, tp, oid, int64(trimCnt))
	}
	return
}
// dms loads the subject's dm directly from the database, grouped per pool
// (protected normal-pool dm get their own bucket), sorts each bucket and
// applies the same maxlimit quota as dmsCache before concatenating
// protected+normal, subtitle and special pools.
func (s *Service) dms(c context.Context, tp int32, oid, maxlimit int64, childpool int32) (dms []*model.DM, err error) {
	var (
		count         int
		keyprotect    = "kp" // bucket key for protected normal-pool dm
		dmMap         = make(map[string][]*model.DM)
		contentSpeMap = make(map[int64]*model.ContentSpecial)
	)
	idxMap, dmids, spedmids, err := s.dao.Indexs(c, tp, oid)
	if err != nil {
		return
	}
	if len(dmids) == 0 {
		return
	}
	ctsMap, err := s.dao.Contents(c, oid, dmids)
	if err != nil {
		return
	}
	if len(spedmids) > 0 {
		if contentSpeMap, err = s.dao.ContentsSpecial(c, spedmids); err != nil {
			return
		}
	}
	// join content rows back onto their index rows, bucketed by pool.
	for _, content := range ctsMap {
		if dm, ok := idxMap[content.ID]; ok {
			key := fmt.Sprint(dm.Pool)
			dm.Content = content
			if dm.Pool == model.PoolNormal {
				if dm.AttrVal(model.AttrProtect) == model.AttrYes {
					key = keyprotect
				}
			}
			if dm.Pool == model.PoolSpecial {
				contentSpe, ok := contentSpeMap[dm.ID]
				if ok {
					dm.ContentSpe = contentSpe
				}
			}
			dmMap[key] = append(dmMap[key], dm)
		}
	}
	// dm sort
	for _, dmsTmp := range dmMap {
		sort.Sort(model.DMSlice(dmsTmp))
	}
	// pool = 0: protected + normal dm together are capped at maxlimit.
	if protect, ok := dmMap[keyprotect]; ok {
		if start := len(protect) - int(maxlimit); start > 0 { // keep the last maxlimit protected dm
			protect = protect[start:]
		}
		dms = append(dms, protect...)
		count = len(protect)
	}
	if normal, ok := dmMap[fmt.Sprint(model.PoolNormal)]; ok {
		start := len(normal) + count - int(maxlimit)
		if start > 0 {
			normal = normal[start:]
		}
		dms = append(dms, normal...)
	}
	// pool = 1: subtitle dm, uncapped.
	if subtitle, ok := dmMap[fmt.Sprint(model.PoolSubtitle)]; ok {
		dms = append(dms, subtitle...)
	}
	// pool = 2: special dm, uncapped.
	if special, ok := dmMap[fmt.Sprint(model.PoolSpecial)]; ok {
		dms = append(dms, special...)
	}
	return
}
// genXML renders the full danmu list of a subject as the legacy XML
// document served to players. The dm list comes from redis when warm
// (<source>k-v</source>) or from the database otherwise
// (<source>e-r</source>), in which case the redis cache is repopulated.
// A closed subject produces a header-only document.
func (s *Service) genXML(c context.Context, sub *model.Subject) (xml []byte, err error) {
	realname := s.isRealname(c, sub.Pid, sub.Oid)
	buf := new(bytes.Buffer)
	buf.WriteString(`<?xml version="1.0" encoding="UTF-8"?><i>`)
	buf.WriteString(`<chatserver>chat.bilibili.com</chatserver><chatid>`)
	buf.WriteString(fmt.Sprint(sub.Oid))
	buf.WriteString(`</chatid><mission>`)
	buf.WriteString(fmt.Sprint(sub.AttrVal(model.AttrSubMission)))
	buf.WriteString(`</mission><maxlimit>`)
	buf.WriteString(fmt.Sprint(sub.Maxlimit))
	buf.WriteString(`</maxlimit>`)
	buf.WriteString(fmt.Sprintf(`<state>%d</state>`, sub.State))
	if realname {
		buf.WriteString(`<real_name>1</real_name>`)
	} else {
		buf.WriteString(`<real_name>0</real_name>`)
	}
	// closed subjects expose only the header.
	if sub.State == model.SubStateClosed {
		buf.WriteString(`</i>`)
		xml = buf.Bytes()
		return
	}
	dms, err := s.dmsCache(c, sub.Type, sub.Oid, sub.Maxlimit)
	if err != nil {
		return
	}
	if len(dms) > 0 {
		buf.WriteString(`<source>k-v</source>`) // served from cache
	} else {
		buf.WriteString(`<source>e-r</source>`) // cache miss: rebuilt from DB
		if dms, err = s.dms(c, sub.Type, sub.Oid, sub.Maxlimit, int32(sub.Childpool)); err != nil {
			return
		}
		if err = s.dao.SetDMCache(c, sub.Type, sub.Oid, dms); err != nil { // add redis cache
			return
		}
	}
	for _, dm := range dms {
		buf.WriteString(dm.ToXML(realname))
	}
	buf.WriteString("</i>")
	xml = buf.Bytes()
	return
}
// isRealname reports whether danmu for this archive require real-name
// verification: two hard-coded whitelist oids plus a per-archive-type oid
// threshold table (s.realname). RPC failures default to false.
func (s *Service) isRealname(c context.Context, aid, oid int64) (realname bool) {
	// hard-coded whitelist oids.
	switch oid {
	case 13196688, 290932:
		return true
	}
	arg := &arcMdl.ArgAid2{Aid: aid}
	archive, err := s.arcRPC.Archive3(c, arg)
	if err != nil {
		log.Error("arcRPC.Archive3(%v) error(%v)", arg, err)
		return false
	}
	threshold, ok := s.realname[int64(archive.TypeID)]
	return ok && oid >= threshold
}
// flushXMLSegCache 刷新每个分段的缓存NOTE:目前只是单纯删除缓存 —
// it drops the cached XML of every segment of a subject, best-effort.
//
// Bug fix: the original assigned per-segment delete failures to the named
// return `err` and `continue`d, so an error on the LAST segment leaked out
// of the supposedly best-effort loop. Deletion errors are now kept local
// and logged instead.
func (s *Service) flushXMLSegCache(c context.Context, sub *model.Subject) (err error) {
	duration, err := s.videoDuration(c, sub.Pid, sub.Oid)
	if err != nil {
		return
	}
	seg := model.SegmentInfo(0, duration)
	for i := int64(1); i <= seg.Cnt; i++ {
		// best-effort: keep deleting the remaining segments on failure.
		if delErr := s.dao.DelXMLSegCache(c, sub.Type, sub.Oid, seg.Cnt, i); delErr != nil {
			log.Error("DelXMLSegCache(type:%d,oid:%d,cnt:%d,num:%d) error(%v)", sub.Type, sub.Oid, seg.Cnt, i, delErr)
			continue
		}
	}
	return
}
// flushAllDmSegCache 刷新视频每个分段弹幕缓存 — it enqueues a forced
// flush for every segment of a video, deriving the segment count from the
// video duration and the default page size. Requests go through the async
// segment-flush channels and may be dropped when those are full.
func (s *Service) flushAllDmSegCache(c context.Context, oid int64, tp int32) (err error) {
	var (
		sub             *model.Subject
		duration, total int64
	)
	if sub, err = s.subject(c, tp, oid); err != nil {
		return
	}
	if duration, err = s.videoDuration(c, sub.Pid, sub.Oid); err != nil {
		return
	}
	total = int64(math.Ceil(float64(duration) / float64(model.DefaultPageSize)))
	for i := int64(1); i <= total; i++ {
		s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
			Type:  tp,
			Oid:   oid,
			Force: true,
			Page: &model.Page{
				Num:   i,
				Size:  model.DefaultPageSize,
				Total: total,
			},
		})
	}
	log.Info("flushAllDmSegCache oid:%v total:%v", oid, total)
	return
}
// asyncAddFlushDM enqueues a full-list flush request on the merge channel
// shard chosen by oid; the request is dropped (with a warning) when that
// channel is full.
func (s *Service) asyncAddFlushDM(c context.Context, fc *model.Flush) {
	shard := s.flushMergeChan[fc.Oid%int64(s.routineSize)]
	select {
	case shard <- fc:
	default:
		log.Warn("flush merge channel is full,flush(%+v)", fc)
	}
}

View File

@@ -0,0 +1,242 @@
package service
import (
"bytes"
"compress/gzip"
"context"
"math"
"go-common/app/job/main/dm2/model"
"go-common/library/ecode"
"go-common/library/log"
)
// gzip compresses input with gzip at the default compression level.
func (s *Service) gzip(input []byte) ([]byte, error) {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(input); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// dmsByid rebuilds full dm objects for the given ids by joining index rows
// with their content rows (plus special-content rows for special-pool dm).
// Ids with no index row are silently skipped; result order follows map
// iteration and is not deterministic.
func (s *Service) dmsByid(c context.Context, tp int32, oid int64, missed []int64) (dms []*model.DM, err error) {
	idxMap, special, err := s.dao.IndexsByid(c, tp, oid, missed)
	if err != nil || len(idxMap) == 0 {
		return
	}
	ctsMap, err := s.dao.Contents(c, oid, missed)
	if err != nil {
		return
	}
	ctsSpeMap := make(map[int64]*model.ContentSpecial)
	if len(special) > 0 {
		if ctsSpeMap, err = s.dao.ContentsSpecial(c, special); err != nil {
			return
		}
	}
	for _, content := range ctsMap {
		if idx, ok := idxMap[content.ID]; ok {
			dm := &model.DM{
				ID:       idx.ID,
				Type:     idx.Type,
				Oid:      idx.Oid,
				Mid:      idx.Mid,
				Progress: idx.Progress,
				Pool:     idx.Pool,
				Attr:     idx.Attr,
				State:    idx.State,
				Ctime:    idx.Ctime,
				Mtime:    idx.Mtime,
				Content:  content,
			}
			// attach the special payload when present.
			if idx.Pool == model.PoolSpecial {
				if _, ok = ctsSpeMap[dm.ID]; ok {
					dm.ContentSpe = ctsSpeMap[dm.ID]
				}
			}
			dms = append(dms, dm)
		}
	}
	return
}
// dmSeg assembles the dm elements of one segment page: it gathers
// normal-pool ids (plus subtitle-pool ids when childpool > 0), resolves
// them through the index/content cache, loads cache misses from the DB and
// writes those back asynchronously. A cache-read failure degrades to a
// full DB load with the write-back skipped.
func (s *Service) dmSeg(c context.Context, tp int32, oid, limit int64, childpool int32, p *model.Page) (res *model.DMSeg, err error) {
	var (
		ids   []int64
		cache = true
		dmids = make([]int64, 0, limit)
		elems = make([]*model.Elem, 0, limit)
		ps    = (p.Num - 1) * p.Size // segment start position
		pe    = p.Num * p.Size       // segment end position
	)
	res = new(model.DMSeg)
	if ids, err = s.dmidsSeg(c, tp, oid, p.Total, p.Num, ps, pe, limit); err != nil {
		return
	}
	dmids = append(dmids, ids...)
	if childpool > 0 {
		if ids, err = s.dmidSubtitle(c, tp, oid, ps, pe, limit); err != nil {
			return
		}
		dmids = append(dmids, ids...)
	}
	if len(dmids) <= 0 {
		return
	}
	elemsCache, missed, err := s.dao.IdxContentCacheV2(c, tp, oid, dmids)
	if err != nil {
		// cache unavailable: treat every id as missed, skip write-back.
		missed = dmids
		cache = false
	} else {
		elems = append(elems, elemsCache...)
	}
	if len(missed) == 0 {
		res.Elems = elems
		return
	}
	dms, err := s.dmsByid(c, tp, oid, missed)
	if err != nil {
		return
	}
	for _, dm := range dms {
		if e := dm.ToElem(); e != nil {
			elems = append(elems, e)
		}
	}
	res.Elems = elems
	// repopulate the cache off the request path.
	if cache && len(dms) > 0 {
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.AddIdxContentCaches(ctx, tp, oid, dms...)
		})
	}
	return
}
// dmidsSeg returns the normal-pool dm ids for segment num of the subject,
// preferring the redis id cache and rebuilding it from the database on a
// miss or cache error (the cache error is deliberately superseded by the
// DB query's result).
func (s *Service) dmidsSeg(c context.Context, tp int32, oid, total, num, ps, pe, limit int64) (dmids []int64, err error) {
	if dmids, err = s.dao.DMIDCache(c, tp, oid, total, num, limit); err != nil || len(dmids) == 0 {
		if dmids, err = s.dao.IndexsSegID(c, tp, oid, ps, pe, limit, model.PoolNormal); err != nil {
			return
		}
		if len(dmids) > 0 {
			// repopulate the id cache off the request path.
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddDMIDCache(ctx, tp, oid, total, num, dmids...)
			})
		}
	}
	return
}
// dmidSubtitle returns the subtitle-pool dm ids within [ps, pe), preferring
// the redis cache and rebuilding it from the database on a miss or cache
// error (the cache error is deliberately superseded by the DB result).
func (s *Service) dmidSubtitle(c context.Context, tp int32, oid, ps, pe, limit int64) (dmids []int64, err error) {
	if dmids, err = s.dao.DMIDSubtitleCache(c, tp, oid, ps, pe, limit); err != nil || len(dmids) == 0 {
		var dms []*model.DM
		if dms, dmids, err = s.dao.IndexsSeg(c, tp, oid, ps, pe, limit, model.PoolSubtitle); err != nil {
			return
		}
		if len(dms) > 0 {
			// repopulate the cache off the request path.
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddDMIDSubtitleCache(ctx, tp, oid, dms...)
			})
		}
	}
	return
}
// asyncAddFlushDMSeg adds a flush-dm-segment action to the flush channel
// shard chosen by oid; the request is dropped (with a warning) when that
// channel is full.
func (s *Service) asyncAddFlushDMSeg(c context.Context, fc *model.FlushDMSeg) (err error) {
	shard := s.flushSegChan[fc.Oid%int64(s.routineSize)]
	select {
	case shard <- fc:
	default:
		log.Warn("segment flush merge channel is full,fc:%+v page:%+v", fc, fc.Page)
	}
	return
}
// pageinfo computes which segment page a dm belongs to from its progress
// and the video duration. When the duration is unknown the default page is
// used, and special-pool dm are always pinned to the first segment.
// NOTE(review): in the duration==0 branch p aliases the shared
// model.DefaultPage pointer, so the p.Num = 1 below would mutate package
// state — presumably DefaultPage.Num is already 1; confirm.
func (s *Service) pageinfo(c context.Context, pid int64, dm *model.DM) (p *model.Page, err error) {
	duration, err := s.videoDuration(c, pid, dm.Oid)
	if err != nil {
		return
	}
	if duration != 0 {
		p = &model.Page{
			Num:   int64(math.Ceil(float64(dm.Progress) / float64(model.DefaultPageSize))),
			Size:  model.DefaultPageSize,
			Total: int64(math.Ceil(float64(duration) / float64(model.DefaultPageSize))),
		}
		if p.Num == 0 { // fix progress == 0
			p.Num = 1
		}
	} else { // duration not exist
		p = model.DefaultPage
	}
	// NOTE PoolSpecial store in the first segment
	if dm.Pool == model.PoolSpecial {
		p.Num = 1
	}
	return
}
// dmSegXML renders one segment's danmu as gzip-compressed XML: ids are
// gathered like dmSeg (normal pool, plus subtitle pool when the subject
// has a childpool), bodies come from the index/content cache with a DB
// fallback, and DB-loaded dm are written back to the cache asynchronously.
// The closing tag and compression happen in the deferred block on success.
func (s *Service) dmSegXML(c context.Context, sub *model.Subject, seg *model.Segment) (res []byte, err error) {
	var (
		cache                         = true
		buf                           = new(bytes.Buffer)
		dms                           []*model.DM
		dmids, normalIds, subtitleIds []int64
	)
	buf.WriteString(seg.ToXMLHeader(sub.Oid, sub.State, 0))
	defer func() {
		if err == nil {
			buf.WriteString(`</i>`)
			res, err = s.gzip(buf.Bytes())
		}
	}()
	// fetch up to twice the subject's maxlimit ids for this segment.
	if normalIds, err = s.dmidsSeg(c, sub.Type, sub.Oid, seg.Cnt, seg.Num, seg.Start, seg.End, 2*sub.Maxlimit); err != nil {
		return
	}
	dmids = append(dmids, normalIds...)
	if sub.Childpool > 0 {
		if subtitleIds, err = s.dmidSubtitle(c, sub.Type, sub.Oid, seg.Start, seg.End, 2*sub.Maxlimit); err != nil {
			return
		}
		dmids = append(dmids, subtitleIds...)
	}
	if len(dmids) <= 0 {
		return
	}
	content, missed, err := s.dao.IdxContentCache(c, sub.Type, sub.Oid, dmids)
	if err != nil {
		// cache unavailable: load everything from DB, skip write-back.
		missed = dmids
		cache = false
	} else {
		buf.Write(content)
	}
	if len(missed) > 0 {
		if dms, err = s.dmsByid(c, sub.Type, sub.Oid, missed); err != nil {
			return
		}
		for _, dm := range dms {
			buf.WriteString(dm.ToXMLSeg())
		}
	}
	if cache && len(dms) > 0 {
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.AddIdxContentCaches(ctx, sub.Type, sub.Oid, dms...)
		})
	}
	return
}
// segmentInfo resolves the segment containing playback position ps within
// a video of the given duration; a ps at or beyond a known duration is
// rejected with ecode.NotModified.
func (s *Service) segmentInfo(c context.Context, aid, oid, ps int64, duration int64) (seg *model.Segment, err error) {
	if duration != 0 && ps >= duration {
		log.Warn("oid:%d ps:%d larger than duration:%d", oid, ps, duration)
		return nil, ecode.NotModified
	}
	return model.SegmentInfo(ps, duration), nil
}

View File

@@ -0,0 +1,51 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestDmsByid loads dm rows by id and expects a non-empty result.
func TestDmsByid(t *testing.T) {
	Convey("", t, func() {
		dms, err := svr.dmsByid(context.TODO(), int32(1), int64(1221), []int64{719150141, 719150142})
		So(err, ShouldBeNil)
		So(dms, ShouldNotBeEmpty)
		for _, dm := range dms {
			t.Log(dm)
		}
	})
}
func TestDMSeg(t *testing.T) {
var (
tp int32 = 1
oid int64 = 1221
childpool int32 = 1
limit int64 = 10
p = &model.Page{Num: 1, Size: model.DefaultVideoEnd, Total: 1}
)
Convey("", t, func() {
res, err := svr.dmSeg(context.TODO(), tp, oid, limit, childpool, p)
So(err, ShouldBeNil)
So(res, ShouldNotBeNil)
t.Logf("%v,length:%d", res, len(res.Elems))
})
}
func TestPageInfo(t *testing.T) {
Convey("", t, func() {
dm := &model.DM{ID: 719182141, Type: 1, Oid: 1221, Progress: 0, Pool: 2}
p, err := svr.pageinfo(context.TODO(), 12345, dm)
So(err, ShouldBeNil)
t.Log(p)
})
}

View File

@@ -0,0 +1,40 @@
package service
import (
"context"
"fmt"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// func TestSpecialDmRemove(t *testing.T) {
// Convey("", t, func() {
// dmid := int64(920249977)
// dm := &model.DM{
// ID: dmid,
// Type: 1,
// Oid: 19,
// Mid: 1,
// State: 1,
// Pool: 2,
// Progress: 10,
// }
// _, err := testSvc.dao.UpdateDM(context.TODO(), dm)
// if err != nil {
// fmt.Println(err)
// }
// So(err, ShouldBeNil)
// })
// }
// TestSpecialLocationUpdate rebuilds the BFS special-dm location index for a
// known (type, oid). NOTE(review): requires live DB/BFS from TestMain config.
func TestSpecialLocationUpdate(t *testing.T) {
	Convey("", t, func() {
		err := svr.specialLocationUpdate(context.TODO(), 1, 19)
		if err != nil {
			fmt.Println(err)
		}
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,133 @@
package service
import (
"context"
"encoding/json"
"fmt"
"sort"
"go-common/app/job/main/dm2/model"
)
const (
	// _bfsMaxSize caps one serialized batch uploaded to BFS (MediumText limit).
	_bfsMaxSize = 16 * 1024 * 1024 // size MediumText
	// _specialJSONItemSize is the per-item JSON overhead: {"id":,"content":""},
	_specialJSONItemSize = 20 + 1 // {"id":,"content":""},
	// _specialJSONAtLeastSize is the overhead of the enclosing array: []
	_specialJSONAtLeastSize = 2 // []
)
// speicalDms loads every special-pool danmaku of (tp, oid) together with its
// regular and special contents, sorted by playback progress ascending.
// NOTE(review): the name carries a typo ("speical"); kept — callers depend on it.
func (s *Service) speicalDms(c context.Context, tp int32, oid int64) (dms []*model.DM, err error) {
	var (
		dmids        []int64
		spContentMap map[int64]*model.ContentSpecial
		contentMap   map[int64]*model.Content
	)
	if dms, dmids, err = s.dao.IndexsByPool(c, tp, oid, model.PoolSpecial); err != nil {
		return
	}
	if len(dmids) == 0 {
		return
	}
	if contentMap, err = s.dao.Contents(c, oid, dmids); err != nil {
		return
	}
	if spContentMap, err = s.dao.ContentsSpecial(c, dmids); err != nil {
		return
	}
	// attach both content variants to their index rows
	for _, dm := range dms {
		if v, ok := contentMap[dm.ID]; ok {
			dm.Content = v
		}
		if v, ok := spContentMap[dm.ID]; ok {
			dm.ContentSpe = v
		}
	}
	sort.Slice(dms, func(i, j int) bool {
		return dms[i].Progress < dms[j].Progress
	})
	return
}
// buildSpecialDms serializes the special danmaku list into one or more JSON
// payloads, each kept under _bfsMaxSize. The running length estimate adds,
// per item, the fixed JSON overhead plus the id digit count and the raw
// special segment, and flushes the current batch before it would overflow.
func (s *Service) buildSpecialDms(c context.Context, dms []*model.DM) (bss [][]byte, err error) {
	var (
		dmSpecialContents []*model.DmSpecialContent
		bs                []byte
		length            int
	)
	if len(dms) == 0 {
		return
	}
	dmSpecialContents = make([]*model.DmSpecialContent, 0, len(dms))
	length = _specialJSONAtLeastSize
	for _, dm := range dms {
		// items without a special segment contribute nothing
		if len(dm.GetSpecialSeg()) == 0 {
			continue
		}
		itemSize := len(fmt.Sprint(dm.ID)) + len(dm.GetSpecialSeg()) + _specialJSONItemSize
		// flush the batch first if adding this item would exceed the limit
		if length+itemSize > _bfsMaxSize {
			if bs, err = json.Marshal(dmSpecialContents); err != nil {
				return
			}
			bss = append(bss, bs)
			dmSpecialContents = make([]*model.DmSpecialContent, 0, len(dms))
			length = _specialJSONAtLeastSize
		}
		length += itemSize
		dmSpecialContents = append(dmSpecialContents, &model.DmSpecialContent{
			ID:      dm.ID,
			Content: dm.GetSpecialSeg(),
		})
	}
	// trailing partial batch
	if len(dmSpecialContents) > 0 {
		if bs, err = json.Marshal(dmSpecialContents); err != nil {
			return
		}
		bss = append(bss, bs)
	}
	return
}
// updateSpecualDms uploads every non-empty serialized batch to BFS and
// upserts the resulting location list for (tp, oid).
// NOTE(review): name carries a typo ("Specual"); kept for compatibility.
func (s *Service) updateSpecualDms(c context.Context, tp int32, oid int64, bss [][]byte) (err error) {
	var locations []string
	for _, payload := range bss {
		if len(payload) == 0 {
			continue
		}
		var loc string
		if loc, err = s.dao.BfsDmUpload(c, "", payload); err != nil {
			return
		}
		locations = append(locations, loc)
	}
	ds := &model.DmSpecial{Type: tp, Oid: oid}
	ds.Join(locations)
	err = s.dao.UpsertDmSpecialLocation(c, ds.Type, ds.Oid, ds.Locations)
	return
}
// specialLocationUpdate rebuilds the BFS-backed special danmaku location
// index of (tp, oid): load the dms, serialize into size-bounded batches,
// upload and persist the locations.
func (s *Service) specialLocationUpdate(c context.Context, tp int32, oid int64) (err error) {
	dms, err := s.speicalDms(c, tp, oid)
	if err != nil {
		return
	}
	bss, err := s.buildSpecialDms(c, dms)
	if err != nil {
		return
	}
	return s.updateSpecualDms(c, tp, oid, bss)
}

View File

@@ -0,0 +1,18 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestDMSCache verifies that dms() returns a non-empty list for a known oid.
// Fix: assert emptiness on the slice itself — goconvey's ShouldNotBeEmpty
// inspects collections via reflection and cannot evaluate a plain int such
// as len(dms), so the old assertion could never pass meaningfully.
func TestDMSCache(t *testing.T) {
	Convey("should return dms and nil", t, func() {
		dms, err := svr.dms(context.TODO(), 1, 1221, 1, 0)
		So(err, ShouldBeNil)
		Convey("dms shoule not be empty", func() {
			So(dms, ShouldNotBeEmpty)
		})
	})
}

View File

@@ -0,0 +1,188 @@
package service
import (
"context"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/api"
archiveMdl "go-common/app/service/main/archive/model/archive"
"go-common/library/log"
)
var (
	// _maskJobDay lists the recent-ranking windows (in days) scanned per
	// category when selecting archives for mask generation.
	_maskJobDay = []int32{3, 7}
)
// maskProc drives periodic danmaku-mask generation for configured
// categories. It is a no-op when MaskCate is not configured, otherwise it
// ticks forever at the configured interval and runs one schedule round per
// tick. Intended to run in its own goroutine.
func (s *Service) maskProc() {
	var (
		err    error
		c      = context.Background()
		ticker *time.Ticker
	)
	if s.conf.MaskCate == nil {
		return
	}
	ticker = time.NewTicker(time.Duration(s.conf.MaskCate.Interval))
	for range ticker.C {
		if err = s.maskSchedule(c); err != nil {
			log.Error("maskProc.error(%v)", err)
			continue
		}
	}
}
// maskSchedule elects a single mask-job runner across instances using a
// redis lock: SETNX claims a fresh lock; an expired lock is taken over via
// GETSET so only the contender that reads back the old expiry proceeds.
// On job failure the lock is released so another instance may retry.
func (s *Service) maskSchedule(c context.Context) (err error) {
	var (
		ok                                bool
		now                               = time.Now()
		expire                            = now.Add(time.Duration(s.conf.MaskCate.Interval))
		expireStr                         = expire.Format(time.RFC3339)
		oldExpireStr, oldExpireGetSetStr string
		oldExpire                         time.Time
	)
	if ok, err = s.dao.SetnxMaskJob(c, expireStr); err != nil {
		return
	}
	// lock key absent in redis: we own the lock, run the job
	if ok {
		if err = s.maskJob(c); err != nil {
			s.dao.DelMaskJob(c)
			log.Error("maskJob,error(%v)", err)
			return
		}
		return
	}
	// lock key already present in redis:
	// check whether the previous holder's lease has expired
	if oldExpireStr, err = s.dao.GetMaskJob(c); err != nil {
		return
	}
	if oldExpire, err = time.Parse(time.RFC3339, oldExpireStr); err != nil {
		return
	}
	if oldExpire.Sub(now) > 0 {
		return
	}
	// GETSET: only the contender that reads back the old value wins the lock
	if oldExpireGetSetStr, err = s.dao.GetSetMaskJob(c, expireStr); err != nil {
		return
	}
	if oldExpireGetSetStr != oldExpireStr {
		return
	}
	if err = s.maskJob(c); err != nil {
		s.dao.DelMaskJob(c)
		log.Error("maskJob,error(%v)", err)
		return
	}
	return
}
// maskJob generates danmaku masks for every configured category id,
// stopping at the first category that fails.
func (s *Service) maskJob(c context.Context) error {
	for _, tid := range s.conf.MaskCate.Tids {
		if err := s.maskOneCate(c, tid); err != nil {
			log.Error("maskOneCate(tid:%v),error(%v)", tid, err)
			return err
		}
	}
	return nil
}
// maskOneCate collects the top archives of category tid from the recent
// ranking lists (windows in _maskJobDay, capped at MaskCate.Limit entries
// per window, including each entry's "Others" parts) and triggers mask
// generation for each. A ranking fetch error aborts; a per-archive error is
// logged and skipped so the rest of the batch still runs.
func (s *Service) maskOneCate(c context.Context, tid int64) (err error) {
	var (
		err1 error
		resp *model.RankRecentResp
		aids []int64
	)
	for _, day := range _maskJobDay {
		if resp, err = s.dao.RankList(c, tid, day); err != nil {
			log.Error("RankList(tid:%v,day:%v),error(%v)", tid, day, err)
			return
		}
		for idx, recentRegion := range resp.List {
			if idx >= s.conf.MaskCate.Limit {
				break
			}
			aids = append(aids, recentRegion.Aid)
			for _, other := range recentRegion.Others {
				aids = append(aids, other.Aid)
			}
		}
	}
	// best-effort per archive: keep going on individual failures
	for _, aid := range aids {
		if err1 = s.maskOneArchive(c, aid); err1 != nil {
			log.Error("maskOneArchive.err aid:%v,error(%v)", aid, err1)
			continue
		}
		log.Info("maskOneArchive.ok aid:%v", aid)
	}
	return
}
// maskOneArchive triggers mask generation for every video page of archive
// aid, stopping at the first page that fails.
func (s *Service) maskOneArchive(c context.Context, aid int64) (err error) {
	pages, err := s.arcRPC.Page3(c, &archiveMdl.ArgAid2{Aid: aid})
	if err != nil {
		log.Error("s.arcRPC.Page3(aid:%v),error(%v)", aid, err)
		return
	}
	for _, p := range pages {
		if err = s.maskOneVideo(c, p.Cid); err != nil {
			log.Error("maskOneVideo(oid:%v),error(%v)", p.Cid, err)
			return
		}
	}
	return
}
// maskOneVideo requests danmaku-mask generation for video oid unless the
// subject already has the mask-open attribute set. Archive info (duration,
// type) is fetched best-effort to enrich the request; on success the
// mask-open attribute is persisted on the subject so the video is not
// re-submitted on later runs.
func (s *Service) maskOneVideo(c context.Context, oid int64) (err error) {
	var (
		subject  *model.Subject
		archive3 *api.Arc
		err1     error
		duration int64
		typeID   int32
	)
	if subject, err = s.subject(c, model.SubTypeVideo, oid); err != nil {
		log.Error("s.subject(oid:%v),error(%v)", oid, err)
		return
	}
	// already masked: nothing to do
	if subject.AttrVal(model.AttrSubMaskOpen) == model.AttrYes {
		return
	}
	// best-effort enrichment; a failure leaves duration/typeID zero
	if archive3, err1 = s.arcRPC.Archive3(c, &archiveMdl.ArgAid2{Aid: subject.Pid}); err1 == nil && archive3 != nil {
		duration = archive3.Duration
		typeID = archive3.TypeID
	}
	if err = s.dao.GenerateMask(c, oid, subject.Mid, model.MaskPlatAll, model.MaskPriorityLow, subject.Pid, duration, typeID); err != nil {
		log.Error("GenerateMask(oid:%v),error(%v)", oid, err)
		return
	}
	// record that the mask has been requested for this subject
	subject.AttrSet(model.AttrYes, model.AttrSubMaskOpen)
	if _, err = s.dao.UpdateSubAttr(c, subject.Type, subject.Oid, subject.Attr); err != nil {
		log.Error("UpdateSubAttr(oid:%v,attr:%v),error(%v)", oid, subject.Attr, err)
		return
	}
	return
}
// maskMidProc reloads the list of mids with the danmaku mask enabled every
// five minutes; a lookup error keeps the previous list. Runs forever in its
// own goroutine.
func (s *Service) maskMidProc() {
	c := context.Background()
	ticker := time.NewTicker(time.Minute * 5)
	defer ticker.Stop()
	for range ticker.C {
		mids, err := s.dao.MaskMids(c)
		if err != nil {
			continue
		}
		s.maskMid = mids
		log.Info("update mask mid(%v)", s.maskMid)
	}
}

View File

@@ -0,0 +1,32 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestMaskOneVideo submits one known cid for mask generation.
// NOTE(review): these tests hit live archive RPC and mask services.
func TestMaskOneVideo(t *testing.T) {
	Convey("mask one video", t, func() {
		err := svr.maskOneVideo(context.TODO(), 8936701)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}

// TestMaskOneArchive submits all pages of one known aid for mask generation.
func TestMaskOneArchive(t *testing.T) {
	Convey("mask one archive", t, func() {
		err := svr.maskOneArchive(context.TODO(), 10098039)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}

// TestMaskOneCate runs mask generation for one known category id.
func TestMaskOneCate(t *testing.T) {
	Convey("mask one cate", t, func() {
		err := svr.maskOneCate(context.TODO(), 185)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}

View File

@@ -0,0 +1,457 @@
package service
import (
"context"
"encoding/json"
"fmt"
"regexp"
"time"
"go-common/app/job/main/dm2/conf"
"go-common/app/job/main/dm2/dao"
"go-common/app/job/main/dm2/model"
"go-common/app/job/main/dm2/model/oplog"
arcCli "go-common/app/service/main/archive/api/gorpc"
filterCli "go-common/app/service/main/filter/api/grpc/v1"
seqMdl "go-common/app/service/main/seq-server/model"
seqCli "go-common/app/service/main/seq-server/rpc/client"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"go-common/library/sync/pipeline/fanout"
"go-common/library/xstr"
)
const (
	// _routineSizeDefault is the worker count per async channel group when
	// the config does not specify a positive RoutineSize.
	_routineSizeDefault = 10
	// _chanSize is the buffer size of each flush/recent channel.
	_chanSize = 10240
	// _batchSize is the number of merged entries that forces a flush round.
	_batchSize = 1000
	// _maxUpRecent caps the creator-center recent danmaku list per uploader.
	_maxUpRecent = 1000
)
// Service is the dm2-job worker: it consumes databus events, maintains
// danmaku caches, generates masks and runs deletion tasks.
type Service struct {
	conf  *conf.Config
	dao   *dao.Dao
	cache *fanout.Fanout // async cache write-back executor
	arcRPC *arcCli.Service2
	// seq server (distributed id generation)
	seqArg *seqMdl.ArgBusiness
	seqRPC *seqCli.Service2
	// databus consumers
	indexCsmr         *databus.Databus
	subjectCsmr       *databus.Databus
	actionCsmr        *databus.Databus
	reportCsmr        *databus.Databus
	videoupCsmr       *databus.Databus
	subtitleAuditCsmr *databus.Databus
	// per-shard worker channels (sharded by oid)
	flushMergeChan []chan *model.Flush
	flushSegChan   []chan *model.FlushDMSeg
	dmRecentChan   []chan *model.DM
	routineSize    int
	// key: category id; value: cid threshold — videos in that category with
	// cid greater than the value require realname verification
	realname map[int64]int64
	// filter service
	filterRPC filterCli.FilterClient
	maskMid   []int64 // mids with the danmaku mask enabled
	dmOperationLogSvc *infoc.Infoc
	opsLogCh          chan *oplog.Infoc
	// bnj: Bainianji (New Year festival) special-event state
	bnjAid             int64
	bnjSubAids         map[int64]struct{}
	bnjCsmr            *databus.Databus
	bnjliveRoomID      int64
	bnjStart           time.Time
	bnjIgnoreBeginTime time.Duration
	bnjIgnoreEndTime   time.Duration
	bnjArcVideos       []*model.Video
	bnjIgnoreRate      int64
	bnjUserLevel       int32
}
// New builds the Service, wires its RPC clients and databus consumers, and
// starts every background goroutine (consumers, flush workers, mask and
// task schedulers). It panics on unrecoverable config errors, which is
// acceptable at process startup.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		conf:              c,
		dao:               dao.New(c),
		cache:             fanout.New("cache", fanout.Worker(1), fanout.Buffer(1024)),
		arcRPC:            arcCli.New2(c.ArchiveRPC),
		seqArg:            &seqMdl.ArgBusiness{BusinessID: c.Seq.BusinessID, Token: c.Seq.Token},
		seqRPC:            seqCli.New2(c.SeqRPC),
		subjectCsmr:       databus.New(c.Databus.SubjectCsmr),
		indexCsmr:         databus.New(c.Databus.IndexCsmr),
		actionCsmr:        databus.New(c.Databus.ActionCsmr),
		reportCsmr:        databus.New(c.Databus.ReportCsmr),
		videoupCsmr:       databus.New(c.Databus.VideoupCsmr),
		subtitleAuditCsmr: databus.New(c.Databus.SubtitleAuditCsmr),
		routineSize:       c.RoutineSize,
		realname:          make(map[int64]int64),
		dmOperationLogSvc: infoc.New(c.Infoc2),
		opsLogCh:          make(chan *oplog.Infoc, 1024),
	}
	// fall back to the default worker count on missing/invalid config
	if c.RoutineSize <= 0 {
		s.routineSize = _routineSizeDefault
	}
	s.flushMergeChan = make([]chan *model.Flush, s.routineSize)
	s.flushSegChan = make([]chan *model.FlushDMSeg, s.routineSize)
	s.dmRecentChan = make([]chan *model.DM, s.routineSize)
	filterRPC, err := filterCli.NewClient(c.FliterRPC)
	if err != nil {
		panic(err)
	}
	s.filterRPC = filterRPC
	// expand the realname config: "id1,id2" -> cid threshold per category id
	for idStr, cid := range conf.Conf.Realname {
		ids, err := xstr.SplitInts(idStr)
		if err != nil {
			panic(err)
		}
		for _, id := range ids {
			if _, ok := s.realname[id]; !ok {
				s.realname[id] = cid
			}
		}
	}
	// legacy: Bainianji (New Year festival) special handling
	if s.conf.BNJ != nil {
		// bnj counters/state
		s.initBnj()
	}
	// consume DMReport-T messages
	go s.reportCsmproc()
	// consume DMAction-T messages
	go s.actionCsmproc()
	// consume DMSubject-T messages
	go s.subjectCsmproc()
	// consume DMMeta-T messages
	go s.indexCsmproc()
	// consume Videoup2Bvc messages
	go s.videoupCsmrproc()
	// consume subtitle submit messages
	go s.subtitleAuditProc()
	// workers flushing whole-range danmaku caches
	for i := 0; i < s.routineSize; i++ {
		flushChan := make(chan *model.Flush, _chanSize)
		s.flushMergeChan[i] = flushChan
		go s.flushmergeproc(flushChan)
	}
	// workers flushing segmented danmaku caches
	for i := 0; i < s.routineSize; i++ {
		flushSegChan := make(chan *model.FlushDMSeg, _chanSize)
		s.flushSegChan[i] = flushSegChan
		go s.flushSegproc(flushSegChan)
	}
	// workers maintaining the creator-center recent danmaku list cache
	for i := 0; i < s.routineSize; i++ {
		recentChan := make(chan *model.DM, _chanSize)
		s.dmRecentChan[i] = recentChan
		go s.dmRecentproc(recentChan)
	}
	go s.transferProc()
	// generate danmaku masks for hot sub-category videos
	go s.maskProc()
	// seed and periodically refresh mids with the mask enabled
	s.maskMid, _ = s.dao.MaskMids(context.TODO())
	log.Info("update mask mid(%v)", s.maskMid)
	go s.maskMidProc()
	// dm task
	go s.taskResProc()
	go s.taskDelProc()
	// oplog
	go s.oplogproc()
	return
}
// Ping reports the health of the service's storage dependencies.
func (s *Service) Ping(c context.Context) error {
	return s.dao.Ping(c)
}
// subjectCsmproc consumes dm_subject binlog messages and forwards rows from
// the sharded dm_subject_N tables to trackSubject. Processing errors skip
// the commit so the message is redelivered; the loop exits when the
// consumer channel closes.
func (s *Service) subjectCsmproc() {
	var (
		err          error
		c            = context.TODO()
		regexSubject = regexp.MustCompile("dm_subject_[0-9]+")
	)
	for {
		msg, ok := <-s.subjectCsmr.Messages()
		if !ok {
			log.Error("subject binlog consumer exit")
			return
		}
		m := &model.BinlogMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		log.Info("%s", m)
		// only process messages from the sharded subject tables
		if regexSubject.MatchString(m.Table) {
			if err = s.trackSubject(c, m); err != nil {
				log.Error("s.trackSubject(%s) error(%v)", m, err)
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// indexCsmproc consumes dm_index binlog messages and forwards rows from the
// sharded dm_index_N tables to trackIndex. Processing errors skip the
// commit so the message is redelivered; the loop exits when the consumer
// channel closes.
func (s *Service) indexCsmproc() {
	var (
		err        error
		c          = context.TODO()
		regexIndex = regexp.MustCompile("dm_index_[0-9]+")
	)
	for {
		msg, ok := <-s.indexCsmr.Messages()
		if !ok {
			log.Error("index binlog consumer exit")
			return
		}
		m := &model.BinlogMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		log.Info("%s", m)
		// only process messages from the sharded index tables
		if regexIndex.MatchString(m.Table) {
			if err = s.trackIndex(c, m); err != nil {
				log.Error("s.traceIndex(%s) error(%v)", m, err)
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// flushmergeproc merges whole-range flush requests per oid and executes a
// flush round either when _batchSize distinct oids accumulate or on a
// 60-second tick, whichever comes first. A later request for the same oid
// is only kept when it forces a refresh. Runs until its channel closes.
func (s *Service) flushmergeproc(flushChan chan *model.Flush) {
	var (
		flushs = make(map[int64]*model.Flush)
		ticker = time.NewTicker(60 * time.Second)
		err    error
	)
	for {
		select {
		case flush, ok := <-flushChan:
			if !ok {
				log.Error("action channel closed")
				return
			}
			if _, ok := flushs[flush.Oid]; !ok || flush.Force { // key absent or force flush requested
				flushs[flush.Oid] = flush
			}
			// keep accumulating until a full batch is reached
			if len(flushs) < _batchSize {
				continue
			}
		case <-ticker.C:
		}
		if len(flushs) > 0 {
			for _, flush := range flushs {
				if err = s.flushDmCache(context.TODO(), flush); err != nil {
					log.Error("action:flushmergeproc,flush:%+v,error(%v)", flush, err)
				}
			}
			flushs = make(map[int64]*model.Flush)
		}
	}
}
// keySegFlush builds the merge-map key that uniquely identifies a segment
// flush request by type, oid, total page count and page number.
func keySegFlush(tp int32, oid, total, num int64) string {
	key := fmt.Sprintf("f_%d_%d_%d_%d", tp, oid, total, num)
	return key
}
// flushSegproc merges segment flush requests keyed by (type, oid, total,
// page) and executes a flush round either when _batchSize distinct keys
// accumulate or on a 60-second tick. A later request for the same key is
// only kept when it forces a refresh. Runs until its channel closes.
func (s *Service) flushSegproc(ch chan *model.FlushDMSeg) {
	var (
		key    string
		merge  = make(map[string]*model.FlushDMSeg)
		ticker = time.NewTicker(60 * time.Second)
		err    error
	)
	for {
		select {
		case msg, ok := <-ch:
			if !ok {
				log.Error("action channel closed")
				return
			}
			key = keySegFlush(msg.Type, msg.Oid, msg.Page.Total, msg.Page.Num)
			if _, ok := merge[key]; !ok || msg.Force { // key absent or force flush requested
				merge[key] = msg
			}
			// keep accumulating until a full batch is reached
			if len(merge) < _batchSize {
				continue
			}
		case <-ticker.C:
		}
		if len(merge) > 0 {
			for _, v := range merge {
				if err = s.flushDmSegCache(context.TODO(), v); err != nil {
					log.Error("action:flushSegproc,data:%+v,error(%v)", v, err)
					continue
				}
			}
			merge = make(map[string]*model.FlushDMSeg)
		}
	}
}
// actionCsmproc consumes DMAction-T messages and dispatches each action to
// actionAct. Decode or processing failures skip the commit so the message
// is redelivered; the loop exits when the consumer channel closes.
func (s *Service) actionCsmproc() {
	for {
		msg, ok := <-s.actionCsmr.Messages()
		if !ok {
			log.Error("action consumer exit")
			return
		}
		act := &model.Action{}
		err := json.Unmarshal(msg.Value, &act)
		if err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		start := time.Now()
		if err = s.actionAct(context.TODO(), act); err != nil {
			log.Error("action:%s,data:%s,error(%v)", act.Action, act.Data, err)
			continue
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s costing:%+v", msg.Partition, msg.Offset, msg.Key, msg.Value, time.Since(start))
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// dmRecentproc maintains the per-uploader recent danmaku sorted set: dms in
// a visible state are added (loading missing contents first) and the list
// is trimmed to _maxUpRecent; dms in any other state are removed. All
// failures are best-effort skips. Runs until its channel closes.
func (s *Service) dmRecentproc(dmChan chan *model.DM) {
	var (
		count int64
		c     = context.TODO()
	)
	for {
		dm, ok := <-dmChan
		if !ok {
			log.Error("recent dm channel is closed")
			return
		}
		sub, err := s.subject(c, dm.Type, dm.Oid)
		if err != nil {
			continue
		}
		// non-visible states are removed from the uploader's recent list
		if dm.State != model.StateNormal && dm.State != model.StateHide && dm.State != model.StateMonitorAfter {
			if err = s.dao.ZRemRecentDM(c, sub.Mid, dm.ID); err != nil {
				continue
			}
		} else {
			// lazily load contents the producer did not attach
			if dm.Content == nil {
				if dm.Content, err = s.dao.Content(c, dm.Oid, dm.ID); err != nil {
					continue
				}
			}
			if dm.Pool == model.PoolSpecial && dm.ContentSpe == nil {
				if dm.ContentSpe, err = s.dao.ContentSpecial(c, dm.ID); err != nil {
					continue
				}
			}
			if count, err = s.dao.AddRecentDM(c, sub.Mid, dm); err != nil {
				continue
			}
			// keep only the newest _maxUpRecent entries
			if trimCnt := count - _maxUpRecent; trimCnt > 0 {
				if err = s.dao.TrimRecentDM(c, sub.Mid, trimCnt); err != nil {
					continue
				}
			}
		}
	}
}
// asyncAddRecent hands dm to the recent-list worker whose shard is chosen
// by oid. The send never blocks: when the shard's buffer is full the dm is
// dropped with a warning (recent-list maintenance is best-effort).
func (s *Service) asyncAddRecent(c context.Context, dm *model.DM) {
	ch := s.dmRecentChan[dm.Oid%int64(s.routineSize)]
	select {
	case ch <- dm:
	default:
		log.Warn("dm recent channel is full,dm(%+v)", dm)
	}
}
// reportCsmproc consumes DMReport-T messages and clears the hidden state of
// reported danmaku once their HideTime has passed, sleeping in-loop until
// then. NOTE(review): the sleep delays every later message on this
// partition — presumably acceptable for this queue's volume; confirm.
func (s *Service) reportCsmproc() {
	for {
		msg, ok := <-s.reportCsmr.Messages()
		if !ok {
			log.Error("report consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,value:%s", msg.Partition, msg.Offset, msg.Value)
		act := &model.ReportAction{}
		err := json.Unmarshal(msg.Value, &act)
		if err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		// wait until the scheduled un-hide time before acting
		if diff := act.HideTime - time.Now().Unix(); diff > 0 {
			log.Info("action:%+v will be processed after %d seconds", act, diff)
			time.Sleep(time.Duration(diff) * time.Second)
		}
		if _, err = s.dao.DelDMHideState(context.TODO(), 1, act.Cid, act.Did); err != nil {
			log.Error("DelDMHideState(%+v) error(%v)", act, err)
		} else {
			log.Info("DelDMHideState(%+v) success ", act)
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// videoupCsmrproc consumes Videoup2Bvc messages and, for the archive
// open/sync routes, re-tracks the archive's danmaku subjects. Processing
// failures skip the commit so the message is redelivered; the loop exits
// when the consumer channel closes.
func (s *Service) videoupCsmrproc() {
	var (
		err error
		c   = context.TODO()
	)
	for {
		msg, ok := <-s.videoupCsmr.Messages()
		if !ok {
			log.Error("videoup consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.VideoupMsg{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		// only the routes that (re)open an archive require re-tracking
		if m.Route == model.RouteSecondRound || m.Route == model.RouteAutoOpen ||
			m.Route == model.RouteForceSync || m.Route == model.RouteDelayOpen {
			if err = s.trackVideoup(c, m.Aid); err != nil {
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// subtitleAuditProc consumes subtitle submission messages and runs the
// sensitive-word filter pipeline on each submitted subtitle. Processing
// failures skip the commit so the message is redelivered; the loop exits
// when the consumer channel closes.
func (s *Service) subtitleAuditProc() {
	var (
		err error
		c   = context.Background()
	)
	for {
		msg, ok := <-s.subtitleAuditCsmr.Messages()
		if !ok {
			log.Error("subtitle_audit consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.SubtitleAuditMsg{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		if err = s.SubtitleFilter(c, m.Oid, m.SubtitleID); err != nil {
			log.Error("SubtitleFilter(oid:%v,subtitleID:%v),error(%v)", m.Oid, m.SubtitleID, err)
			continue
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}

View File

@@ -0,0 +1,39 @@
package service
import (
"context"
"flag"
"os"
"path/filepath"
"testing"
"go-common/app/job/main/dm2/conf"
. "github.com/smartystreets/goconvey/convey"
)
var (
	// svr is the shared service instance used by every test in this package.
	svr *Service
)

// TestMain boots the service from the local job config before running the
// test suite. NOTE(review): requires cmd/dm2-job.toml and live backends.
func TestMain(m *testing.M) {
	var (
		err error
	)
	dir, _ := filepath.Abs("../cmd/dm2-job.toml")
	if err = flag.Set("conf", dir); err != nil {
		panic(err)
	}
	if err = conf.Init(); err != nil {
		panic(err)
	}
	svr = New(conf.Conf)
	os.Exit(m.Run())
}

// TestPing checks that the service's storage dependencies are reachable.
func TestPing(t *testing.T) {
	Convey("", t, func() {
		err := svr.Ping(context.TODO())
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,39 @@
package service
import (
"context"
"go-common/app/job/main/dm2/model"
"go-common/library/ecode"
"go-common/library/log"
)
// subject returns the danmaku subject of (tp, oid) using a cache-aside
// read: a cache error is swallowed (and disables the write-back), a cache
// miss falls through to the DB, and a DB miss is negatively cached as an
// empty subject. An empty subject (ID == 0) yields ecode.NothingFound.
func (s *Service) subject(c context.Context, tp int32, oid int64) (sub *model.Subject, err error) {
	var cache = true
	if sub, err = s.dao.SubjectCache(c, tp, oid); err != nil {
		// cache unhealthy: fall back to DB and skip the write-back
		err = nil
		cache = false
	}
	if sub == nil {
		if sub, err = s.dao.Subject(c, tp, oid); err != nil {
			return
		}
		if sub == nil {
			// negative-cache placeholder; ID stays 0
			sub = &model.Subject{
				Type: tp,
				Oid:  oid,
			}
		}
		if cache {
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddSubjectCache(ctx, sub)
			})
		}
	}
	if sub.ID == 0 {
		err = ecode.NothingFound
		log.Error("subject not exist,type:%d,oid:%d", tp, oid)
		return
	}
	return
}

View File

@@ -0,0 +1,17 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestSubject loads the subject of a known (type, oid) and expects a
// non-nil result. NOTE(review): relies on live backends from TestMain.
func TestSubject(t *testing.T) {
	Convey("", t, func() {
		sub, err := svr.subject(context.TODO(), 1, 1221)
		So(err, ShouldBeNil)
		So(sub, ShouldNotBeNil)
		t.Logf("subject:%+v", sub)
	})
}

View File

@@ -0,0 +1,242 @@
package service
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"go-common/app/job/main/dm2/model"
filterMdl "go-common/app/service/main/filter/api/grpc/v1"
"go-common/library/database/sql"
"go-common/library/ecode"
"go-common/library/log"
)
const (
	// _filterAreaSubtitle is the filter-service area used for subtitle text.
	_filterAreaSubtitle = "subtitle"
	// _filterCapacity is the number of subtitle lines batched per filter RPC.
	_filterCapacity = 5
	// _contentSizeLimit caps the length of a single subtitle line.
	_contentSizeLimit = 300
)
// SubtitleFilter runs the sensitive-word pipeline on a submitted subtitle.
// 1. Only subtitles in the check-to-audit or check-to-publish state are processed.
// 2. Check-to-audit data that passes moves to the to-audit state; the draft
//    and subtitle caches are invalidated.
// 3. A failed check moves it to audit-rejected with a reject comment, and
//    caches are invalidated.
// 4. Check-to-publish data that passes is published: the subtitle row is
//    updated, the publish record upserted, caches invalidated.
// 5. A failed publish check is also moved to audit-rejected.
// 6. Consumption failures leave the message for redelivery (fault tolerance).
func (s *Service) SubtitleFilter(c context.Context, oid int64, subtitleID int64) (err error) {
	var (
		subtitle *model.Subtitle
	)
	if subtitle, err = s.dao.GetSubtitle(c, oid, subtitleID); err != nil {
		log.Error("params(oid::%v,subtitleID:%v)", oid, subtitleID)
		return
	}
	// missing rows are logged and treated as done (nothing to filter)
	if subtitle == nil {
		log.Error("params(oid:%v,subtitleID:%v not found)", oid, subtitleID)
		return
	}
	switch subtitle.Status {
	case model.SubtitleStatusCheckToAudit:
		if err = s.checkToAudit(c, subtitle); err != nil {
			log.Error("checkToAudit.params(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
	case model.SubtitleStatusCheckPublish:
		if err = s.checkToPublish(c, subtitle); err != nil {
			log.Error("checkToPublish.params(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
	default:
		return
	}
	return
}
// checkToAudit filters a check-to-audit subtitle: with no hits it advances
// to the to-audit state, otherwise it is rejected with the hit words
// recorded in the reject comment. The draft and subtitle caches are
// invalidated either way.
func (s *Service) checkToAudit(c context.Context, subtitle *model.Subtitle) (err error) {
	var (
		status = model.SubtitleStatusToAudit
		hits   []string
	)
	if hits, err = s.checkBfsData(c, subtitle); err != nil {
		log.Error("checkBfsData(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	if len(hits) > 0 {
		subtitle.RejectComment = "敏感词:" + strings.Join(hits, ",")
		status = model.SubtitleStatusAuditBack
		subtitle.PubTime = time.Now().Unix()
	}
	subtitle.Status = status
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("UpdateSubtitleStatus(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	// best-effort cache invalidation
	s.dao.DelSubtitleDraftCache(c, subtitle.Oid, subtitle.Type, subtitle.Mid, subtitle.Lan)
	s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID)
	return
}
// checkToPublish filters a check-to-publish subtitle. An oversize line or a
// sensitive-word hit demotes it to audit-rejected; otherwise it is
// published via auditPass. Note the deliberate err reuse: a
// SubtitleSizeLimit error from checkBfsData is handled as a rejection and
// then overwritten by the UpdateSubtitle call below.
func (s *Service) checkToPublish(c context.Context, subtitle *model.Subtitle) (err error) {
	var (
		status = model.SubtitleStatusPublish
		hits   []string
	)
	if hits, err = s.checkBfsData(c, subtitle); err != nil && err != ecode.SubtitleSizeLimit {
		log.Error("checkBfsData(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	if err == ecode.SubtitleSizeLimit {
		subtitle.RejectComment = "单条字幕数超过限制"
		status = model.SubtitleStatusAuditBack
	}
	if len(hits) > 0 {
		subtitle.RejectComment = "敏感词:" + strings.Join(hits, ",")
		status = model.SubtitleStatusAuditBack
	}
	subtitle.Status = status
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("UpdateSubtitleStatus(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	if status == model.SubtitleStatusPublish {
		if err = s.auditPass(c, subtitle); err != nil {
			log.Error("auditPass(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
		return
	}
	if err = s.auditReject(c, subtitle); err != nil {
		log.Error("auditReject(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	return
}
// checkBfsData downloads the subtitle JSON payload from BFS, decodes it and
// runs the sensitive-word filter over its lines, returning any hit words.
func (s *Service) checkBfsData(c context.Context, subtitle *model.Subtitle) (hits []string, err error) {
	bs, err := s.dao.BfsData(c, subtitle.SubtitleURL)
	if err != nil {
		log.Error("BfsData.params(SubtitleURL:%v),error(%v)", subtitle.SubtitleURL, err)
		return
	}
	body := new(model.SubtitleBody)
	if err = json.Unmarshal(bs, body); err != nil {
		log.Error("checkToAudit.Unmarshal,error(%v)", err)
		return
	}
	if hits, err = s.checkFilter(c, body); err != nil {
		log.Error("checkFilter(body:%+v),error(%v)", body, err)
		return
	}
	return
}
// checkFilter runs the subtitle lines through the filter service in batches
// of _filterCapacity and returns the deduplicated set of hit words. A line
// longer than _contentSizeLimit aborts with ecode.SubtitleSizeLimit.
func (s *Service) checkFilter(c context.Context, body *model.SubtitleBody) (hits []string, err error) {
	var (
		msgMap  map[string]string
		msgMaps []map[string]string
		reply   *filterMdl.MHitReply
		hitMap  map[string]struct{}
	)
	msgMap = make(map[string]string)
	// split the lines into fixed-size batches keyed by line index
	for idx, item := range body.Bodys {
		if len(item.Content) > _contentSizeLimit {
			err = ecode.SubtitleSizeLimit
			return
		}
		msgMap[fmt.Sprint(idx)] = item.Content
		if (idx+1)%_filterCapacity == 0 {
			msgMaps = append(msgMaps, msgMap)
			msgMap = make(map[string]string)
		}
	}
	// trailing partial batch
	if len(msgMap) > 0 {
		msgMaps = append(msgMaps, msgMap)
	}
	hitMap = make(map[string]struct{})
	for _, msgMap = range msgMaps {
		if reply, err = s.filterRPC.MHit(c, &filterMdl.MHitReq{
			Area:   _filterAreaSubtitle,
			MsgMap: msgMap,
		}); err != nil {
			log.Error("checkFilter(msgMap:%+v),error(%v)", msgMap, err)
			return
		}
		// collect hits across batches, deduplicated via the set
		for _, rl := range reply.GetRMap() {
			for _, hit := range rl.GetHits() {
				hitMap[hit] = struct{}{}
			}
		}
	}
	for k := range hitMap {
		hits = append(hits, k)
	}
	return
}
// auditReject marks subtitle as rejected by audit, persists the status, and
// invalidates the draft and subtitle caches so readers see the change.
func (s *Service) auditReject(c context.Context, subtitle *model.Subtitle) (err error) {
	subtitle.Status = model.SubtitleStatusAuditBack
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("params(%+v).error(%v)", subtitle, err)
		return
	}
	// use the request context (consistent with checkToAudit) so tracing and
	// deadline metadata propagate to the cache invalidations
	s.dao.DelSubtitleDraftCache(c, subtitle.Oid, subtitle.Type, subtitle.Mid, subtitle.Lan)
	s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID)
	return
}
// auditPass publishes subtitle: within one transaction it clears the reject
// comment, updates the subtitle row and upserts the publish record, then
// invalidates the related caches. The transaction is rolled back when any
// step fails and committed otherwise.
func (s *Service) auditPass(c context.Context, subtitle *model.Subtitle) (err error) {
	var (
		tx          *sql.Tx
		subtitlePub *model.SubtitlePub
	)
	subtitle.RejectComment = ""
	if tx, err = s.dao.BeginBiliDMTran(c); err != nil {
		log.Error("error(%v)", err)
		return
	}
	// Register the rollback/commit handler only after the transaction
	// exists: the previous code deferred before Begin, so a Begin failure
	// invoked Rollback on a nil *sql.Tx and panicked.
	defer func() {
		if err != nil {
			tx.Rollback()
			log.Error("params(subtitle:%+v).err(%v)", subtitle, err)
			return
		}
		if err = tx.Commit(); err != nil {
			log.Error("params(subtitle:%+v).err(%v)", subtitle, err)
			return
		}
	}()
	if err = s.dao.TxUpdateSubtitle(tx, subtitle); err != nil {
		log.Error("params(%+v).error(%v)", subtitle, err)
		return
	}
	subtitlePub = &model.SubtitlePub{
		Oid:        subtitle.Oid,
		Type:       subtitle.Type,
		Lan:        subtitle.Lan,
		SubtitleID: subtitle.ID,
	}
	if err = s.dao.TxAddSubtitlePub(tx, subtitlePub); err != nil {
		log.Error("params(%+v).error(%v)", subtitlePub, err)
		return
	}
	if err = s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID); err != nil {
		log.Error("DelSubtitleCache.params(subtitle:%+v).err(%v)", subtitle, err)
		return
	}
	if err = s.dao.DelVideoSubtitleCache(c, subtitle.Oid, subtitle.Type); err != nil {
		log.Error("DelVideoSubtitleCache.params(subtitle:%+v).err(%v)", subtitle, err)
		return
	}
	return
}

View File

@@ -0,0 +1,53 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestSubtitle runs the full filter pipeline for a known (oid, subtitleID).
// NOTE(review): relies on live BFS/DB/filter services from TestMain config.
func TestSubtitle(t *testing.T) {
	var (
		oid        int64 = 10109227
		subtitleID int64 = 1
	)
	Convey("", t, func() {
		err := svr.SubtitleFilter(context.Background(), oid, subtitleID)
		So(err, ShouldBeNil)
	})
}

// TestSubtitleFilter feeds a fixed subtitle body with known sensitive terms
// through checkFilter and logs the reported hits.
func TestSubtitleFilter(t *testing.T) {
	body := &model.SubtitleBody{
		Bodys: []*model.SubtitleItem{
			{
				From:    0,
				To:      10,
				Content: "习近平",
			},
			{
				From:    0,
				To:      10,
				Content: "习大大",
			},
			{
				From:    0,
				To:      10,
				Content: "不要哇",
			},
			{
				From:    0,
				To:      10,
				Content: "呀咩爹",
			},
		},
	}
	Convey("subtitle filter", t, func() {
		hits, err := svr.checkFilter(context.Background(), body)
		So(err, ShouldBeNil)
		t.Logf("hits:%v", hits)
	})
}

View File

@@ -0,0 +1,277 @@
package service
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/job/main/dm2/model/oplog"
"go-common/library/log"
)
// taskResProc polls search-state tasks on a ticker and advances each by its
// search result: a failed search marks the task failed; a successful one
// records the result URL and count, moving the task to waiting (when it has
// a sub task) or to success. Per-task failures are skipped so the rest of
// the batch proceeds. Runs forever in its own goroutine.
func (s *Service) taskResProc() {
	var (
		c     = context.Background()
		tasks []*model.TaskInfo
		err   error
	)
	ticker := time.NewTicker(time.Duration(s.conf.TaskConf.ResInterval))
	defer ticker.Stop()
	for range ticker.C {
		if tasks, err = s.dao.TaskInfos(c, model.TaskStateSearch); err != nil {
			log.Error("s.dao.TaskInfos error(%v)", err)
			continue
		}
		for _, task := range tasks {
			count, url, state, err := s.dao.TaskSearchRes(c, task)
			if err != nil {
				log.Error("s.dao.TaskSearchRes(%+v) error(%v)", task, err)
				continue
			}
			if state == model.TaskSearchFail {
				task.State = model.TaskStateFail
			} else if state == model.TaskSearchSuc {
				task.Result = url
				task.Count = count
				// tasks with a sub task wait for it; others complete now
				if task.Sub > 0 {
					task.State = model.TaskStateWait
				} else {
					task.State = model.TaskStateSuc
				}
			}
			// best-effort persist; a failed update is retried on the next tick
			s.dao.UpdateTask(c, task)
		}
	}
}
// taskDelProc periodically tries to win the delete-task lock and run one
// round of danmaku deletion. Runs forever in its own goroutine.
func (s *Service) taskDelProc() {
	c := context.Background()
	ticker := time.NewTicker(time.Duration(s.conf.TaskConf.DelInterval))
	defer ticker.Stop()
	for range ticker.C {
		if err := s.taskSchedule(c); err != nil {
			log.Error("taskDelProc error(%v)", err)
		}
	}
}
// taskSchedule elects a single delete-job runner across instances with a
// redis lock (same SETNX/GETSET takeover pattern as maskSchedule): SETNX
// claims a fresh lock, and an expired lock is taken over via GETSET so only
// the contender that reads back the old expiry proceeds. On job failure the
// lock is released so another instance may retry.
func (s *Service) taskSchedule(c context.Context) (err error) {
	var (
		ok                                bool
		now                               = time.Now()
		expire                            = now.Add(time.Duration(s.conf.TaskConf.DelInterval))
		expireStr                         = expire.Format(time.RFC3339)
		oldExpireStr, oldExpireGetSetStr string
		oldExpire                         time.Time
	)
	if ok, err = s.dao.SetnxTaskJob(c, expireStr); err != nil {
		return
	}
	// lock key absent in redis: we own the lock, run the job
	if ok {
		if err = s.taskDelJob(c); err != nil {
			s.dao.DelTaskJob(c)
			log.Error("taskDelJob,error(%v)", err)
			return
		}
		return
	}
	// lock key already present in redis:
	// check whether the previous holder's lease has expired
	if oldExpireStr, err = s.dao.GetTaskJob(c); err != nil {
		return
	}
	if oldExpire, err = time.Parse(time.RFC3339, oldExpireStr); err != nil {
		return
	}
	if oldExpire.Sub(now) > 0 {
		return
	}
	// GETSET: only the contender that reads back the old value wins the lock
	if oldExpireGetSetStr, err = s.dao.GetSetTaskJob(c, expireStr); err != nil {
		return
	}
	if oldExpireGetSetStr != oldExpireStr {
		return
	}
	if err = s.taskDelJob(c); err != nil {
		s.dao.DelTaskJob(c)
		log.Error("taskDelJob,error(%v)", err)
		return
	}
	return
}
// taskDelJob picks one pending task, runs the danmaku deletion pass over
// its search result file, and persists the deleted count and final state on
// both the sub task and the task itself.
// TODO: operation_time && operation_rate
func (s *Service) taskDelJob(c context.Context) (err error) {
	var (
		task *model.TaskInfo
	)
	// nothing to do when no task is pending (or the lookup failed)
	if task, err = s.dao.OneTask(c); err != nil || task == nil {
		return
	}
	// mark in-progress before starting the (long) deletion pass
	task.State = model.TaskStateDelDM
	s.dao.UpdateTask(c, task)
	var delCount int64
	if delCount, task.LastIndex, task.State, err = s.taskDelDM(c, task); err != nil {
		return
	}
	// an unchanged in-progress state means the pass completed successfully
	if task.State == model.TaskStateDelDM {
		task.State = model.TaskStateSuc
	}
	if _, err = s.dao.UptSubTask(c, task.ID, delCount, time.Now()); err != nil {
		return
	}
	_, err = s.dao.UpdateTask(c, task)
	return
}
// taskDelDM downloads the task's result file (one danmaku per line, fields
// separated by \001) and deletes the listed danmaku in batches of
// s.conf.TaskConf.DelNum, grouped by oid. It returns the number of rows
// actually deleted, the index of the next unprocessed batch (for resuming),
// and the task state to persist. The loop yields early when a
// higher-priority task appears, the task state is changed externally, or
// the cumulative deletion limit is crossed.
func (s *Service) taskDelDM(c context.Context, eTask *model.TaskInfo) (delCount int64, lastIndex, state int32, err error) {
	taskDelNum := s.conf.TaskConf.DelNum
	taskResFieldLen := s.conf.TaskConf.ResFieldLen
	res, err := http.Get(eTask.Result)
	if err != nil {
		log.Error("s.taskDelDM.HttpGet(%s) error(%v)", eTask.Result, err)
		return
	}
	resp, err := ioutil.ReadAll(res.Body)
	// Close once, right after the read, instead of on both branches.
	res.Body.Close()
	if err != nil {
		log.Error("s.taskDelDM.ioutilRead error(%v)", err)
		return
	}
	lines := bytes.Split(resp, []byte("\n"))
	total := len(lines)
	n := (total-1)/taskDelNum + 1 // number of batches
	// Resume from the batch recorded on the task.
	for i := int(eTask.LastIndex); i < n; i++ {
		var (
			task    *model.TaskInfo
			subTask *model.SubTask
		)
		start := i * taskDelNum
		end := (i + 1) * taskDelNum
		if end > total {
			end = total
		}
		// Group this batch's dmids by oid so each oid is deleted in one call.
		OidDMid := make(map[int64][]int64)
		for _, line := range lines[start:end] {
			var dmid, oid int64
			fields := bytes.Split(line, []byte("\001"))
			if len(fields) < taskResFieldLen {
				log.Error("fields length too small:%d", len(fields))
				continue
			}
			if dmid, err = strconv.ParseInt(string(fields[0]), 10, 64); err != nil {
				log.Error("ParseInt(%s) error(%v)", string(fields[0]), err)
				continue
			}
			if oid, err = strconv.ParseInt(string(fields[1]), 10, 64); err != nil {
				log.Error("ParseInt(%s) error(%v)", string(fields[1]), err)
				continue
			}
			OidDMid[oid] = append(OidDMid[oid], dmid)
		}
		for oid, dmids := range OidDMid {
			var affected int64
			if affected, err = s.dao.DelDMs(c, oid, dmids, model.StateTaskDel); err != nil {
				log.Error("dm task(id:%d) del dm(oid:%d,dmids:%v) error(%v)", eTask.ID, oid, dmids, err)
				continue
			}
			if affected > 0 {
				s.OpLog(c, oid, 0, time.Now().Unix(), int(model.SubTypeVideo), dmids, "status", "", strconv.FormatInt(int64(model.StateTaskDel), 10), "弹幕任务删除", oplog.SourceManager, oplog.OperatorSystem)
				delCount += affected
				if _, err = s.dao.UptSubjectCount(c, model.SubTypeVideo, oid, affected); err != nil {
					log.Error("dm task update count(oid:%d,affected:%d) error(%v)", oid, affected, err)
				}
			}
			// Throttle per-oid deletions to limit database pressure.
			time.Sleep(50 * time.Millisecond)
		}
		if len(OidDMid) > 0 {
			log.Warn("dm task(id:%d) del dm(oid,dmids:%+v)", eTask.ID, OidDMid)
		}
		lastIndex = int32(i + 1)
		// Yield to a higher-priority task: persist progress and requeue.
		task, err = s.dao.OneTask(c)
		if err == nil && task != nil && task.ID != eTask.ID && task.Priority > eTask.Priority {
			state = model.TaskStateWait
			return
		}
		// Reload our own task to pick up external state changes (pause/cancel).
		// BUGFIX: the original checked the unrelated `task` variable for nil
		// here, so a nil reload slipped through and `eTask.State` below could
		// dereference nil. Reload into `cur` and only adopt it when valid.
		var cur *model.TaskInfo
		if cur, err = s.dao.TaskInfoByID(c, eTask.ID); err != nil || cur == nil {
			continue
		}
		eTask = cur
		state = eTask.State
		if state != model.TaskStateDelDM {
			return
		}
		if subTask, err = s.dao.SubTask(c, eTask.ID); err != nil || subTask == nil {
			continue
		}
		// Pause and notify the first time the cumulative deleted count
		// crosses the configured limit.
		tCount := subTask.Tcount + delCount
		if tCount >= s.conf.TaskConf.DelLimit && subTask.Tcount < s.conf.TaskConf.DelLimit {
			log.Warn("task(id:%d) del dm reach limit(count:%d)", eTask.ID, tCount)
			s.sendWechatWorkMsg(c, eTask, tCount)
			state = model.TaskStatePause
			return
		}
	}
	return
}
// sendWechatWorkMsg notifies the task creator, reviewer and the configured
// CC list that a deletion task has reached its limit.
func (s *Service) sendWechatWorkMsg(c context.Context, task *model.TaskInfo, count int64) (err error) {
	content := fmt.Sprintf(model.TaskNoticeContent, task.ID, task.Title, count)
	cc := s.conf.TaskConf.MsgCC
	// BUGFIX: appending directly to the shared config slice could write the
	// creator/reviewer into its backing array, leaking recipients between
	// calls. The full slice expression caps capacity so append must copy.
	users := append(cc[:len(cc):len(cc)], task.Creator, task.Reviewer)
	return s.dao.SendWechatWorkMsg(c, content, model.TaskNoticeTitle, users)
}
// OpLog puts a new infoc-format operation log into the channel. It never
// blocks: when the channel is full the record is dropped and an error is
// returned instead.
func (s *Service) OpLog(c context.Context, cid, operator, OperationTime int64, typ int, dmids []int64, subject, originVal, currentVal, remark string, source oplog.Source, operatorType oplog.OperatorType) (err error) {
	record := &oplog.Infoc{
		Oid:           cid,
		Type:          typ,
		DMIds:         dmids,
		Subject:       subject,
		OriginVal:     originVal,
		CurrentVal:    currentVal,
		OperationTime: strconv.FormatInt(OperationTime, 10),
		Source:        source,
		OperatorType:  operatorType,
		Operator:      operator,
		Remark:        remark,
	}
	// Non-blocking send: dropping is preferred over stalling the caller.
	select {
	case s.opsLogCh <- record:
	default:
		err = fmt.Errorf("opsLogCh full")
		log.Error("opsLogCh full (%v)", record)
	}
	return
}
// oplogproc drains s.opsLogCh and forwards each valid operation log to the
// infoc service, one record per dmid. Records failing the sanity check are
// logged and skipped. Runs until the channel is closed.
func (s *Service) oplogproc() {
	for opLog := range s.opsLogCh {
		// Sanity check required fields before emitting anything.
		// NOTE(review): Subject is logged twice below — the second argument
		// was likely meant to be a different field; confirm before changing.
		if len(opLog.Subject) == 0 || len(opLog.CurrentVal) == 0 || opLog.Source <= 0 ||
			opLog.Operator < 0 || opLog.OperatorType <= 0 {
			log.Warn("oplogproc() it is an illegal log, warn(%v, %v, %v)", opLog.Subject, opLog.Subject, opLog.CurrentVal)
			continue
		}
		// The redundant else-block after continue was flattened (idiom).
		for _, dmid := range opLog.DMIds {
			if dmid <= 0 {
				log.Warn("oplogproc() it is an illegal log, for dmid value, warn(%d, %+v)", dmid, opLog)
				continue
			}
			s.dmOperationLogSvc.Info(opLog.Subject, strconv.FormatInt(opLog.Oid, 10), strconv.Itoa(opLog.Type),
				strconv.FormatInt(dmid, 10), opLog.Source.String(), opLog.OriginVal,
				opLog.CurrentVal, strconv.FormatInt(opLog.Operator, 10), opLog.OperatorType.String(),
				opLog.OperationTime, opLog.Remark)
		}
	}
}

View File

@@ -0,0 +1,50 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
"github.com/smartystreets/goconvey/convey"
)
// TestServicetaskResProc smoke-tests taskResProc.
// NOTE(review): taskResProc appears to loop forever on a ticker, so this
// test blocks until the go test timeout — confirm this is intended.
func TestServicetaskResProc(t *testing.T) {
	convey.Convey("taskResProc", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			svr.taskResProc()
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}
// TestServicetaskDelProc smoke-tests taskDelProc.
// NOTE(review): taskDelProc loops forever on a ticker, so this test blocks
// until the go test timeout — confirm this is intended.
func TestServicetaskDelProc(t *testing.T) {
	convey.Convey("taskDelProc", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			svr.taskDelProc()
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}
// TestServicetaskDelDM runs taskDelDM against a result file hosted on an
// internal service, so it only passes inside the intranet with live
// database dependencies.
func TestServicetaskDelDM(t *testing.T) {
	convey.Convey("taskDelDM", t, func(ctx convey.C) {
		var (
			c = context.Background()
			// Minimal task: only the Result URL is consumed by taskDelDM's
			// download step; other fields default to zero values.
			task = &model.TaskInfo{
				Result: "http://berserker.bilibili.co/avenger/download/hdfs?path=/api/hive/query/148/672bc22888af701529e8b3052fd2c4a7/1543546463/1547966/result",
			}
		)
		ctx.Convey("When everything gose positive", func(ctx convey.C) {
			delCount, lastIndex, state, err := svr.taskDelDM(c, task)
			ctx.Convey("Then err should be nil.delCount,pause should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(state, convey.ShouldNotBeNil)
				ctx.So(delCount, convey.ShouldNotBeNil)
				t.Log(delCount, lastIndex, state)
			})
		})
	})
}

View File

@@ -0,0 +1,204 @@
package service
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// trackSubject applies a dm_subject binlog message to the subject cache:
// inserts and updates refresh the cached row, deletes drop it, and updates
// that change a render-affecting field additionally force-flush the danmaku
// caches for that oid.
func (s *Service) trackSubject(c context.Context, m *model.BinlogMsg) (err error) {
	nw := &model.Subject{}
	if err = json.Unmarshal(m.New, &nw); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.New, err)
		return
	}
	switch m.Action {
	case "insert":
		if err = s.dao.AddSubjectCache(c, nw); err != nil {
			log.Error("s.dao.AddSubjectCache(%v) error(%v)", nw, err)
			return
		}
	case "delete":
		if err = s.dao.DelSubjectCache(c, nw.Type, nw.Oid); err != nil {
			log.Error("s.dao.DelSubjectCahce(%v) error(%v)", nw, err)
			return
		}
	case "update":
		old := model.Subject{}
		if err = json.Unmarshal(m.Old, &old); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", m.Old, err)
			return
		}
		if err = s.dao.AddSubjectCache(c, nw); err != nil { // cache the full subject row
			log.Error("s.dao.AddSubjectCache(%v) error(%v)", nw, err)
			return
		}
		// Only flush danmaku caches when a field that affects rendered
		// danmaku changed (child pool, max on-screen limit, or state).
		if nw.Childpool != old.Childpool || nw.Maxlimit != old.Maxlimit || nw.State != old.State {
			// flush the whole-range danmaku cache immediately
			flush := &model.Flush{Oid: nw.Oid, Type: nw.Type, Force: true}
			s.flushDmCache(c, flush)
			// flush the segmented danmaku cache immediately
			s.flushXMLSegCache(c, nw)
		}
	}
	return
}
// trackIndex applies a dm_index binlog update: it refreshes the uploader's
// recent-danmaku list, force-flushes the whole-range and segmented danmaku
// caches, drops the content cache and, when special-danmaku fields changed,
// rebuilds the special-location data. Non-update actions are ignored.
func (s *Service) trackIndex(c context.Context, m *model.BinlogMsg) (err error) {
	if m.Action != "update" {
		return
	}
	dm := &model.DM{}
	old := &model.DM{}
	if err = json.Unmarshal(m.New, &dm); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.New, err)
		return
	}
	if err = json.Unmarshal(m.Old, &old); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.Old, err)
		return
	}
	s.asyncAddRecent(c, dm) // refresh the uploader's latest 1000 danmaku
	s.asyncAddFlushDM(c, &model.Flush{
		Type:  dm.Type,
		Oid:   dm.Oid,
		Force: true,
	}) // flush the whole-range danmaku cache
	sub, err := s.subject(c, dm.Type, dm.Oid)
	if err != nil {
		return
	}
	// Locate the segment page this danmaku belongs to, for the targeted
	// segment-cache flush below.
	p, err := s.pageinfo(c, sub.Pid, dm)
	if err != nil {
		return
	}
	if dm.NeedUpdateSpecial(old) {
		if err = s.specialLocationUpdate(c, dm.Type, dm.Oid); err != nil {
			return
		}
	}
	s.dao.DelIdxContentCaches(c, dm.Type, dm.Oid, dm.ID) // drop the content cache
	s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
		Type:  dm.Type,
		Oid:   dm.Oid,
		Force: true,
		Page:  p,
	})
	return
}
// trackVideoup syncs every video of an archive after a videoup notification,
// retrying each step up to 5 times with a one-second backoff between tries.
func (s *Service) trackVideoup(c context.Context, aid int64) (err error) {
	var (
		retry  = 5
		tp     = model.SubTypeVideo
		videos []*model.Video
	)
	// Fetch the archive's video list, retrying on transient errors.
	for i := 0; i < retry; i++ {
		if videos, err = s.dao.Videos(c, aid); err == nil {
			break
		}
		time.Sleep(time.Second)
	}
	if err != nil {
		log.Error("track video failed,aid(%d),error(%v)", aid, err)
		return
	}
	// Sync each video's danmaku subject, again with per-video retries.
	// Note: a video that fails all retries leaves err set but does not stop
	// the loop for the remaining videos.
	for _, v := range videos {
		for i := 0; i < retry; i++ {
			if err = s.syncVideo(c, tp, v); err == nil {
				break
			}
			time.Sleep(time.Second)
		}
	}
	return
}
// syncVideo reconciles the danmaku subject row with the given video: it
// creates the subject (optionally triggering danmaku-mask generation for
// whitelisted uploaders) when missing, and propagates uploader changes to
// the subject and its subtitles when present.
func (s *Service) syncVideo(c context.Context, tp int32, v *model.Video) (err error) {
	log.Info("sync video:%+v", v)
	sub, err := s.dao.Subject(c, tp, v.Cid)
	if err != nil {
		return
	}
	if sub == nil {
		// Only create the subject once HD transcoding has finished.
		if v.XCodeState >= model.VideoXcodeHDFinish {
			// Generate a danmaku mask when the uploader is on the mask
			// whitelist; record the mask-open attribute bit on success.
			var attr int32
			for _, mid := range s.maskMid {
				if mid == v.Mid {
					if err = s.dao.GenerateMask(c, v.Cid, mid, model.MaskPlatAll, model.MaskPriorityHgih, v.Aid, 0, 0); err != nil {
						break
					}
					attr = attr | (model.AttrYes << model.AttrSubMaskOpen)
					break
				}
			}
			if _, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), attr); err != nil {
				return
			}
		}
	} else {
		// Subject exists: when the uploader changed, update ownership on
		// the subject and all of its subtitles.
		if sub.Mid != v.Mid {
			if _, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid); err != nil {
				return
			}
			if err = s.updateSubtilte(c, tp, v); err != nil {
				log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
				return
			}
		}
	}
	return
}
// updateSubtilte (sic: "subtitle") reassigns every subtitle of the video to
// the new uploader mid and invalidates the related caches.
// NOTE(review): the misspelled name is kept because it is called from
// syncVideo; renaming would need a coordinated change.
func (s *Service) updateSubtilte(c context.Context, tp int32, v *model.Video) (err error) {
	var (
		subtitles []*model.Subtitle
		subtitle  *model.Subtitle
	)
	if subtitles, err = s.dao.GetSubtitles(c, tp, v.Cid); err != nil {
		log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
		return
	}
	for _, subtitle = range subtitles {
		// Transfer ownership of the subtitle to the video's new uploader.
		subtitle.UpMid = v.Mid
		if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
			log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
			return
		}
		s.dao.DelSubtitleCache(c, v.Cid, subtitle.ID)
		// Drafts and to-audit subtitles also have a per-author draft cache.
		if subtitle.Status == model.SubtitleStatusDraft || subtitle.Status == model.SubtitleStatusToAudit {
			s.dao.DelSubtitleDraftCache(c, v.Cid, tp, subtitle.Mid, subtitle.Lan)
		}
	}
	s.dao.DelVideoSubtitleCache(c, v.Cid, tp)
	return
}
// maxlimit returns the maximum number of danmaku kept for a video of the
// given duration in seconds. Longer videos get a larger pool; a zero
// duration (unknown length) falls back to 1500, and anything up to 30s
// gets the 100 minimum.
func (s *Service) maxlimit(duration int64) (limit int64) {
	if duration == 0 {
		return 1500
	}
	// Thresholds are checked from longest to shortest; the first exclusive
	// lower bound the duration exceeds decides the limit.
	tiers := []struct {
		minSeconds int64
		limit      int64
	}{
		{3600, 8000},
		{2400, 6000},
		{900, 3000},
		{600, 1500},
		{150, 1000},
		{60, 500},
		{30, 300},
	}
	for _, tier := range tiers {
		if duration > tier.minSeconds {
			return tier.limit
		}
	}
	return 100
}

View File

@@ -0,0 +1,15 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestTrackVideoup is an integration smoke test: it needs live archive and
// database dependencies and a valid aid, so it only passes in that
// environment.
func TestTrackVideoup(t *testing.T) {
	Convey("", t, func() {
		err := svr.trackVideoup(context.TODO(), 10114205)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,157 @@
package service
import (
"context"
"time"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// transferProc polls the transfer table once a minute and, after taking a
// distributed lock, runs every pending danmaku transfer sequentially. It
// loops forever and is expected to run in its own goroutine.
func (s *Service) transferProc() {
	var (
		c        = context.TODO()
		interval = time.Duration(time.Second * 60)
	)
	for {
		time.Sleep(interval)
		// Only one job instance may process transfers at a time.
		if !s.dao.AddTransferLock(c) {
			continue
		}
		trans, err := s.dao.Transfers(c, model.StatInit)
		if err != nil || len(trans) == 0 {
			// NOTE(review): the lock is not released on this path;
			// presumably AddTransferLock sets a TTL — confirm.
			continue
		}
		for _, t := range trans {
			log.Info("dm transfer(%+v) start", t)
			s.transfer(c, t)
		}
	}
}
// transfer moves every danmaku of t.FromCid onto t.ToCid in pages of 500,
// assigning each copy a fresh id, applying the configured time offset, and
// recording the last transferred dmid so an interrupted run can resume.
// Failures to resolve either subject mark the transfer as failed.
func (s *Service) transfer(c context.Context, t *model.Transfer) {
	var (
		err     error
		limit   int64 = 500
		startID       = t.Dmid // resume point: last dmid already transferred
		tp            = model.SubTypeVideo
	)
	t.State = model.StatTransfing
	if _, err = s.dao.UpdateTransfer(c, t); err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
		return
	}
	// Release the scheduler lock once the task is marked as running.
	if err = s.dao.DelTransferLock(c); err != nil {
		log.Error("s.dao.DelTransferLock() error")
	}
	targetSub, err := s.dao.Subject(c, tp, t.ToCid)
	if err != nil || targetSub == nil {
		log.Error("s.dao.Subject(cid:%d) error(%v)", t.ToCid, err)
		s.transerFailNow(c, t)
		return
	}
	originSub, err := s.dao.Subject(c, tp, t.FromCid)
	if err != nil || originSub == nil {
		log.Error("s.dao.Subject(cid:%d) error(%v)", t.ToCid, err)
		s.transerFailNow(c, t)
		return
	}
	for {
		// get transfer dm per page; retry the same page on error
		var dms []*model.DM
		if dms, err = s.transferDMS(c, tp, originSub.Oid, startID, limit); err != nil {
			time.Sleep(1 * time.Second)
			continue
		}
		if len(dms) == 0 {
			break
		}
		for _, dm := range dms {
			if dm.ID <= startID {
				continue
			} else {
				startID = dm.ID
			}
			// Allocate a globally unique id for the copied danmaku.
			var id int64
			if id, err = s.seqRPC.ID(c, s.seqArg); err != nil {
				log.Error("seqRPC.ID() error(%v)", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			if dm.Pool == model.PoolSpecial {
				dm.ContentSpe.ID = id
			}
			dm.Oid = targetSub.Oid // rewrite this dm's primary key id and oid
			dm.ID = id
			dm.Content.ID = id
			// Shift playback progress by the offset (seconds -> milliseconds).
			if t.Offset != 0 {
				dm.Progress = dm.Progress + int32(t.Offset*1000)
			}
			if err = s.actionAddDM(c, targetSub, dm); err != nil {
				continue
			}
			t.Dmid = startID // record the last transferred dmid
		}
		// Persist per-page progress so the transfer can resume after a crash.
		s.dao.UpdateTransfer(c, t)
		time.Sleep(1 * time.Second)
	}
	t.State = model.StatFinished
	if _, err = s.dao.UpdateTransfer(c, t); err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
	}
	// flush the target cid's danmaku caches
	s.flushDmCache(c, &model.Flush{Oid: t.ToCid, Type: tp, Force: true})
	s.flushAllDmSegCache(c, t.ToCid, tp)
}
// transerFailNow marks the transfer task as failed and persists the state;
// a persistence error is logged but not propagated.
func (s *Service) transerFailNow(c context.Context, t *model.Transfer) {
	t.State = model.StatFailed
	_, err := s.dao.UpdateTransfer(c, t)
	if err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
	}
}
// transferDMS fetches up to limit danmaku of (tp, oid) with id > minID,
// joining index rows with their content rows (and special content for
// danmaku in the special pool). Rows missing either part are skipped.
func (s *Service) transferDMS(c context.Context, tp int32, oid, minID, limit int64) (dms []*model.DM, err error) {
	contentSpec := make(map[int64]*model.ContentSpecial)
	idxMap, dmids, special, err := s.dao.DMIndexs(c, tp, oid, minID, limit)
	if err != nil {
		log.Error("s.dao.DMIndexs(oid:%d mindID:%d) error(%v)", oid, minID, err)
		return
	}
	if len(dmids) == 0 {
		return
	}
	contents, err := s.dao.Contents(c, oid, dmids)
	if err != nil {
		log.Error("s.dao.Contents(oid:%d dmids:%v) error(%v)", oid, dmids, err)
		return
	}
	// Load special-pool content only when the page contains special danmaku.
	if len(special) > 0 {
		if contentSpec, err = s.dao.ContentsSpecial(c, special); err != nil {
			log.Error("s.dao.ContentSpecials(oid:%d special:%v) error(%v)", oid, special, err)
			return
		}
	}
	// Stitch index + content (+ special content) together; drop any dmid
	// whose pieces are incomplete rather than returning a partial record.
	for _, dmid := range dmids {
		dm, ok := idxMap[dmid]
		if !ok {
			continue
		}
		content, ok := contents[dmid]
		if !ok {
			continue
		}
		dm.Content = content
		if dm.Pool == model.PoolSpecial {
			contentspe, ok := contentSpec[dm.ID]
			if !ok {
				continue
			}
			dm.ContentSpe = contentspe
		}
		dms = append(dms, dm)
	}
	return
}

View File

@@ -0,0 +1,33 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/job/main/dm2/model"
)
// TestTransferDMS is an integration test: it reads danmaku pages for
// oid 1012 from the test database, so it only passes with live deps.
func TestTransferDMS(t *testing.T) {
	Convey("test NewCommentList", t, func() {
		ll, err := svr.transferDMS(context.TODO(), 1, 1012, 0, 10)
		So(err, ShouldBeNil)
		So(ll, ShouldNotBeEmpty)
	})
}
// TestTransfer is an integration test: it runs a full danmaku transfer
// between two test cids and mutates the transfer row with ID 265 in the
// test database.
func TestTransfer(t *testing.T) {
	trans := &model.Transfer{
		ID:      265,
		FromCid: 1012,
		ToCid:   1211,
		Mid:     0,
		Dmid:    123,
		Offset:  0,
		State:   0,
	}
	Convey("transfer", t, func() {
		svr.transfer(context.TODO(), trans)
	})
}