Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

21
app/job/main/dm2/BUILD Normal file
View File

@@ -0,0 +1,21 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/job/main/dm2/cmd:all-srcs",
"//app/job/main/dm2/conf:all-srcs",
"//app/job/main/dm2/dao:all-srcs",
"//app/job/main/dm2/http:all-srcs",
"//app/job/main/dm2/model:all-srcs",
"//app/job/main/dm2/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,307 @@
### dm2-job 主站弹幕异步刷新服务。
#### V1.7.13
> 1. archive duration cache
#### V1.7.12
> 1. bnj danmu msg type
#### V1.7.11
> 1. remove cache.Cache
#### V1.7.10
> 1. bnj 随机数换成rand
#### V1.7.9
> 1. 批量查询数量可配置
#### V1.7.8
> 1. 去掉刷新新分段的逻辑
#### V1.7.7
> 1. 拜年祭配置通过直播的接口获取
#### V1.7.6
> 1. bnj 弹幕数据结构
#### V1.7.5
> 1. 去掉sync.pool
#### V1.7.4
> 1. 投稿的蒙版只传递aid参数
#### V1.7.3
> 1. fix bug
#### V1.7.2
> 1. bnj dm cache
#### V1.7.1
> 1. 弹幕蒙版参数
#### V1.7.0
> 1. bfs 使用sdk
#### V1.6.7
> 1. bnj 弹幕 添加等级限制
> 2. bnj 弹幕颜色为白色
> 3. bnj 弹幕 先发后审
#### V1.6.6
> 1. rebuild
#### V1.6.5
> 1. fix http response close
#### V1.6.4
> 1. 弹幕任务接口迭代
#### V1.6.4
> 1. 弹幕子任务增加删除计数
#### V1.6.3
> 1. fix
#### V1.6.2
> 1. dm mask add mid
#### V1.6.1
> 1. 弹幕大型任务
#### V1.6.0
> 1. 拜年祭弹幕直播转点播
#### V1.5.15
> 1. 拜年祭弹幕计数
#### V1.5.14
> 1. 增加web蒙版
#### V1.5.13
> 1. 弹幕转移 刷新分段弹幕缓存
#### V1.5.12
> 1. 弹幕缓存异步刷新
#### V1.5.11
> 1. fix 分段缓存的bug
#### V1.5.10
> 1. 注释掉新缓存的代码
#### V1.5.9
> 1. fix return
#### V1.5.8
> 1. 敏感词字数限制
#### V1.5.7
> 1. 添加全量弹幕的刷新日志
#### V1.5.6
> 1. 增加弹幕蒙版灰度up主名单
#### V1.5.5
> 1. xml分段弹幕刷新
#### V1.5.4
> 1. 类目生成的蒙版优先级降低
#### V1.5.3
> 1. 去掉debug
#### V1.5.2
> 1. dm transfer content
#### V1.5.1
> 1. 去掉debug日志
#### V1.5.0
> 1. 新增分段弹幕刷新逻辑
#### V1.4.19
> 1. 取消maskjob 验证已生成的逻辑
#### V1.4.18
> 1. 添加二级分区热门视频开启弹幕蒙版
#### V1.4.17
> 1. 字幕缓存更换key
#### V1.4.16
> 1. 字幕memcache 配置文件
#### V1.4.15
> 1. 增加字幕的job
#### V1.4.14
> 1. 增加生成蒙版请求日志
#### V1.4.13
> 1. rebuild master
#### V1.4.12
> 1. 增加指定mid新投稿生成弹幕蒙版
#### V1.4.11
> 1. 修复最新弹幕属性、弹幕池变更时不更新的bug
#### V1.4.10
> 1. 修复最新弹幕属性、弹幕池变更时不更新的bug
#### V1.4.9
> 1. 优化创作中心最新弹幕逻辑
#### V1.4.8
> 1. ajax缓存更新
#### V1.4.7
> 1. 删除老库的代码
#### V1.4.6
> 1. 稿件状态类型int8-->int32
#### V1.4.5
> 1. 修复未转码完成视频maxlimit问题
#### V1.4.4
> 1. 优化bytes.Buffer使用
#### V1.4.3
> 1. 新增稿件定时开放浏览消息
#### V1.4.2
> 1. 修复字幕弹幕不显示的bug
#### V1.4.1
> 1. 修复弹幕转移存在的bug
#### V1.4.0
> 1. 移除dm_index表的写入
> 2. 优化弹幕转移逻辑
> 3. 弹幕转移对接发号器
#### V1.3.3
> 1. 移除同步稿件逻辑中的无限重试策略
#### V1.3.2
> 1. 修复view接口数据格式不对导致的panic
#### V1.3.1
> 1. 修复稿件msg log格式
#### V1.3.0
> 1. 同步稿件过审消息,写入弹幕主题表
#### V1.2.21
> 1. 弹幕计数迁移到job
#### V1.2.20
> 1. subject 缓存穿透
#### V1.2.19
> 1. 更新最新弹幕
#### V1.2.18
> 1. update redis score in sortset
#### V1.2.17
> 1. update redis score in sortset
#### V1.2.16
> 1. 迁移blade master
#### V1.2.15
> 1. 重新构建databus sdk
#### V1.2.14
> 1. 弹幕隐藏增加详细日志
#### V1.2.13
> 1. 增加隐藏弹幕databus24h后隐藏弹幕恢复正常
#### V1.2.12
> 1. dm_subject新增字段
#### V1.2.11
> 1. 迁移至main目录
#### V1.2.10
> 1. 移除Stat-T的弹幕计数写入
#### V1.2.9
> 1. 移除无用的account rpc client
> 2. 暂停StatDM-T的弹幕计数写入
#### V1.2.8
> 1. 替换realname-->real_name
#### V1.2.7
> 1. 替换xml非法字符
#### V1.2.6
> 1. 弹幕转移代码优化
#### V1.2.5
> 1. 迁移弹幕转移到弹幕dm2-job
#### V1.2.4
> 1. 弹幕发送计数迁移到dm2-job
#### V1.2.3
> 1. 增加禁止发送弹幕功能
> 2. xml header中增加state字段
#### V1.2.2
> 1. 迁移最新弹幕以及弹幕缓存增量更新逻辑
#### V1.2.1
> 1. 弹幕计数双写databus topic
#### V1.2.0
> 1. 新增弹幕发送落库双写逻辑
#### V1.1.2
> 1. 弹幕列表实名制
> 2. 更改弹幕实名制白名单
#### V1.1.1
> 1. 移除childpool 3相应逻辑
#### V1.1.0
> 1. 更改弹幕列表v1回源逻辑
> 2. 增加最新弹幕消费协程数
#### V1.0.10
> 1. Page2 to Page3
#### V1.0.9
> 1. 每次聚合操作后强制刷新缓存
#### V1.0.8
> 1. 修复不同弹幕池弹幕缓存不同时过期的问题
#### V1.0.7
> 1. 新增up主最新弹幕刷新逻辑
#### V1.0.6
> 1. 新增一波单元测试
#### V1.0.5
> 1. 新增异步通知刷新逻辑
#### V1.0.4
> 1. 移除并发锁逻辑
> 2. 聚合delete刷新操作
#### V1.0.3
> 1. redis缓存采用pb结构
#### V1.0.2
> 1. 新增弹幕列表刷新逻辑
#### V1.0.1
> 1. 新增稿件弹幕数刷新功能
#### V1.0.0
> 1. 初始版本

View File

@@ -0,0 +1,10 @@
# Owner
liangkai
renwei
# Author
liangkai
# Reviewer
liangkai
renwei

16
app/job/main/dm2/OWNERS Normal file
View File

@@ -0,0 +1,16 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- liangkai
- renwei
labels:
- job
- job/main/dm2
- main
options:
no_parent_owners: true
reviewers:
- guhao
- liangkai
- renwei
- renyashun

View File

@@ -0,0 +1,5 @@
### dm2-job
##### 项目说明
1. 主站弹幕列表刷新服务
2. 稿件弹幕数刷新服务

View File

@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = ["dm2-job.toml"],
importpath = "go-common/app/job/main/dm2/cmd",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/dm2/conf:go_default_library",
"//app/job/main/dm2/http:go_default_library",
"//app/job/main/dm2/service:go_default_library",
"//library/log:go_default_library",
"//library/net/trace:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,263 @@
# This is a TOML document. Boom.
version = "1.0.0"
user = "nobody"
pid = "/tmp/dm2-job.pid"
dir = "./"
checkFile = "/data/www/dm2-job.html"
[infoc2]
taskID = "000279"
proto = "tcp"
addr = "172.18.33.125:15140"
chanSize = 10240
[db]
[db.dmwriter]
addr = "172.16.33.205:3310"
dsn = "test_3308:test_3308@tcp(172.16.33.205:3310)/bilibili_dm_meta?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8mb4"
active = 10
idle = 5
idleTimeout ="4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.dmwriter.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.dmreader]
addr = "172.16.33.205:3310"
dsn = "test_3308:test_3308@tcp(172.16.33.205:3310)/bilibili_dm_meta?timeout=5s&readTimeout=5s&writeTimeout=5s&parseTime=true&loc=Local&charset=utf8mb4"
active = 10
idle = 5
idleTimeout ="4h"
queryTimeout = "500ms"
execTimeout = "500ms"
tranTimeout = "500ms"
[db.dmreader.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[db.biliDMWriter]
addr = "172.16.33.205:3308"
dsn = "test:test@tcp(172.16.33.205:3308)/bilibili_dm?timeout=5s&readTimeout=5s&writeTimeout=15s&parseTime=true&loc=Local&charset=utf8"
active = 5
idle = 5
idleTimeout ="4h"
queryTimeout = "3s"
execTimeout = "3s"
tranTimeout = "3s"
[db.biliDMWriter.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[redis]
[redis.dm]
name = "dm2-job"
proto = "tcp"
addr = "127.0.0.1:6379"
active = 10
idle = 5
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
expire = "24h"
[redis.dmRct]
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.54:6379"
active = 10
idle = 5
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
expire = "720h"
[memcache]
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.54:11211"
idle = 5
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "80s"
expire = "24h"
[subtitlememcache]
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.54:11211"
idle = 5
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "80s"
expire = "24h"
[archiveRPC]
group = "test"
timeout = "1s"
[databus]
[databus.indexCsmr]
key = "0Pub71KvEMKXu63qtztq"
secret = "0Pub71KvEMKXu63qtztr"
group = "DMMeta-UGC-S"
topic = "DMMeta-T"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[databus.subjectCsmr]
key = "0Pub71KvEMKXu63qtztq"
secret = "0Pub71KvEMKXu63qtztr"
group = "DMSubject-UGC-S"
topic = "DMSubject-T"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[databus.actionCsmr]
key = "0Pub71KvEMKXu63qtztq"
secret = "0Pub71KvEMKXu63qtztr"
group = "DMAction-UGC-S"
topic = "DMAction-T"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[databus.reportCsmr]
key = "170e302355453683"
secret = "51b66df3ebeca2b08f0017f350b6f0ce"
group = "DMReport-MainCommunity-S"
topic = "DMReport-T"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.16.33.158:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[databus.videoupCsmr]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group = "Videoup2Bvc-MainCommunity-S"
topic = "Videoup2Bvc"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.18.33.50:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[databus.subtitleAuditCsmr]
key = "170e302355453683"
secret = "3d0e8db7bed0503949e545a469789279"
group = "SubtitleCheck-MainCommunity-S"
topic = "SubtitleCheck-T"
action = "sub"
name = "dm2-job"
proto = "tcp"
addr = "172.16.38.154:6205"
idle = 1
active = 1
dialTimeout = "1s"
readTimeout = "40s"
writeTimeout = "1s"
idleTimeout = "60s"
[httpClient]
key = "f6433799dbd88751"
secret = "36f8ddb1806207fe07013ab6a77a3935"
dial = "1s"
timeout = "2s"
keepAlive = "60s"
timer = 1000
[httpClient.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[bfsClient]
key = "f6433799dbd88751"
secret = "36f8ddb1806207fe07013ab6a77a3935"
dial = "1s"
timeout = "2s"
keepAlive = "60s"
timer = 1000
[bfsClient.breaker]
window = "3s"
sleep = "100ms"
bucket = 10
ratio = 0.5
request = 100
[host]
videoup = "http://uat-archive.api.bilibili.co"
mask = "http://172.22.33.111:8200"
datarank = "http://data-test.bilibili.co"
merakHost="http://merak.bilibili.co"
[realname]
"1,2" = 2
[seqRPC]
policy = "sharding"
timeout = "1s"
[seq]
businessID = 10
token = "Nf9phmDdzjTMW9M5V8YQuLpVTwhvn5IO"
[maskCate]
tids=[185]
interval = "10m"
[taskConf]
delInterval = "1m"
resInterval = "10m"
ResFieldLen = 7
DelNum = 100
DelLimit = 20
msgCC = ["guhao","fengduzhen"]
MsgPublicKey= "71f079db59672ecec5b8d6f252c4b59ab2a8a227mainsite@bilibili.com"

View File

@@ -0,0 +1,51 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"time"
"go-common/app/job/main/dm2/conf"
"go-common/app/job/main/dm2/http"
"go-common/app/job/main/dm2/service"
"go-common/library/log"
"go-common/library/net/trace"
)
// main is the dm2-job entry point: it loads config, initializes logging and
// tracing, starts the service and HTTP server, then blocks waiting for a
// termination signal.
func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		log.Error("conf.Init() error(%v)", err)
		panic(err)
	}
	// init log
	log.Init(conf.Conf.Xlog)
	trace.Init(conf.Conf.Tracer)
	defer trace.Close()
	defer func() {
		log.Close()
		// wait for a while to guarantee that all log messages are written
		time.Sleep(50 * time.Millisecond)
	}()
	// service init
	svc := service.New(conf.Conf)
	http.Init(conf.Conf, svc)
	// init signal
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
	for {
		s := <-c
		log.Info("dm2-job get a signal %s", s.String())
		switch s {
		// NOTE: SIGSTOP was removed from this case list: it was never
		// registered with signal.Notify above and cannot be caught by a
		// process anyway (POSIX), so matching on it was dead code.
		case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
			log.Info("dm2-job exit")
			return
		case syscall.SIGHUP:
			// TODO reload
		default:
			return
		}
	}
}

View File

@@ -0,0 +1,44 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/job/main/dm2/conf",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/conf:go_default_library",
"//library/database/bfs:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/log/infoc:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/rpc:go_default_library",
"//library/net/rpc/warden:go_default_library",
"//library/net/trace:go_default_library",
"//library/queue/databus:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,230 @@
package conf
import (
"errors"
"flag"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/conf"
"go-common/library/database/bfs"
"go-common/library/database/sql"
"go-common/library/log"
"go-common/library/log/infoc"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/rpc"
"go-common/library/net/rpc/warden"
"go-common/library/net/trace"
"go-common/library/queue/databus"
"go-common/library/time"
"github.com/BurntSushi/toml"
)
var (
	// confPath is the local config file path supplied via the -conf flag.
	confPath string
	// client is the remote config-center client; nil when a local file is used.
	client *conf.Client
	// Conf is the exported global configuration instance.
	Conf = &Config{}
)

// Config is the dm2-job (danmaku async refresh service) configuration.
type Config struct {
	// log
	Xlog   *log.Config
	Infoc2 *infoc.Config
	// tracer
	Tracer *trace.Config
	// http
	HTTPServer *bm.ServerConfig
	// database
	DB *DB
	// redis
	Redis *Redis
	// memcache
	Memcache *Memcache
	// subtitle cache
	SubtitleMemcache *Memcache
	DMMemcache       *Memcache
	// archive rpc client
	ArchiveRPC *rpc.ClientConfig
	// seq-server rpc client
	SeqRPC *rpc.ClientConfig
	Seq    *Seq
	// databus config
	Databus *Databus
	// dm list realname whitelist (key is an id range string — see the
	// [realname] section of the TOML, e.g. "1,2" = 2; confirm semantics
	// against the consumer of this map)
	Realname   map[string]int64
	HTTPClient *bm.ClientConfig
	Host       *Host
	BFSClient  *bm.ClientConfig
	// filter grpc client
	FliterRPC *warden.ClientConfig
	// mask generation by category
	MaskCate *MaskCate
	// bfs upload
	Bfs *Bfs
	// cache refresh goroutine pool size
	RoutineSize int
	// bainian-ji (new year gala) activity
	BNJ *BNJ
	// large danmaku task config
	TaskConf *TaskConf
}
// BNJ holds the bainian-ji (new year gala) activity configuration.
type BNJ struct {
	Aid          int64
	BnjCounter   *BnjCounter
	BnjLiveDanmu *BnjLiveDanmu
}

// BnjCounter lists the sub-archive aids whose danmaku are counted
// for the gala activity.
type BnjCounter struct {
	SubAids []int64
}

// BnjLiveDanmu configures the live-to-vod danmaku transfer for the
// gala live room.
type BnjLiveDanmu struct {
	RoomID int64
	Start  string
	// IgnoreRate drives random sampling of incoming live danmu —
	// exact semantics depend on the consumer; confirm before relying on it.
	IgnoreRate int64
	// Level is the minimum sender user level.
	Level       int32
	IgnoreBegin time.Duration
	IgnoreEnd   time.Duration
}

// BNJVideo describes one gala video segment (cid and its duration).
type BNJVideo struct {
	Cid      int64
	Duration float64
}

// Bfs holds the bfs client config and the danmaku bucket name.
type Bfs struct {
	Client *bfs.Config
	Dm     string
}

// Seq holds the seq-server (ID allocator) business credentials.
type Seq struct {
	BusinessID int64
	Token      string
}
// DB groups the MySQL configurations used by the job.
type DB struct {
	DMReader *sql.Config
	DMWriter *sql.Config
	// BiliDMWriter writes to the legacy bilibili_dm database.
	BiliDMWriter *sql.Config
	// QueryPageSize is the batch size for paged queries; values <= 0
	// fall back to the dao default.
	QueryPageSize int32
}

// Redis groups the redis pools, each with its own key TTL.
type Redis struct {
	DM *struct {
		*redis.Config
		Expire time.Duration
	}
	// DMRct is the "recent danmaku" pool.
	DMRct *struct {
		*redis.Config
		Expire time.Duration
	}
	// DMSeg is the segmented danmaku pool.
	DMSeg *struct {
		*redis.Config
		Expire time.Duration
	}
}

// Memcache is a memcache pool config plus its item TTL.
type Memcache struct {
	*memcache.Config
	Expire time.Duration
}

// Databus groups the databus consumer configurations.
type Databus struct {
	IndexCsmr         *databus.Config
	SubjectCsmr       *databus.Config
	ActionCsmr        *databus.Config
	ReportCsmr        *databus.Config
	VideoupCsmr       *databus.Config
	SubtitleAuditCsmr *databus.Config
	BnjCsmr           *databus.Config
}

// Host lists the upstream HTTP service hosts.
type Host struct {
	Videoup   string
	Mask      string
	DataRank  string
	MerakHost string
	APILive   string
}

// MaskCate configures category-driven mask generation.
type MaskCate struct {
	// Tids are the second-level category ids eligible for mask generation.
	Tids     []int64
	Interval time.Duration
	Limit    int
}

// TaskConf configures the large danmaku task worker.
type TaskConf struct {
	DelInterval  time.Duration
	ResInterval  time.Duration
	ResFieldLen  int
	DelNum       int
	DelLimit     int64
	// MsgCC lists additional recipients of task notification messages.
	MsgCC        []string
	MsgPublicKey string
}
// init registers the -conf command line flag used to select a local
// config file instead of the remote config center.
func init() {
	flag.StringVar(&confPath, "conf", "", "config path")
}
// Init loads the configuration: from the local file named by the -conf
// flag when one was given, otherwise from the remote config center.
func Init() error {
	if confPath == "" {
		return remote()
	}
	return local()
}
// local decodes the TOML file at confPath into the global Conf.
func local() (err error) {
	_, err = toml.DecodeFile(confPath, &Conf)
	return
}
// remote initializes the config-center client, performs the initial load,
// and spawns a goroutine that reloads the config on every change event.
func remote() (err error) {
	if client, err = conf.New(); err != nil {
		return
	}
	if err = load(); err != nil {
		return
	}
	go func() {
		for range client.Event() {
			log.Info("config reload")
			// BUG FIX: the original checked `load() != nil` but then logged
			// the outer named `err`, which is nil here — the actual reload
			// error was discarded. Capture and log the real error instead.
			if e := load(); e != nil {
				log.Error("config reload error (%v)", e)
			}
		}
	}()
	return
}
func load() (err error) {
var tmpConf *Config
value, ok := client.Toml2()
if !ok {
return errors.New("load config center error")
}
if _, err = toml.Decode(value, &tmpConf); err != nil {
return errors.New("could not decode config")
}
*Conf = *tmpConf
return
}

View File

@@ -0,0 +1,95 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"bfs_test.go",
"dao_test.go",
"mask_test.go",
"mc_special_test.go",
"mc_subtitle_test.go",
"memcache_seg_test.go",
"memcache_test.go",
"mysql_dm_special_test.go",
"mysql_test.go",
"rank_list_test.go",
"redis_mask_test.go",
"redis_rct_test.go",
"redis_seg_test.go",
"redis_task_test.go",
"redis_test.go",
"task_test.go",
"transfer_test.go",
"videoup_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/job/main/dm2/conf:go_default_library",
"//app/job/main/dm2/model:go_default_library",
"//library/log:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"bfs.go",
"bnj.go",
"dao.go",
"mask.go",
"mc_special.go",
"mc_subtitle.go",
"memcache.go",
"memcache_seg.go",
"mysql.go",
"mysql_dm_special.go",
"rank_list.go",
"redis.go",
"redis_mask.go",
"redis_rct.go",
"redis_seg.go",
"redis_task.go",
"task.go",
"transfer.go",
"videoup.go",
],
importpath = "go-common/app/job/main/dm2/dao",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/job/main/dm2/conf:go_default_library",
"//app/job/main/dm2/model:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/database/bfs:go_default_library",
"//library/database/sql:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/metadata:go_default_library",
"//library/sync/errgroup:go_default_library",
"//library/xstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,39 @@
package dao
import (
"context"
"net/http"
"go-common/library/database/bfs"
"go-common/library/log"
)
// BfsData downloads and returns the raw bytes stored at the given bfs URL.
func (d *Dao) BfsData(c context.Context, bfsURL string) (bs []byte, err error) {
	req, err := http.NewRequest(http.MethodGet, bfsURL, nil)
	if err != nil {
		log.Error("NewRequest(bfsURL:%v),error(%v)", bfsURL, err)
		return
	}
	if bs, err = d.httpCli.Raw(c, req); err != nil {
		log.Error("Raw(bfsURL:%v),error(%v)", bfsURL, err)
	}
	return
}
// BfsDmUpload uploads the given bytes as a JSON object to the dm bucket
// and returns the resulting bfs location.
func (d *Dao) BfsDmUpload(c context.Context, fileName string, bs []byte) (location string, err error) {
	req := &bfs.Request{
		Bucket:      d.conf.Bfs.Dm,
		Filename:    fileName,
		ContentType: "application/json",
		File:        bs,
	}
	if location, err = d.bfsCli.Upload(c, req); err != nil {
		log.Error("bfs.BfsDmUpload error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,33 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoBfsData smoke-tests BfsData; return values are ignored, so this
// only exercises the call path (an empty URL is expected to fail fast).
func TestDaoBfsData(t *testing.T) {
	convey.Convey("BfsData", t, func(ctx convey.C) {
		var (
			c      = context.Background()
			bfsURL = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.BfsData(c, bfsURL)
		})
	})
}

// TestDaoBfsDmUpload smoke-tests BfsDmUpload with a dummy payload.
func TestDaoBfsDmUpload(t *testing.T) {
	convey.Convey("BfsDmUpload", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			fileName = ""
			bs       = []byte("231231231231")
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.BfsDmUpload(c, fileName, bs)
		})
	})
}

View File

@@ -0,0 +1,34 @@
package dao
import (
	"context"
	"fmt"

	"go-common/app/job/main/dm2/model"
	"go-common/library/log"
	"go-common/library/net/metadata"
)
const (
	// _bnjLiveConfig is the live-API path of the gala activity config.
	_bnjLiveConfig = "/activity/v0/bainian/config"
)

// bnjConfigURI returns the full URL of the gala live config endpoint.
func (d *Dao) bnjConfigURI() string {
	return d.conf.Host.APILive + _bnjLiveConfig
}
// BnjConfig fetches the gala live-danmaku configuration from the live API.
// It returns a non-nil error when the transport fails or when the API
// answers with a non-zero business code.
func (d *Dao) BnjConfig(c context.Context) (bnjConfig *model.BnjLiveConfig, err error) {
	var (
		res struct {
			Code    int64                `json:"code"`
			Message string               `json:"message"`
			Data    *model.BnjLiveConfig `json:"data"`
		}
	)
	if err = d.httpCli.Get(c, d.bnjConfigURI(), metadata.String(c, metadata.RemoteIP), nil, &res); err != nil {
		log.Error("bnjLiveConfig BnjConfig(url:%v) error(%v)", d.bnjConfigURI(), err)
		return
	}
	// BUG FIX: a non-zero business code used to be treated as success,
	// silently returning a nil/partial config with a nil error.
	if res.Code != 0 {
		err = fmt.Errorf("uri:%s,code:%d,message:%s", d.bnjConfigURI(), res.Code, res.Message)
		log.Error("bnjLiveConfig BnjConfig error(%v)", err)
		return
	}
	bnjConfig = res.Data
	return
}

138
app/job/main/dm2/dao/dao.go Normal file
View File

@@ -0,0 +1,138 @@
package dao
import (
"context"
"time"
"go-common/app/job/main/dm2/conf"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/database/bfs"
"go-common/library/database/sql"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
const (
	// _pageSize is the default batch size for paged DB queries, used when
	// db.queryPageSize is unset or non-positive.
	_pageSize = 1000
)

// Dao bundles every storage client used by dm2-job.
type Dao struct {
	conf *conf.Config
	// batch query size
	pageSize int
	// database
	dmWriter     *sql.DB
	dmReader     *sql.DB
	biliDMWriter *sql.DB
	// redis (expire fields are TTLs in seconds)
	dmRds       *redis.Pool
	dmRdsExpire int32
	// recent dm redis
	dmRctRds    *redis.Pool
	dmRctExpire int32
	// segment dm redis
	dmSegRds    *redis.Pool
	dmSegExpire int32
	// memcache
	mc               *memcache.Pool
	mcExpire         int32
	subtitleMc       *memcache.Pool
	subtitleMcExpire int32
	// memcache new
	dmSegMC       *memcache.Pool
	dmSegMCExpire int32
	// recent dm redis
	// NOTE(review): rctRds is built from the same c.Redis.DMRct config as
	// dmRctRds in New, creating two pools to the same server — confirm the
	// duplication is intentional.
	rctRds       *redis.Pool
	rctRdsExpire int32
	// http client
	httpCli *bm.Client
	// upload dm
	bfsCli *bfs.BFS
}
// New builds a Dao from config: MySQL pools, redis pools, memcache pools,
// the HTTP client and the bfs client. TTLs are converted to whole seconds.
func New(c *conf.Config) (d *Dao) {
	d = &Dao{
		conf:             c,
		dmWriter:         sql.NewMySQL(c.DB.DMWriter),
		dmReader:         sql.NewMySQL(c.DB.DMReader),
		biliDMWriter:     sql.NewMySQL(c.DB.BiliDMWriter),
		dmRds:            redis.NewPool(c.Redis.DM.Config),
		dmRdsExpire:      int32(time.Duration(c.Redis.DM.Expire) / time.Second),
		dmRctRds:         redis.NewPool(c.Redis.DMRct.Config),
		dmRctExpire:      int32(time.Duration(c.Redis.DMRct.Expire) / time.Second),
		dmSegRds:         redis.NewPool(c.Redis.DMSeg.Config),
		dmSegExpire:      int32(time.Duration(c.Redis.DMSeg.Expire) / time.Second),
		mc:               memcache.NewPool(c.Memcache.Config),
		mcExpire:         int32(time.Duration(c.Memcache.Expire) / time.Second),
		subtitleMc:       memcache.NewPool(c.SubtitleMemcache.Config),
		subtitleMcExpire: int32(time.Duration(c.SubtitleMemcache.Expire) / time.Second),
		dmSegMC:          memcache.NewPool(c.DMMemcache.Config),
		dmSegMCExpire:    int32(time.Duration(c.DMMemcache.Expire) / time.Second),
		// NOTE(review): second pool built from the same DMRct config as
		// dmRctRds above — confirm the duplication is intentional.
		rctRds:       redis.NewPool(c.Redis.DMRct.Config),
		rctRdsExpire: int32(time.Duration(c.Redis.DMRct.Expire) / time.Second),
		httpCli:      bm.NewClient(c.HTTPClient),
		bfsCli:       bfs.New(c.Bfs.Client),
		pageSize:     int(c.DB.QueryPageSize),
	}
	// fall back to the default batch size when not configured
	if d.pageSize <= 0 {
		d.pageSize = _pageSize
	}
	return
}
// BeginTran begins a transaction on the dm-meta writer database.
func (d *Dao) BeginTran(c context.Context) (*sql.Tx, error) {
	return d.dmWriter.Begin(c)
}

// BeginBiliDMTran begins a transaction on the legacy bilibili_dm writer database.
func (d *Dao) BeginBiliDMTran(c context.Context) (*sql.Tx, error) {
	return d.biliDMWriter.Begin(c)
}
// Ping health-checks every backing store: the three MySQL handles, the
// memcache pool and the three redis pools. It returns the first failure.
func (d *Dao) Ping(c context.Context) (err error) {
	if err = d.dmWriter.Ping(c); err != nil {
		log.Error("dmWriter.Ping() error(%v)", err)
		return
	}
	if err = d.dmReader.Ping(c); err != nil {
		log.Error("dmReader.Ping() error(%v)", err)
		return
	}
	if err = d.biliDMWriter.Ping(c); err != nil {
		log.Error("biliDMWriter.Ping() error(%v)", err)
		return
	}
	// mc: probe with a throwaway key (Expiration 0 means no TTL here)
	mconn := d.mc.Get(c)
	defer mconn.Close()
	if err = mconn.Set(&memcache.Item{Key: "ping", Value: []byte("pong"), Expiration: 0}); err != nil {
		log.Error("mc.Set error(%v)", err)
		return
	}
	// dm redis
	dmRdsConn := d.dmRds.Get(c)
	defer dmRdsConn.Close()
	if _, err = dmRdsConn.Do("SET", "ping", "pong"); err != nil {
		log.Error("dmRds.Set error(%v)", err)
		return
	}
	// recent dm redis
	rctRdsConn := d.dmRctRds.Get(c)
	defer rctRdsConn.Close()
	if _, err = rctRdsConn.Do("SET", "ping", "pong"); err != nil {
		log.Error("rctRds.Set error(%v)", err)
		return
	}
	// segment dm redis
	dmSegConn := d.dmSegRds.Get(c)
	defer dmSegConn.Close()
	if _, err = dmSegConn.Do("SET", "ping", "pong"); err != nil {
		log.Error("dmSegConn.Set error(%v)", err)
		return
	}
	return
}

View File

@@ -0,0 +1,32 @@
package dao
import (
"flag"
"os"
"testing"
"go-common/app/job/main/dm2/conf"
"go-common/library/log"
)
// testDao is the shared Dao used by every test in this package.
var testDao *Dao

// TestMain points the config client at the UAT config center, initializes
// config and logging, builds the shared Dao and then runs the suite.
func TestMain(m *testing.M) {
	// errors from flag.Set are deliberately ignored; these flags are
	// registered by the conf/env packages at init time.
	flag.Set("app_id", "main.community.dm2-job")
	flag.Set("conf_token", "m7xxj8RU7YxRK0fmRocmD9SoGUGzsZSA")
	flag.Set("tree_id", "5391")
	flag.Set("conf_version", "docker-1")
	flag.Set("deploy_env", "uat")
	flag.Set("conf_host", "config.bilibili.co")
	flag.Set("conf_path", "/tmp")
	flag.Set("region", "sh")
	flag.Set("zone", "sh001")
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Init(conf.Conf.Xlog)
	defer log.Close()
	testDao = New(conf.Conf)
	os.Exit(m.Run())
}

View File

@@ -0,0 +1,61 @@
package dao
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"go-common/library/log"
)
// _mask is the AI service path for requesting danmaku mask generation.
const _mask = "/dl/api/masks/v1"

// maskURI returns the full URL of the mask generation endpoint.
func (d *Dao) maskURI() string {
	return d.conf.Host.Mask + _mask
}
// GenerateMask asks the AI service to generate a danmaku mask for the given
// video. The request is attempted up to three times on transport failure or
// on a non-200 business code; the last error (if any) is returned.
func (d *Dao) GenerateMask(c context.Context, cid, mid int64, plat int8, priority int32, aid int64, duration int64, typeID int32) (err error) {
	var res struct {
		Code    int64  `json:"errcode"`
		Message string `json:"errmsg"`
	}
	params := map[string]interface{}{
		"cid":           cid,
		"mask_platform": plat,
		"priority":      priority,
		"mid":           mid,
		"aid":           aid,
		"duration":      duration,
		"region_2":      typeID,
	}
	data, err := json.Marshal(params)
	if err != nil {
		log.Error("json.Marshal(%v) error(%v)", params, err)
		return
	}
	for i := 0; i < 3; i++ {
		// BUG FIX: the request must be rebuilt on every attempt. The original
		// reused a single *http.Request whose body reader was drained by the
		// first Do, so every retry was sent with an empty body.
		req, reqErr := http.NewRequest(http.MethodPost, d.maskURI(), bytes.NewReader(data))
		if reqErr != nil {
			log.Error("http.NewRequest error(%v)", reqErr)
			return reqErr
		}
		req.Header.Set("Content-Type", "application/json")
		if err = d.httpCli.Do(c, req, &res); err != nil {
			log.Error("d.httpCli.DO(%v) error(%v)", req, err)
			continue
		}
		if res.Code != 200 {
			err = fmt.Errorf("uri:%s,code:%d", d.maskURI(), res.Code)
			log.Error("http code error(%v)", err)
			continue
		}
		log.Info("send generate mask request succeed(cid:%d)", cid)
		break
	}
	return
}

View File

@@ -0,0 +1,34 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaomaskURI checks that the mask endpoint URL is non-nil.
func TestDaomaskURI(t *testing.T) {
	convey.Convey("maskURI", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.maskURI()
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoGenerateMask smoke-tests GenerateMask with zero values; the
// return value is deliberately ignored, so this only checks the call path.
func TestDaoGenerateMask(t *testing.T) {
	convey.Convey("GenerateMask", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			cid      = int64(0)
			mid      = int64(0)
			plat     = int8(0)
			priority = int32(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.GenerateMask(c, cid, mid, plat, priority, 0, 0, 0)
		})
	})
}

View File

@@ -0,0 +1,52 @@
package dao
import (
"context"
"fmt"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/memcache"
"go-common/library/log"
)
const (
	// _fmtSpecialDm is the memcache key template for special danmaku:
	// "s_special_<oid>_<type>".
	_fmtSpecialDm = "s_special_%d_%d"
)

// specialDmKey builds the special-danmaku cache key for oid/tp.
func (d *Dao) specialDmKey(oid int64, tp int32) string {
	return fmt.Sprintf(_fmtSpecialDm, oid, tp)
}
// DelSpecialDmCache evicts the special-danmaku cache entry for oid/tp.
// A cache miss is treated as success.
func (d *Dao) DelSpecialDmCache(c context.Context, oid int64, tp int32) (err error) {
	key := d.specialDmKey(oid, tp)
	conn := d.dmSegMC.Get(c)
	defer conn.Close()
	switch err = conn.Delete(key); err {
	case nil:
	case memcache.ErrNotFound:
		err = nil
	default:
		log.Error("memcache.Delete(%s) error(%v)", key, err)
	}
	return
}
// AddSpecialDmCache stores the special-danmaku content in memcache as JSON,
// using the configured dm-segment TTL.
func (d *Dao) AddSpecialDmCache(c context.Context, ds *model.DmSpecial) (err error) {
	key := d.specialDmKey(ds.Oid, ds.Type)
	conn := d.dmSegMC.Get(c)
	defer conn.Close()
	if err = conn.Set(&memcache.Item{
		Key:        key,
		Object:     ds,
		Flags:      memcache.FlagJSON,
		Expiration: d.dmSegMCExpire,
	}); err != nil {
		log.Error("conn.Set(%s) error(%v)", key, err)
	}
	return
}

View File

@@ -0,0 +1,52 @@
package dao
import (
"context"
"go-common/app/job/main/dm2/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaospecialDmKey checks the key builder returns a non-nil value.
func TestDaospecialDmKey(t *testing.T) {
	convey.Convey("specialDmKey", t, func(ctx convey.C) {
		var (
			oid = int64(0)
			tp  = int32(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.specialDmKey(oid, tp)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoDelSpecialDmCache smoke-tests the cache delete (result ignored).
func TestDaoDelSpecialDmCache(t *testing.T) {
	convey.Convey("DelSpecialDmCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
			tp  = int32(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelSpecialDmCache(c, oid, tp)
		})
	})
}

// TestDaoAddSpecialDmCache verifies a zero-value DmSpecial can be cached.
func TestDaoAddSpecialDmCache(t *testing.T) {
	convey.Convey("AddSpecialDmCache", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			ds = &model.DmSpecial{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := testDao.AddSpecialDmCache(c, ds)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,78 @@
package dao
import (
"context"
"fmt"
"go-common/library/cache/memcache"
"go-common/library/log"
)
const (
	// _fmtSubtitle is the cache key of one subtitle: "s_subtitle_<oid>_<id>".
	_fmtSubtitle = "s_subtitle_%d_%d"
	// _fmtVideoSubtitle is the per-video subtitle list key: "s_video_<oid>_<type>".
	_fmtVideoSubtitle = "s_video_%d_%d"
	// _fmtSubtitleDraft is the per-user draft key: "s_draft_<oid>_<type>_<mid>_<lan>".
	_fmtSubtitleDraft = "s_draft_%v_%v_%v_%v"
)

// subtitleKey builds the cache key of a single subtitle.
func (d *Dao) subtitleKey(oid int64, subtitleID int64) string {
	return fmt.Sprintf(_fmtSubtitle, oid, subtitleID)
}

// subtitleVideoKey builds the cache key of a video's subtitle list.
func (d *Dao) subtitleVideoKey(oid int64, tp int32) string {
	return fmt.Sprintf(_fmtVideoSubtitle, oid, tp)
}

// subtitleDraftKey builds the cache key of a user's subtitle draft.
func (d *Dao) subtitleDraftKey(oid int64, tp int32, mid int64, lan uint8) string {
	return fmt.Sprintf(_fmtSubtitleDraft, oid, tp, mid, lan)
}
// DelVideoSubtitleCache evicts the per-video subtitle list cache entry.
// A missing key is treated as success.
func (d *Dao) DelVideoSubtitleCache(c context.Context, oid int64, tp int32) (err error) {
	key := d.subtitleVideoKey(oid, tp)
	conn := d.subtitleMc.Get(c)
	defer conn.Close()
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("memcache.Delete(%s) error(%v)", key, err)
	}
	return
}
// DelSubtitleDraftCache evicts a user's subtitle-draft cache entry.
// A missing key is treated as success.
func (d *Dao) DelSubtitleDraftCache(c context.Context, oid int64, tp int32, mid int64, lan uint8) (err error) {
	key := d.subtitleDraftKey(oid, tp, mid, lan)
	conn := d.subtitleMc.Get(c)
	defer conn.Close()
	switch err = conn.Delete(key); err {
	case nil:
	case memcache.ErrNotFound:
		err = nil
	default:
		log.Error("memcache.Delete(%s) error(%v)", key, err)
	}
	return
}
// DelSubtitleCache evicts a single subtitle's cache entry.
// A missing key is treated as success.
func (d *Dao) DelSubtitleCache(c context.Context, oid int64, subtitleID int64) (err error) {
	conn := d.subtitleMc.Get(c)
	defer conn.Close()
	key := d.subtitleKey(oid, subtitleID)
	err = conn.Delete(key)
	if err == nil || err == memcache.ErrNotFound {
		err = nil
		return
	}
	log.Error("memcache.Delete(%s) error(%v)", key, err)
	return
}

View File

@@ -0,0 +1,96 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaosubtitleKey checks the single-subtitle key builder.
func TestDaosubtitleKey(t *testing.T) {
	convey.Convey("subtitleKey", t, func(ctx convey.C) {
		var (
			oid        = int64(0)
			subtitleID = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.subtitleKey(oid, subtitleID)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaosubtitleVideoKey checks the per-video subtitle key builder.
func TestDaosubtitleVideoKey(t *testing.T) {
	convey.Convey("subtitleVideoKey", t, func(ctx convey.C) {
		var (
			oid = int64(0)
			tp  = int32(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.subtitleVideoKey(oid, tp)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaosubtitleDraftKey checks the draft key builder.
func TestDaosubtitleDraftKey(t *testing.T) {
	convey.Convey("subtitleDraftKey", t, func(ctx convey.C) {
		var (
			oid = int64(0)
			tp  = int32(0)
			mid = int64(0)
			lan = uint8(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.subtitleDraftKey(oid, tp, mid, lan)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoDelVideoSubtitleCache smoke-tests the video-subtitle delete.
func TestDaoDelVideoSubtitleCache(t *testing.T) {
	convey.Convey("DelVideoSubtitleCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
			tp  = int32(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelVideoSubtitleCache(c, oid, tp)
		})
	})
}

// TestDaoDelSubtitleDraftCache smoke-tests the draft delete.
func TestDaoDelSubtitleDraftCache(t *testing.T) {
	convey.Convey("DelSubtitleDraftCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
			tp  = int32(0)
			mid = int64(0)
			lan = uint8(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelSubtitleDraftCache(c, oid, tp, mid, lan)
		})
	})
}

// TestDaoDelSubtitleCache smoke-tests the single-subtitle delete.
func TestDaoDelSubtitleCache(t *testing.T) {
	convey.Convey("DelSubtitleCache", t, func(ctx convey.C) {
		var (
			c          = context.Background()
			oid        = int64(0)
			subtitleID = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelSubtitleCache(c, oid, subtitleID)
		})
	})
}

View File

@@ -0,0 +1,280 @@
package dao
import (
"context"
"fmt"
"strconv"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/memcache"
"go-common/library/log"
)
// memcache key prefixes used by this dao.
const (
	_prefixXML   = "dm_xml_"  // full xml danmaku document
	_prefixSub   = "s_"       // danmaku subject
	_prefixAjax  = "dm_ajax_" // ajax danmaku list
	_keyDuration = "d_"       // video duration
)

// keyXML returns the cache key of the xml danmaku document of oid.
func keyXML(oid int64) string {
	return fmt.Sprintf("%s%d", _prefixXML, oid)
}

// keySubject returns the cache key of a danmaku subject (type, oid).
func keySubject(tp int32, oid int64) string {
	return fmt.Sprintf("%s%d_%d", _prefixSub, tp, oid)
}

// keyAjax returns the cache key of the ajax danmaku list of oid.
func keyAjax(oid int64) string {
	return fmt.Sprintf("%s%d", _prefixAjax, oid)
}

// keyDuration returns the cache key of the duration of video oid.
func keyDuration(oid int64) string {
	return fmt.Sprintf("%s%d", _keyDuration, oid)
}

// keyTransferLock returns the fixed key of the global danmaku-transfer lock.
func keyTransferLock() string {
	return "dm_transfer_lock"
}
// DelXMLCache deletes the cached xml danmaku document of oid.
// A missing key is not treated as an error.
func (d *Dao) DelXMLCache(c context.Context, oid int64) (err error) {
	key := keyXML(oid)
	conn := d.mc.Get(c)
	defer conn.Close()
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("conn.Delete(%s) error(%v)", key, err)
	}
	return
}
// AddXMLCache stores the xml danmaku document of oid in memcache
// with the dao's default expiration.
func (d *Dao) AddXMLCache(c context.Context, oid int64, value []byte) (err error) {
	key := keyXML(oid)
	conn := d.mc.Get(c)
	defer conn.Close()
	item := &memcache.Item{
		Key:        key,
		Value:      value,
		Expiration: d.mcExpire,
	}
	if err = conn.Set(item); err != nil {
		log.Error("conn.Set(%s) error(%v)", key, err)
	}
	return
}
// XMLCache fetches the cached xml danmaku document of oid.
// A cache miss returns nil data and nil error.
func (d *Dao) XMLCache(c context.Context, oid int64) (data []byte, err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyXML(oid)
	var item *memcache.Item
	if item, err = conn.Get(key); err != nil {
		if err == memcache.ErrNotFound {
			err = nil
		} else {
			log.Error("mc.Get(%s) error(%v)", key, err)
		}
		return
	}
	data = item.Value
	return
}
// SubjectCache gets a danmaku subject from memcache.
// Both sub and err are nil on a cache miss.
func (d *Dao) SubjectCache(c context.Context, tp int32, oid int64) (sub *model.Subject, err error) {
	var (
		conn = d.mc.Get(c)
		key  = keySubject(tp, oid)
		rp   *memcache.Item
	)
	defer conn.Close()
	if rp, err = conn.Get(key); err != nil {
		if err == memcache.ErrNotFound {
			sub = nil
			err = nil
		} else {
			log.Error("mc.Get(%s) error(%v)", key, err)
		}
		return
	}
	sub = &model.Subject{}
	// pass sub (not &sub): Scan expects the *model.Subject itself,
	// consistent with SubjectsCache below.
	if err = conn.Scan(rp, sub); err != nil {
		log.Error("mc.Scan(%d) error(%v)", oid, err)
	}
	return
}
// SubjectsCache multi-gets danmaku subjects from memcache. cached maps
// oid -> subject for hits; missed lists the oids not found in cache.
func (d *Dao) SubjectsCache(c context.Context, tp int32, oids []int64) (cached map[int64]*model.Subject, missed []int64, err error) {
	var (
		conn   = d.mc.Get(c)
		keys   []string
		oidMap = make(map[string]int64, len(oids))
	)
	cached = make(map[int64]*model.Subject, len(oids))
	defer conn.Close()
	// de-duplicate oids while building the key list; oidMap maps a
	// memcache key back to its oid.
	for _, oid := range oids {
		k := keySubject(tp, oid)
		if _, ok := oidMap[k]; !ok {
			keys = append(keys, k)
			oidMap[k] = oid
		}
	}
	rs, err := conn.GetMulti(keys)
	if err != nil {
		log.Error("conn.GetMulti(%v) error(%v)", keys, err)
		return
	}
	for k, r := range rs {
		sub := &model.Subject{}
		// a single undecodable entry is skipped, not fatal
		if err = conn.Scan(r, sub); err != nil {
			log.Error("conn.Scan(%s) error(%v)", r.Value, err)
			err = nil
			continue
		}
		cached[oidMap[k]] = sub
		// delete hit key
		delete(oidMap, k)
	}
	// missed key: whatever is still in oidMap was not returned by memcache
	missed = make([]int64, 0, len(oidMap))
	for _, oid := range oidMap {
		missed = append(missed, oid)
	}
	return
}
// AddSubjectCache stores a danmaku subject in memcache as JSON with the
// dao's default expiration.
func (d *Dao) AddSubjectCache(c context.Context, sub *model.Subject) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	item := &memcache.Item{
		Key:        keySubject(sub.Type, sub.Oid),
		Object:     sub,
		Flags:      memcache.FlagJSON,
		Expiration: d.mcExpire,
	}
	if err = conn.Set(item); err != nil {
		log.Error("conn.Set(%v) error(%v)", item, err)
	}
	return
}
// DelSubjectCache removes a danmaku subject from memcache; a miss is
// not an error.
func (d *Dao) DelSubjectCache(c context.Context, tp int32, oid int64) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keySubject(tp, oid)
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("conn.Delete(%s) error(%v)", key, err)
	}
	return
}
// AddTransferLock tries to acquire the global danmaku-transfer
// concurrency lock (60s TTL). It returns true only when this caller
// obtained the lock; ErrNotStored means another worker holds it.
func (d *Dao) AddTransferLock(c context.Context) (succeed bool) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyTransferLock()
	item := &memcache.Item{
		Key:        key,
		Value:      []byte("0"),
		Expiration: 60,
	}
	switch err := conn.Add(item); err {
	case nil:
		succeed = true
	case memcache.ErrNotStored:
		// lock already held; not an error
	default:
		log.Error("conn.Add(%s) error(%v)", key, err)
	}
	return
}
// DelTransferLock releases the global danmaku-transfer concurrency
// lock; a missing key is ignored.
func (d *Dao) DelTransferLock(c context.Context) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyTransferLock()
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("conn.Delete(%s) error(%v)", key, err)
	}
	return
}
// DelAjaxDMCache removes the cached ajax danmaku list of oid; a miss is
// not an error.
func (d *Dao) DelAjaxDMCache(c context.Context, oid int64) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyAjax(oid)
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("DelAjaxDMCache.conn.Delete(%s) error(%v)", key, err)
	}
	return
}
// DurationCache returns the cached duration of video oid. On a cache
// miss it returns model.NotFound with a nil error.
func (d *Dao) DurationCache(c context.Context, oid int64) (duration int64, err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyDuration(oid)
	var item *memcache.Item
	if item, err = conn.Get(key); err != nil {
		if err == memcache.ErrNotFound {
			duration = model.NotFound
			err = nil
		} else {
			log.Error("conn.Get(%s) error(%v)", key, err)
		}
		return
	}
	// the value is stored as a decimal string (see SetDurationCache)
	if duration, err = strconv.ParseInt(string(item.Value), 10, 64); err != nil {
		log.Error("strconv.ParseInt(%s) error(%v)", item.Value, err)
	}
	return
}
// SetDurationCache caches the duration of video oid in memcache as a
// raw decimal string.
func (d *Dao) SetDurationCache(c context.Context, oid, duration int64) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	item := memcache.Item{
		Key:        keyDuration(oid),
		Value:      []byte(strconv.FormatInt(duration, 10)),
		Expiration: d.mcExpire,
		Flags:      memcache.FlagRAW,
	}
	if err = conn.Set(&item); err != nil {
		log.Error("mc.Set(%v) error(%v)", item, err)
	}
	return
}

View File

@@ -0,0 +1,95 @@
package dao
import (
"context"
"fmt"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/memcache"
"go-common/library/log"
)
const (
	// protobuf segment-danmaku cache key: sg_<type>_<oid>_<total>_<num>
	_keySegMC = "sg_%d_%d_%d_%d"
)

// keyXMLSeg returns the xml segment-danmaku cache key:
// <type>_<oid>_<cnt>_<num>.
func keyXMLSeg(tp int32, oid, cnt, num int64) string {
	return fmt.Sprintf("%d_%d_%d_%d", tp, oid, cnt, num)
}

// keySegMC returns the protobuf segment-danmaku cache key.
func keySegMC(tp int32, oid, total, num int64) string {
	return fmt.Sprintf(_keySegMC, tp, oid, total, num)
}
// DelXMLSegCache deletes one cached xml danmaku segment; a miss is
// not an error.
func (d *Dao) DelXMLSegCache(c context.Context, tp int32, oid, cnt, num int64) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	key := keyXMLSeg(tp, oid, cnt, num)
	if err = conn.Delete(key); err == memcache.ErrNotFound {
		err = nil
	} else if err != nil {
		log.Error("conn.Delete(%s) error(%v)", key, err)
	}
	return
}
// SetXMLSegCache stores one xml danmaku segment in memcache as raw
// bytes with the dao's default expiration.
func (d *Dao) SetXMLSegCache(c context.Context, tp int32, oid, cnt, num int64, value []byte) (err error) {
	conn := d.mc.Get(c)
	defer conn.Close()
	item := memcache.Item{
		Key:        keyXMLSeg(tp, oid, cnt, num),
		Value:      value,
		Expiration: d.mcExpire,
		Flags:      memcache.FlagRAW,
	}
	if err = conn.Set(&item); err != nil {
		log.Error("mc.Set(%v) error(%v)", item, err)
	}
	return
}
// SetDMSegCache stores a danmaku segment in the segment memcache as
// gzipped protobuf.
func (d *Dao) SetDMSegCache(c context.Context, tp int32, oid, total, num int64, dmSeg *model.DMSeg) (err error) {
	conn := d.dmSegMC.Get(c)
	defer conn.Close()
	item := memcache.Item{
		Key:        keySegMC(tp, oid, total, num),
		Object:     dmSeg,
		Expiration: d.mcExpire,
		Flags:      memcache.FlagProtobuf | memcache.FlagGzip,
	}
	if err = conn.Set(&item); err != nil {
		log.Error("conn.Set(%v) error(%v)", item, err)
	}
	return
}
// DMSegCache reads the protobuf danmaku segment cache. Both dmSeg and
// err are nil on a cache miss.
func (d *Dao) DMSegCache(c context.Context, tp int32, oid, total, num int64) (dmSeg *model.DMSeg, err error) {
	conn := d.dmSegMC.Get(c)
	defer conn.Close()
	key := keySegMC(tp, oid, total, num)
	dmSeg = new(model.DMSeg)
	var item *memcache.Item
	if item, err = conn.Get(key); err != nil {
		if err == memcache.ErrNotFound {
			err = nil
			dmSeg = nil
		} else {
			log.Error("mc.Get(%s) error(%v)", key, err)
		}
		return
	}
	if err = conn.Scan(item, dmSeg); err != nil {
		log.Error("conn.Scan() error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,35 @@
package dao
import (
"testing"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestDelXMLSegCache deletes a segment xml cache entry; deletion of a
// missing key is treated as success by the dao.
// NOTE(review): `c` here is a package-level test context defined
// elsewhere in this package — confirm in dao_test.go.
func TestDelXMLSegCache(t *testing.T) {
	Convey("check delete segment xml cache,error should be nil", t, func() {
		err := testDao.DelXMLSegCache(c, 1, 1221, 1, 1)
		So(err, ShouldBeNil)
	})
}

// TestSetDMSegCache writes one segment with a single elem and expects
// no error from memcache.
func TestSetDMSegCache(t *testing.T) {
	Convey("set dm segment cache, error should be nil", t, func() {
		dmseg := new(model.DMSeg)
		dmseg.Elems = append(dmseg.Elems, &model.Elem{Content: "dm msg"})
		err := testDao.SetDMSegCache(c, 1, 1221, 1, 1, dmseg)
		So(err, ShouldBeNil)
	})
}

// TestDMSegCache reads the segment back and logs it; only hard errors fail.
func TestDMSegCache(t *testing.T) {
	Convey("get dm segment cache", t, func() {
		dmseg, err := testDao.DMSegCache(c, 1, 1221, 1, 1)
		if err != nil {
			t.Fatal(err)
		}
		t.Logf("%+v", dmseg)
	})
}

View File

@@ -0,0 +1,194 @@
package dao
import (
"context"
"go-common/app/job/main/dm2/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaokeyXML checks keyXML returns a non-nil key.
func TestDaokeyXML(t *testing.T) {
	convey.Convey("keyXML", t, func(ctx convey.C) {
		var (
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := keyXML(oid)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaokeySubject checks keySubject returns a non-nil key.
func TestDaokeySubject(t *testing.T) {
	convey.Convey("keySubject", t, func(ctx convey.C) {
		var (
			tp  = int32(0)
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := keySubject(tp, oid)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaokeyAjax checks keyAjax returns a non-nil key.
func TestDaokeyAjax(t *testing.T) {
	convey.Convey("keyAjax", t, func(ctx convey.C) {
		var (
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := keyAjax(oid)
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaokeyTransferLock checks keyTransferLock returns a non-nil key.
func TestDaokeyTransferLock(t *testing.T) {
	convey.Convey("keyTransferLock", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := keyTransferLock()
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoDelXMLCache smoke-tests xml cache deletion against live memcache.
func TestDaoDelXMLCache(t *testing.T) {
	convey.Convey("DelXMLCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelXMLCache(c, oid)
		})
	})
}

// TestDaoAddXMLCache writes an empty xml value and expects no error.
func TestDaoAddXMLCache(t *testing.T) {
	convey.Convey("AddXMLCache", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			oid   = int64(0)
			value = []byte("")
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := testDao.AddXMLCache(c, oid, value)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoXMLCache smoke-tests xml cache reads.
func TestDaoXMLCache(t *testing.T) {
	convey.Convey("XMLCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.XMLCache(c, oid)
		})
	})
}

// TestDaoSubjectCache smoke-tests a single subject cache read.
func TestDaoSubjectCache(t *testing.T) {
	convey.Convey("SubjectCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			tp  = int32(0)
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.SubjectCache(c, tp, oid)
		})
	})
}

// TestDaoSubjectsCache smoke-tests the multi-get path with no oids.
func TestDaoSubjectsCache(t *testing.T) {
	convey.Convey("SubjectsCache", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			tp   = int32(0)
			oids = []int64{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.SubjectsCache(c, tp, oids)
		})
	})
}

// TestDaoAddSubjectCache writes a zero-value subject and expects no error.
func TestDaoAddSubjectCache(t *testing.T) {
	convey.Convey("AddSubjectCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			sub = &model.Subject{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := testDao.AddSubjectCache(c, sub)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

// TestDaoDelSubjectCache smoke-tests subject cache deletion.
func TestDaoDelSubjectCache(t *testing.T) {
	convey.Convey("DelSubjectCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			tp  = int32(0)
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelSubjectCache(c, tp, oid)
		})
	})
}

// TestDaoAddTransferLock checks the lock helper returns a boolean.
func TestDaoAddTransferLock(t *testing.T) {
	convey.Convey("AddTransferLock", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			succeed := testDao.AddTransferLock(c)
			ctx.Convey("Then succeed should not be nil.", func(ctx convey.C) {
				ctx.So(succeed, convey.ShouldNotBeNil)
			})
		})
	})
}

// TestDaoDelTransferLock smoke-tests lock release.
func TestDaoDelTransferLock(t *testing.T) {
	convey.Convey("DelTransferLock", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelTransferLock(c)
		})
	})
}

// TestDaoDelAjaxDMCache smoke-tests ajax danmaku cache deletion.
func TestDaoDelAjaxDMCache(t *testing.T) {
	convey.Convey("DelAjaxDMCache", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			oid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.DelAjaxDMCache(c, oid)
		})
	})
}

View File

@@ -0,0 +1,502 @@
package dao
import (
"context"
"fmt"
"sync"
"go-common/app/job/main/dm2/model"
"go-common/library/database/sql"
"go-common/library/log"
"go-common/library/sync/errgroup"
"go-common/library/xstr"
)
// table shard counts and raw SQL for the sharded danmaku tables.
const (
	_subtitleSharding = 100
	_subjectSharding  = 100
	_indexSharding    = 1000
	_contentSharding  = 1000
	// dm_subject_%02d statements
	_addSubjectSQL      = "INSERT INTO dm_subject_%02d(type,oid,pid,mid,maxlimit,attr) VALUES(?,?,?,?,?,?)"
	_updateChildpoolSQL = "UPDATE dm_subject_%02d SET childpool=? WHERE type=? AND oid=?"
	_updateSubMidSQL    = "UPDATE dm_subject_%02d SET mid=? WHERE type=? AND oid=?"
	_updateSubAttrSQL   = "UPDATE dm_subject_%02d SET attr=? WHERE type=? AND oid=?"
	_incrSubMCountSQL   = "UPDATE dm_subject_%02d SET mcount=mcount+1 WHERE type=? AND oid=?"
	_incrSubCountSQL    = "UPDATE dm_subject_%02d SET acount=acount+?,count=count+?,childpool=? WHERE type=? AND oid=?"
	_getSubjectSQL      = "SELECT id,type,oid,pid,mid,state,attr,acount,count,mcount,move_count,maxlimit,childpool,ctime,mtime FROM dm_subject_%02d WHERE type=? AND oid=?"
	// dm_index_%03d statements; queries filter on state IN(0,6)
	_addIndexSQL   = "INSERT INTO dm_index_%03d(id,type,oid,mid,progress,state,pool,attr,ctime) VALUES(?,?,?,?,?,?,?,?,?)"
	_getIndexSQL   = "SELECT id,type,oid,mid,progress,state,pool,attr,ctime,mtime FROM dm_index_%03d WHERE type=? AND oid=? AND state IN(0,6)"
	_idxSegIDSQL   = "SELECT id FROM dm_index_%03d WHERE type=? AND oid=? AND progress>=? AND progress<? AND state IN(0,6) AND pool = ? limit ?"
	_idxSegSQL     = "SELECT id,type,oid,mid,progress,state,pool,attr,ctime,mtime FROM dm_index_%03d WHERE type=? AND oid=? AND state IN(0,6) AND progress>=? AND progress<? AND pool=? limit ?"
	_idxIDSQL      = "SELECT id FROM dm_index_%03d WHERE type=? AND oid=? AND state IN(0,6) AND pool=?"
	_idxsByidSQL   = "SELECT id,type,oid,mid,progress,state,pool,attr,ctime,mtime FROM dm_index_%03d WHERE id IN(%s)"
	_idxsByPoolSQL = "SELECT id,type,oid,mid,progress,state,pool,attr,ctime,mtime FROM dm_index_%03d WHERE type=? AND oid=? AND state IN(0,6) AND pool=?"
	// dm_content_%03d / dm_special_content statements
	_addContentSQL     = "REPLACE INTO dm_content_%03d(dmid,fontsize,color,mode,ip,plat,msg,ctime) VALUES(?,?,?,?,?,?,?,?)"
	_getContentsSQL    = "SELECT dmid,fontsize,color,mode,ip,plat,msg,ctime,mtime FROM dm_content_%03d WHERE dmid IN(%s)"
	_getContentSQL     = "SELECT dmid,fontsize,color,mode,ip,plat,msg,ctime,mtime FROM dm_content_%03d WHERE dmid=?"
	_addContentSpeSQL  = "REPLACE INTO dm_special_content(dmid,msg,ctime) VALUES(?,?,?)"
	_getContentSpeSQL  = "SELECT dmid,msg,ctime,mtime FROM dm_special_content WHERE dmid=?"
	_getContentsSpeSQL = "SELECT dmid,msg,ctime,mtime FROM dm_special_content WHERE dmid IN(%s)"
	//delete dm hide state
	_delDMHideState = "UPDATE dm_index_%03d SET state=? WHERE oid=? AND id=? AND state=?"
	// update subtitle upmid
	_getSubtitle    = "SELECT id,oid,type,lan,status,mid,up_mid,subtitle_url,pub_time,reject_comment from subtitle_%02d WHERE id=?"
	_getSubtitles   = "SELECT id,oid,type,lan,status,mid,up_mid,subtitle_url,pub_time,reject_comment from subtitle_%02d WHERE oid=? AND type=?"
	_updateSubtitle = "UPDATE subtitle_%02d SET up_mid=?,status=?,pub_time=?,reject_comment=? WHERE id=?"
	_addSubtitlePub = "INSERT INTO subtitle_pub(oid,type,lan,subtitle_id,is_delete) VALUES(?,?,?,?,?) ON DUPLICATE KEY UPDATE subtitle_id=?,is_delete=?"
	// get mask mid
	_getMaskMids = "SELECT mid from dm_mask_up where state=1"
)
// hitSubject returns the dm_subject table shard suffix for oid.
func (d *Dao) hitSubject(oid int64) int64 {
	return oid % _subjectSharding
}

// hitIndex returns the dm_index table shard suffix for oid.
func (d *Dao) hitIndex(oid int64) int64 {
	return oid % _indexSharding
}

// hitContent returns the dm_content table shard suffix for oid.
func (d *Dao) hitContent(oid int64) int64 {
	return oid % _contentSharding
}

// hitSubtile returns the subtitle table shard suffix for oid.
// (name keeps its original spelling; callers in this package use it)
func (d *Dao) hitSubtile(oid int64) int64 {
	return oid % _subtitleSharding
}
// AddSubject inserts a danmaku subject row and returns the auto id.
func (d *Dao) AddSubject(c context.Context, tp int32, oid, pid, mid, maxlimit int64, attr int32) (lastID int64, err error) {
	res, err := d.dmWriter.Exec(c, fmt.Sprintf(_addSubjectSQL, d.hitSubject(oid)), tp, oid, pid, mid, maxlimit, attr)
	if err != nil {
		log.Error("dmWriter.Exec(%d,%d,%d,%d,%d,%d) error(%v)", tp, oid, pid, mid, maxlimit, attr, err)
		return
	}
	lastID, err = res.LastInsertId()
	return
}
// UpdateSubAttr updates the attr bit field of a danmaku subject and
// returns the number of affected rows.
func (d *Dao) UpdateSubAttr(c context.Context, tp int32, oid int64, attr int32) (affect int64, err error) {
	res, err := d.dmWriter.Exec(c, fmt.Sprintf(_updateSubAttrSQL, d.hitSubject(oid)), attr, tp, oid)
	if err != nil {
		// log the statement actually executed (previously logged
		// _updateSubMidSQL by mistake)
		log.Error("dmWriter.Exec(%s,%d,%d) error(%v)", _updateSubAttrSQL, oid, attr, err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// UpdateSubMid updates the owner mid of a danmaku subject and returns
// the number of affected rows.
func (d *Dao) UpdateSubMid(c context.Context, tp int32, oid, mid int64) (affect int64, err error) {
	res, err := d.dmWriter.Exec(c, fmt.Sprintf(_updateSubMidSQL, d.hitSubject(oid)), mid, tp, oid)
	if err != nil {
		log.Error("dmWriter.Exec(%s,%d,%d) error(%v)", _updateSubMidSQL, oid, mid, err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// Subject loads one danmaku subject row. Both s and err are nil when
// no row matches (sql.ErrNoRows is swallowed).
func (d *Dao) Subject(c context.Context, tp int32, oid int64) (s *model.Subject, err error) {
	s = &model.Subject{}
	row := d.dmReader.QueryRow(c, fmt.Sprintf(_getSubjectSQL, d.hitSubject(oid)), tp, oid)
	// column order must match _getSubjectSQL exactly
	if err = row.Scan(&s.ID, &s.Type, &s.Oid, &s.Pid, &s.Mid, &s.State, &s.Attr, &s.ACount, &s.Count, &s.MCount, &s.MoveCnt, &s.Maxlimit, &s.Childpool, &s.Ctime, &s.Mtime); err != nil {
		if err == sql.ErrNoRows {
			s = nil
			err = nil
		} else {
			log.Error("row.Scan() error(%v)", err)
		}
	}
	return
}
// UpdateChildpool updates the childpool flag of a danmaku subject and
// returns the number of affected rows.
func (d *Dao) UpdateChildpool(c context.Context, tp int32, oid int64, childpool int32) (affect int64, err error) {
	res, err := d.dmWriter.Exec(c, fmt.Sprintf(_updateChildpoolSQL, d.hitSubject(oid)), childpool, tp, oid)
	if err != nil {
		log.Error("dmWriter.Exec(%s %d) error(%v)", _updateChildpoolSQL, oid, err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// TxIncrSubjectCount increments acount/count and sets childpool of a
// subject inside tx, returning the number of affected rows.
func (d *Dao) TxIncrSubjectCount(tx *sql.Tx, tp int32, oid, acount, count int64, childpool int32) (affect int64, err error) {
	query := fmt.Sprintf(_incrSubCountSQL, d.hitSubject(oid))
	res, err := tx.Exec(query, acount, count, childpool, tp, oid)
	if err != nil {
		log.Error("tx.Exec error(%v)", err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// TxAddIndex inserts one danmaku index row inside tx and returns the
// last insert id.
func (d *Dao) TxAddIndex(tx *sql.Tx, m *model.DM) (id int64, err error) {
	query := fmt.Sprintf(_addIndexSQL, d.hitIndex(m.Oid))
	res, err := tx.Exec(query, m.ID, m.Type, m.Oid, m.Mid, m.Progress, m.State, m.Pool, m.Attr, m.Ctime)
	if err != nil {
		log.Error("tx.Exec error(%v)", err)
		return
	}
	id, err = res.LastInsertId()
	return
}
// Indexs loads all danmaku index rows (state IN(0,6)) of (type, oid).
// It returns the rows keyed by dmid, the full dmid list, and the dmids
// that belong to the special pool.
func (d *Dao) Indexs(c context.Context, tp int32, oid int64) (idxMap map[int64]*model.DM, dmids, special []int64, err error) {
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_getIndexSQL, d.hitIndex(oid)), tp, oid)
	if err != nil {
		log.Error("dmReader.Query(%d,%d) error(%v)", tp, oid, err)
		return
	}
	defer rows.Close()
	idxMap = make(map[int64]*model.DM)
	for rows.Next() {
		idx := &model.DM{}
		if err = rows.Scan(&idx.ID, &idx.Type, &idx.Oid, &idx.Mid, &idx.Progress, &idx.State, &idx.Pool, &idx.Attr, &idx.Ctime, &idx.Mtime); err != nil {
			log.Error("row.Scan() error(%v)", err)
			return
		}
		idxMap[idx.ID] = idx
		dmids = append(dmids, idx.ID)
		if idx.Pool == model.PoolSpecial {
			special = append(special, idx.ID)
		}
	}
	// surface any iteration error to the caller
	err = rows.Err()
	return
}
// IndexsSeg loads segment danmaku index rows (state IN(0,6)) of the
// given pool whose progress lies in [ps, pe), at most limit rows.
func (d *Dao) IndexsSeg(c context.Context, tp int32, oid, ps, pe, limit int64, pool int32) (res []*model.DM, dmids []int64, err error) {
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_idxSegSQL, d.hitIndex(oid)), tp, oid, ps, pe, pool, limit)
	if err != nil {
		log.Error("db.Query(%d %d %d %d) error(%v)", tp, oid, ps, pe, err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		dm := &model.DM{}
		if err = rows.Scan(&dm.ID, &dm.Type, &dm.Oid, &dm.Mid, &dm.Progress, &dm.State, &dm.Pool, &dm.Attr, &dm.Ctime, &dm.Mtime); err != nil {
			log.Error("row.Scan() error(%v)", err)
			return
		}
		res = append(res, dm)
		dmids = append(dmids, dm.ID)
	}
	// surface iteration errors (previously dropped silently)
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// IndexsSegID returns the dmids of one danmaku segment: rows of the
// given pool with progress in [ps, pe), state IN(0,6), at most limit.
func (d *Dao) IndexsSegID(c context.Context, tp int32, oid, ps, pe, limit int64, pool int32) (dmids []int64, err error) {
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_idxSegIDSQL, d.hitIndex(oid)), tp, oid, ps, pe, pool, limit)
	if err != nil {
		log.Error("db.Query() error(%v)", err)
		return
	}
	defer rows.Close()
	var dmid int64
	for rows.Next() {
		if err = rows.Scan(&dmid); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return
		}
		dmids = append(dmids, dmid)
	}
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// IndexsID returns all dmids of (type, oid) in the given pool with
// state IN(0,6).
func (d *Dao) IndexsID(c context.Context, tp int32, oid int64, pool int32) (dmids []int64, err error) {
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_idxIDSQL, d.hitIndex(oid)), tp, oid, pool)
	if err != nil {
		log.Error("db.Query() error(%v)", err)
		return
	}
	defer rows.Close()
	var dmid int64
	for rows.Next() {
		if err = rows.Scan(&dmid); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return
		}
		dmids = append(dmids, dmid)
	}
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// IndexsByid loads danmaku index rows by dmid list, keyed by dmid, and
// also returns the dmids belonging to the special pool.
// NOTE(review): tp is unused here — the query selects by id only;
// confirm whether filtering by type was intended.
func (d *Dao) IndexsByid(c context.Context, tp int32, oid int64, dmids []int64) (idxMap map[int64]*model.DM, special []int64, err error) {
	query := fmt.Sprintf(_idxsByidSQL, d.hitIndex(oid), xstr.JoinInts(dmids))
	rows, err := d.dmReader.Query(c, query)
	if err != nil {
		log.Error("db.Query(%s) error(%v)", query, err)
		return
	}
	defer rows.Close()
	idxMap = make(map[int64]*model.DM)
	for rows.Next() {
		idx := new(model.DM)
		if err = rows.Scan(&idx.ID, &idx.Type, &idx.Oid, &idx.Mid, &idx.Progress, &idx.State, &idx.Pool, &idx.Attr, &idx.Ctime, &idx.Mtime); err != nil {
			log.Error("row.Scan() error(%v)", err)
			return
		}
		idxMap[idx.ID] = idx
		if idx.Pool == model.PoolSpecial {
			special = append(special, idx.ID)
		}
	}
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// IndexsByPool loads all danmaku index rows (state IN(0,6)) of
// (type, oid) in the given pool, plus their dmids.
func (d *Dao) IndexsByPool(c context.Context, tp int32, oid int64, pool int32) (dms []*model.DM, dmids []int64, err error) {
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_idxsByPoolSQL, d.hitIndex(oid)), tp, oid, pool)
	if err != nil {
		log.Error("dmReader.Query(tp:%v,oid:%v) error(%v)", tp, oid, err)
		return
	}
	defer rows.Close()
	dms = make([]*model.DM, 0, 100)
	for rows.Next() {
		dm := &model.DM{}
		if err = rows.Scan(&dm.ID, &dm.Type, &dm.Oid, &dm.Mid, &dm.Progress, &dm.State, &dm.Pool, &dm.Attr, &dm.Ctime, &dm.Mtime); err != nil {
			log.Error("row.Scan() error(%v)", err)
			return
		}
		dms = append(dms, dm)
		dmids = append(dmids, dm.ID)
	}
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// TxAddContent inserts (REPLACE) one danmaku content row inside tx and
// returns the last insert id.
func (d *Dao) TxAddContent(tx *sql.Tx, oid int64, m *model.Content) (id int64, err error) {
	query := fmt.Sprintf(_addContentSQL, d.hitContent(oid))
	res, err := tx.Exec(query, m.ID, m.FontSize, m.Color, m.Mode, m.IP, m.Plat, m.Msg, m.Ctime)
	if err != nil {
		log.Error("tx.Exec error(%v)", err)
		return
	}
	id, err = res.LastInsertId()
	return
}
// TxAddContentSpecial inserts (REPLACE) one special danmaku content
// row inside tx and returns the last insert id.
func (d *Dao) TxAddContentSpecial(tx *sql.Tx, m *model.ContentSpecial) (id int64, err error) {
	res, err := tx.Exec(_addContentSpeSQL, m.ID, m.Msg, m.Ctime)
	if err != nil {
		log.Error("tx.Exec error(%v)", err)
		return
	}
	id, err = res.LastInsertId()
	return
}
// Content loads one danmaku content row by dmid; ct is nil when the
// scan fails.
// NOTE(review): unlike Subject, sql.ErrNoRows here is logged and
// returned as an error rather than swallowed — confirm callers rely
// on this before changing it.
func (d *Dao) Content(c context.Context, oid, dmid int64) (ct *model.Content, err error) {
	ct = &model.Content{}
	row := d.dmReader.QueryRow(c, fmt.Sprintf(_getContentSQL, d.hitContent(oid)), dmid)
	if err = row.Scan(&ct.ID, &ct.FontSize, &ct.Color, &ct.Mode, &ct.IP, &ct.Plat, &ct.Msg, &ct.Ctime, &ct.Mtime); err != nil {
		ct = nil
		log.Error("row.Scan() error(%v)", err)
	}
	return
}
// Contents multi-gets danmaku content rows by dmids. The ids are split
// into pages of d.pageSize and queried concurrently; results are
// merged into a map keyed by dmid.
func (d *Dao) Contents(c context.Context, oid int64, dmids []int64) (ctsMap map[int64]*model.Content, err error) {
	var (
		wg   errgroup.Group
		lock sync.Mutex
	)
	ctsMap = make(map[int64]*model.Content)
	pageNum := len(dmids) / d.pageSize
	if len(dmids)%d.pageSize > 0 {
		pageNum = pageNum + 1
	}
	for i := 0; i < pageNum; i++ {
		// start/end are declared inside the loop so each closure
		// captures its own page bounds
		start := i * d.pageSize
		end := (i + 1) * d.pageSize
		if end > len(dmids) {
			end = len(dmids)
		}
		wg.Go(func() (err error) {
			// build the query once so the executed and the logged SQL
			// match (previously the log rebuilt it with ALL dmids)
			query := fmt.Sprintf(_getContentsSQL, d.hitContent(oid), xstr.JoinInts(dmids[start:end]))
			rows, err := d.dmReader.Query(c, query)
			if err != nil {
				log.Error("db.Query(%s) error(%v)", query, err)
				return
			}
			defer rows.Close()
			for rows.Next() {
				ct := &model.Content{}
				if err = rows.Scan(&ct.ID, &ct.FontSize, &ct.Color, &ct.Mode, &ct.IP, &ct.Plat, &ct.Msg, &ct.Ctime, &ct.Mtime); err != nil {
					log.Error("rows.Scan() error(%v)", err)
					return
				}
				// ctsMap is shared across the page goroutines
				lock.Lock()
				ctsMap[ct.ID] = ct
				lock.Unlock()
			}
			err = rows.Err()
			return
		})
	}
	if err = wg.Wait(); err != nil {
		log.Error("wg.Wait() error(%v)", err)
	}
	return
}
// ContentsSpecial multi-gets special danmaku content rows by dmids,
// keyed by dmid.
func (d *Dao) ContentsSpecial(c context.Context, dmids []int64) (res map[int64]*model.ContentSpecial, err error) {
	res = make(map[int64]*model.ContentSpecial, len(dmids))
	rows, err := d.dmReader.Query(c, fmt.Sprintf(_getContentsSpeSQL, xstr.JoinInts(dmids)))
	if err != nil {
		log.Error("db.Query() error(%v)", err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		content := &model.ContentSpecial{}
		if err = rows.Scan(&content.ID, &content.Msg, &content.Ctime, &content.Mtime); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return
		}
		res[content.ID] = content
	}
	// surface iteration errors (previously dropped silently)
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// ContentSpecial loads one special danmaku content row by dmid.
// NOTE(review): sql.ErrNoRows is logged and returned as an error here
// (and contentSpe stays non-nil) — confirm callers expect that.
func (d *Dao) ContentSpecial(c context.Context, dmid int64) (contentSpe *model.ContentSpecial, err error) {
	contentSpe = &model.ContentSpecial{}
	row := d.dmReader.QueryRow(c, _getContentSpeSQL, dmid)
	if err = row.Scan(&contentSpe.ID, &contentSpe.Msg, &contentSpe.Ctime, &contentSpe.Mtime); err != nil {
		log.Error("rows.Scan() error(%v)", err)
	}
	return
}
// DelDMHideState flips a danmaku row from StateHide back to
// StateNormal and returns the number of affected rows.
func (d *Dao) DelDMHideState(c context.Context, tp int32, oid int64, dmid int64) (affect int64, err error) {
	query := fmt.Sprintf(_delDMHideState, d.hitIndex(oid))
	res, err := d.dmWriter.Exec(c, query, model.StateNormal, oid, dmid, model.StateHide)
	if err != nil {
		log.Error("dmWriter.Exec(%s %d dmid=%d) error(%v)", _delDMHideState, oid, dmid, err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// TxIncrSubMCount increments the monitor danmaku count (mcount) of a
// subject inside tx, returning the number of affected rows.
func (d *Dao) TxIncrSubMCount(tx *sql.Tx, tp int32, oid int64) (affect int64, err error) {
	query := fmt.Sprintf(_incrSubMCountSQL, d.hitSubject(oid))
	res, err := tx.Exec(query, tp, oid)
	if err != nil {
		log.Error("tx.Exec(%s,%d,%d) error(%v)", _incrSubMCountSQL, tp, oid, err)
		return
	}
	affect, err = res.RowsAffected()
	return
}
// UpdateSubtitle updates up_mid, status, pub_time and reject_comment
// of one subtitle row.
func (d *Dao) UpdateSubtitle(c context.Context, subtitle *model.Subtitle) (err error) {
	query := fmt.Sprintf(_updateSubtitle, d.hitSubtile(subtitle.Oid))
	_, err = d.biliDMWriter.Exec(c, query, subtitle.UpMid, subtitle.Status, subtitle.PubTime, subtitle.RejectComment, subtitle.ID)
	if err != nil {
		log.Error("biliDMWriter.Exec(query:%v,subtitle:%+v) error(%v)", _updateSubtitle, subtitle, err)
	}
	return
}
// GetSubtitles loads all subtitle rows of (oid, type).
// NOTE(review): the read goes through biliDMWriter rather than a
// reader connection — presumably for read-after-write consistency;
// confirm before changing.
func (d *Dao) GetSubtitles(c context.Context, tp int32, oid int64) (subtitles []*model.Subtitle, err error) {
	rows, err := d.biliDMWriter.Query(c, fmt.Sprintf(_getSubtitles, d.hitSubtile(oid)), oid, tp)
	if err != nil {
		log.Error("biliDMWriter.Query(%s,%d,%d) error(%v)", _getSubtitles, oid, tp, err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var subtitle = &model.Subtitle{}
		if err = rows.Scan(&subtitle.ID, &subtitle.Oid, &subtitle.Type, &subtitle.Lan, &subtitle.Status, &subtitle.Mid, &subtitle.UpMid,
			&subtitle.SubtitleURL, &subtitle.PubTime, &subtitle.RejectComment); err != nil {
			log.Error("biliDMWriter.Scan(%s,%d,%d) error(%v)", _getSubtitles, oid, tp, err)
			return
		}
		subtitles = append(subtitles, subtitle)
	}
	if err = rows.Err(); err != nil {
		log.Error("biliDMWriter.rows.Err()(%s,%d,%d) error(%v)", _getSubtitles, oid, tp, err)
		return
	}
	return
}
// GetSubtitle loads one subtitle row by subtitleID; both subtitle and
// err are nil when no row matches.
func (d *Dao) GetSubtitle(c context.Context, oid int64, subtitleID int64) (subtitle *model.Subtitle, err error) {
	subtitle = &model.Subtitle{}
	row := d.biliDMWriter.QueryRow(c, fmt.Sprintf(_getSubtitle, d.hitSubtile(oid)), subtitleID)
	if err = row.Scan(&subtitle.ID, &subtitle.Oid, &subtitle.Type, &subtitle.Lan, &subtitle.Status, &subtitle.Mid, &subtitle.UpMid,
		&subtitle.SubtitleURL, &subtitle.PubTime, &subtitle.RejectComment); err != nil {
		if err == sql.ErrNoRows {
			subtitle = nil
			err = nil
		} else {
			log.Error("row.Scan() error(%v)", err)
		}
	}
	return
}
// TxUpdateSubtitle updates up_mid, status, pub_time and reject_comment
// of one subtitle row inside tx.
func (d *Dao) TxUpdateSubtitle(tx *sql.Tx, subtitle *model.Subtitle) (err error) {
	query := fmt.Sprintf(_updateSubtitle, d.hitSubtile(subtitle.Oid))
	_, err = tx.Exec(query, subtitle.UpMid, subtitle.Status, subtitle.PubTime, subtitle.RejectComment, subtitle.ID)
	if err != nil {
		log.Error("params(%+v),error(%v)", subtitle, err)
	}
	return
}
// TxAddSubtitlePub upserts one subtitle_pub row inside tx
// (ON DUPLICATE KEY UPDATE on subtitle_id/is_delete).
func (d *Dao) TxAddSubtitlePub(tx *sql.Tx, subtitlePub *model.SubtitlePub) (err error) {
	_, err = tx.Exec(_addSubtitlePub, subtitlePub.Oid, subtitlePub.Type, subtitlePub.Lan, subtitlePub.SubtitleID, subtitlePub.IsDelete, subtitlePub.SubtitleID, subtitlePub.IsDelete)
	if err != nil {
		log.Error("params(%+v),error(%v)", subtitlePub, err)
	}
	return
}
// MaskMids returns the mids of all uploaders with danmaku mask enabled
// (dm_mask_up rows with state=1).
func (d *Dao) MaskMids(c context.Context) (mids []int64, err error) {
	mids = make([]int64, 0, 100)
	rows, err := d.biliDMWriter.Query(c, _getMaskMids)
	if err != nil {
		log.Error("biliDMWriter.Query(%s) error(%v)", _getMaskMids, err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		var mid int64
		if err = rows.Scan(&mid); err != nil {
			log.Error("biliDMWriter.Scan(%s) error(%v)", _getMaskMids, err)
			return
		}
		mids = append(mids, mid)
	}
	if err = rows.Err(); err != nil {
		log.Error("biliDMWriter.rows.Err() error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,39 @@
package dao
import (
"context"
"strings"
"time"
"go-common/library/database/sql"
"go-common/library/log"
)
const (
_addDmSpecialLocationSQL = "INSERT INTO dm_special_content_location (type,oid,locations,ctime) VALUES(?,?,?,?) ON DUPLICATE KEY UPDATE locations=?"
_getDmSpecialLocationSQL = "SELECT locations FROM dm_special_content_location WHERE oid=? AND type=?"
)
// UpsertDmSpecialLocation stores the special-dm content locations
// (presumably a comma-joined list — see DMSpecialLocations) for (type, oid),
// overwriting locations when the row already exists.
func (d *Dao) UpsertDmSpecialLocation(c context.Context, tp int32, oid int64, locations string) (err error) {
if _, err = d.dmWriter.Exec(c, _addDmSpecialLocationSQL, tp, oid, locations, time.Now(), locations); err != nil {
log.Error("AddDmSpecialLocation.Exec error(%v)", err)
}
return
}
// DMSpecialLocations returns the stored locations of (oid, type), split on
// ",". A missing row yields (nil, nil).
// NOTE(review): a row whose locations column is "" returns [""], not an
// empty slice — confirm callers tolerate that.
func (d *Dao) DMSpecialLocations(c context.Context, tp int32, oid int64) (locations []string, err error) {
row := d.dmReader.QueryRow(c, _getDmSpecialLocationSQL, oid, tp)
var s string
if err = row.Scan(&s); err != nil {
if err == sql.ErrNoRows {
err = nil
} else {
log.Error("DMSpecialLocations.Query error(%v)", err)
}
return
}
locations = strings.Split(s, ",")
return
}

View File

@@ -0,0 +1,38 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoUpsertDmSpecialLocation smoke-tests the insert-or-update path
// against the test database using zero-value arguments.
func TestDaoUpsertDmSpecialLocation(t *testing.T) {
convey.Convey("UpsertDmSpecialLocation", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
locations = ""
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
err := testDao.UpsertDmSpecialLocation(c, tp, oid, locations)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
// TestDaoDMSpecialLocations only checks that the call completes; the result
// is not asserted because the row may not exist in the test database.
func TestDaoDMSpecialLocations(t *testing.T) {
convey.Convey("DMSpecialLocations", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(1)
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.DMSpecialLocations(c, tp, oid)
})
})
}

View File

@@ -0,0 +1,522 @@
package dao
import (
"context"
"go-common/app/job/main/dm2/model"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// The hit* tests below only verify that each table-shard helper returns a
// value for oid 0; the sharding arithmetic itself is not asserted.
func TestDaohitSubject(t *testing.T) {
convey.Convey("hitSubject", t, func(ctx convey.C) {
var (
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
p1 := testDao.hitSubject(oid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaohitIndex(t *testing.T) {
convey.Convey("hitIndex", t, func(ctx convey.C) {
var (
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
p1 := testDao.hitIndex(oid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaohitContent(t *testing.T) {
convey.Convey("hitContent", t, func(ctx convey.C) {
var (
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
p1 := testDao.hitContent(oid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaohitSubtile(t *testing.T) {
convey.Convey("hitSubtile", t, func(ctx convey.C) {
var (
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
p1 := testDao.hitSubtile(oid)
ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
ctx.So(p1, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoAddSubject(t *testing.T) {
convey.Convey("AddSubject", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
pid = int64(0)
mid = int64(0)
maxlimit = int64(0)
attr = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.AddSubject(c, tp, oid, pid, mid, maxlimit, attr)
})
})
}
func TestDaoUpdateSubAttr(t *testing.T) {
convey.Convey("UpdateSubAttr", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
attr = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.UpdateSubAttr(c, tp, oid, attr)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoUpdateSubMid(t *testing.T) {
convey.Convey("UpdateSubMid", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
mid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.UpdateSubMid(c, tp, oid, mid)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoSubject(t *testing.T) {
convey.Convey("Subject", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
s, err := testDao.Subject(c, tp, oid)
ctx.Convey("Then err should be nil.s should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(s, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoUpdateChildpool(t *testing.T) {
convey.Convey("UpdateChildpool", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
childpool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.UpdateChildpool(c, tp, oid, childpool)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoTxIncrSubjectCount(t *testing.T) {
convey.Convey("TxIncrSubjectCount", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
tp = int32(0)
oid = int64(0)
acount = int64(0)
count = int64(0)
childpool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.TxIncrSubjectCount(tx, tp, oid, acount, count, childpool)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoTxAddIndex(t *testing.T) {
convey.Convey("TxAddIndex", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
m = &model.DM{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
id, err := testDao.TxAddIndex(tx, m)
ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(id, convey.ShouldNotBeNil)
})
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoIndexs(t *testing.T) {
convey.Convey("Indexs", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.Indexs(c, tp, oid)
})
})
}
func TestDaoIndexsSeg(t *testing.T) {
convey.Convey("IndexsSeg", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
ps = int64(0)
pe = int64(0)
limit = int64(0)
pool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.IndexsSeg(c, tp, oid, ps, pe, limit, pool)
})
})
}
func TestDaoIndexsSegID(t *testing.T) {
convey.Convey("IndexsSegID", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
ps = int64(0)
pe = int64(0)
limit = int64(0)
pool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.IndexsSegID(c, tp, oid, ps, pe, limit, pool)
})
})
}
func TestDaoIndexsID(t *testing.T) {
convey.Convey("IndexsID", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
pool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
dmids, err := testDao.IndexsID(c, tp, oid, pool)
ctx.Convey("Then err should be nil.dmids should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(dmids, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoIndexsByid(t *testing.T) {
convey.Convey("IndexsByid", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
dmids = []int64{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.IndexsByid(c, tp, oid, dmids)
})
})
}
func TestDaoIndexsByPool(t *testing.T) {
convey.Convey("IndexsByPool", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
pool = int32(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
dms, dmids, err := testDao.IndexsByPool(c, tp, oid, pool)
ctx.Convey("Then err should be nil.dms,dmids should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(dmids, convey.ShouldNotBeNil)
ctx.So(dms, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoTxAddContent(t *testing.T) {
convey.Convey("TxAddContent", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
oid = int64(0)
m = &model.Content{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
id, err := testDao.TxAddContent(tx, oid, m)
ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(id, convey.ShouldNotBeNil)
})
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoTxAddContentSpecial(t *testing.T) {
convey.Convey("TxAddContentSpecial", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
m = &model.ContentSpecial{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
id, err := testDao.TxAddContentSpecial(tx, m)
ctx.Convey("Then err should be nil.id should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(id, convey.ShouldNotBeNil)
})
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoContent(t *testing.T) {
convey.Convey("Content", t, func(ctx convey.C) {
var (
c = context.Background()
oid = int64(0)
dmid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
ct, err := testDao.Content(c, oid, dmid)
ctx.Convey("Then err should be nil.ct should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(ct, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoContents(t *testing.T) {
convey.Convey("Contents", t, func(ctx convey.C) {
var (
c = context.Background()
oid = int64(0)
dmids = []int64{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
ctsMap, err := testDao.Contents(c, oid, dmids)
ctx.Convey("Then err should be nil.ctsMap should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(ctsMap, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoContentsSpecial(t *testing.T) {
convey.Convey("ContentsSpecial", t, func(ctx convey.C) {
var (
c = context.Background()
dmids = []int64{123}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.ContentsSpecial(c, dmids)
})
})
}
func TestDaoContentSpecial(t *testing.T) {
convey.Convey("ContentSpecial", t, func(ctx convey.C) {
var (
c = context.Background()
dmid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
contentSpe, err := testDao.ContentSpecial(c, dmid)
ctx.Convey("Then err should be nil.contentSpe should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(contentSpe, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoDelDMHideState(t *testing.T) {
convey.Convey("DelDMHideState", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
dmid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.DelDMHideState(c, tp, oid, dmid)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoTxIncrSubMCount(t *testing.T) {
convey.Convey("TxIncrSubMCount", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
tp = int32(0)
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
affect, err := testDao.TxIncrSubMCount(tx, tp, oid)
ctx.Convey("Then err should be nil.affect should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(affect, convey.ShouldNotBeNil)
})
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoUpdateSubtitle(t *testing.T) {
convey.Convey("UpdateSubtitle", t, func(ctx convey.C) {
var (
c = context.Background()
subtitle = &model.Subtitle{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
err := testDao.UpdateSubtitle(c, subtitle)
ctx.Convey("Then err should be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
})
}
func TestDaoGetSubtitles(t *testing.T) {
convey.Convey("GetSubtitles", t, func(ctx convey.C) {
var (
c = context.Background()
tp = int32(0)
oid = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
subtitles, err := testDao.GetSubtitles(c, tp, oid)
ctx.Convey("Then err should be nil.subtitles should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(subtitles, convey.ShouldNotBeNil)
})
})
})
}
func TestDaoGetSubtitle(t *testing.T) {
convey.Convey("GetSubtitle", t, func(ctx convey.C) {
var (
c = context.Background()
oid = int64(0)
subtitleID = int64(0)
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.GetSubtitle(c, oid, subtitleID)
})
})
}
func TestDaoTxUpdateSubtitle(t *testing.T) {
convey.Convey("TxUpdateSubtitle", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
subtitle = &model.Subtitle{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.TxUpdateSubtitle(tx, subtitle)
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoTxAddSubtitlePub(t *testing.T) {
convey.Convey("TxAddSubtitlePub", t, func(ctx convey.C) {
var (
tx, _ = testDao.BeginTran(c)
subtitlePub = &model.SubtitlePub{}
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
testDao.TxAddSubtitlePub(tx, subtitlePub)
})
ctx.Reset(func() {
tx.Commit()
})
})
}
func TestDaoMaskMids(t *testing.T) {
convey.Convey("MaskMids", t, func(ctx convey.C) {
var (
c = context.Background()
)
ctx.Convey("When everything goes positive", func(ctx convey.C) {
mids, err := testDao.MaskMids(c)
ctx.Convey("Then err should be nil.mids should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(mids, convey.ShouldNotBeNil)
})
})
})
}

View File

@@ -0,0 +1,21 @@
package dao
import (
"context"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
const (
_dataRankURI = "/data/rank/recent_region-%d-%d.json"
)
// RankList get data rank by tid.
// It fetches the recent hot-archive ranking JSON for region tid over the
// last `day` days from the big-data rank host.
func (d *Dao) RankList(c context.Context, tid int64, day int32) (resp *model.RankRecentResp, err error) {
if err = d.httpCli.RESTfulGet(c, d.conf.Host.DataRank+_dataRankURI, "", nil, &resp, tid, day); err != nil {
log.Error("RankList(tid:%v,day:%v),error(%v)", tid, day, err)
return
}
return
}

View File

@@ -0,0 +1,20 @@
package dao
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestRankList hits the real rank endpoint (region 185, 3 days) and logs
// the returned list; it requires network access to the data host.
func TestRankList(t *testing.T) {
Convey("", t, func() {
res, err := testDao.RankList(context.Background(), 185, 3)
So(err, ShouldBeNil)
So(res, ShouldNotBeNil)
t.Log(res)
for _, v := range res.List {
t.Log(v)
}
})
}

View File

@@ -0,0 +1,146 @@
package dao
import (
"context"
"fmt"
"strconv"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
// dm xml list v1
_prefixDM = "dm_v1_%d_%d" // dm_v1_tpe_oid
divide = 34359738368 // 2^35
)
// keyDM returns the redis key of oid's dm sorted set.
func keyDM(tp int32, oid int64) (key string) {
return fmt.Sprintf(_prefixDM, tp, oid)
}
// score computes a dm's rank in the redis sorted set.
// The score guarantees cache ordering: normal dm, then protected normal dm,
// then subtitle dm, then script dm.
func score(dm *model.DM) (score float64) {
// NOTE a redis score carries at most 17 significant digits, so we use up
// to ten integer digits plus ten fractional digits here
v := dm.ID / divide // 2^63 / 2^35 = 2^28-1, integer part max 268435455
k := dm.ID % divide // 8 digits of precision; the last 5 digits are negligible
r := int64(dm.Pool)<<29 | int64(dm.Attr)&1<<28 | v // NOTE v should less than 2^28
score, _ = strconv.ParseFloat(fmt.Sprintf("%d.%d", r, k), 64)
return
}
// AddDMCache add dm to redis.
// It ZADDs the marshaled dm into oid's sorted set (score encodes
// pool/protect/id ordering, see score()) and refreshes the key TTL in one
// pipelined round trip.
func (d *Dao) AddDMCache(c context.Context, dm *model.DM) (err error) {
var (
conn = d.dmRds.Get(c)
value []byte
key = keyDM(dm.Type, dm.Oid)
)
defer conn.Close()
if value, err = dm.Marshal(); err != nil {
log.Error("dm.Marshal(%v) error(%v)", dm, err)
return
}
if err = conn.Send("ZADD", key, score(dm), value); err != nil {
log.Error("conn.Send(ZADD %v) error(%v)", dm, err)
return
}
if err = conn.Send("EXPIRE", key, d.dmRdsExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
// drain the two pipelined replies (ZADD, EXPIRE)
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// SetDMCache flush dm list to redis.
// All ZADDs plus the final EXPIRE are pipelined; exactly len(dms)+1
// replies are drained afterwards.
func (d *Dao) SetDMCache(c context.Context, tp int32, oid int64, dms []*model.DM) (err error) {
var (
value []byte
conn = d.dmRds.Get(c)
key = keyDM(tp, oid)
)
defer conn.Close()
for _, dm := range dms {
if value, err = dm.Marshal(); err != nil {
log.Error("dm.Marshal(%v) error(%v)", dm, err)
return
}
if err = conn.Send("ZADD", key, score(dm), value); err != nil {
log.Error("conn.Send(ZADD %v) error(%v)", dm, err)
return
}
}
if err = conn.Send("EXPIRE", key, d.dmRdsExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
for i := 0; i < len(dms)+1; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// DelDMCache delete redis cache of oid.
func (d *Dao) DelDMCache(c context.Context, tp int32, oid int64) (err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	key := keyDM(tp, oid)
	if _, err = conn.Do("DEL", key); err != nil {
		log.Error("conn.Do(DEL %s) error(%v)", key, err)
	}
	return
}
// ExpireDMCache renews the TTL on oid's dm sorted set; ok reports whether
// the key existed.
func (d *Dao) ExpireDMCache(c context.Context, tp int32, oid int64) (ok bool, err error) {
	var (
		conn = d.dmRds.Get(c)
		key  = keyDM(tp, oid)
	)
	defer conn.Close()
	ok, err = redis.Bool(conn.Do("EXPIRE", key, d.dmRdsExpire))
	if err != nil {
		log.Error("conn.Do(EXPIRE %s) error(%v)", key, err)
	}
	return
}
// DMCache returns every serialized dm in oid's sorted set, in score order.
func (d *Dao) DMCache(c context.Context, tp int32, oid int64) (res [][]byte, err error) {
	var (
		conn = d.dmRds.Get(c)
		key  = keyDM(tp, oid)
	)
	defer conn.Close()
	if res, err = redis.ByteSlices(conn.Do("ZRANGE", key, 0, -1)); err != nil {
		log.Error("conn.Do(ZRANGE %s) error(%v)", key, err)
	}
	return
}
// TrimDMCache pops the count lowest-ranked dm entries from oid's sorted set.
func (d *Dao) TrimDMCache(c context.Context, tp int32, oid, count int64) (err error) {
	var (
		conn = d.dmRds.Get(c)
		key  = keyDM(tp, oid)
	)
	defer conn.Close()
	if _, err = conn.Do("ZREMRANGEBYRANK", key, 0, count-1); err != nil {
		log.Error("conn.Do(ZREMRANGEBYRANK %s) error(%v)", key, err)
	}
	return
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
_maskJobKey = "mask_job"
)
// SetnxMaskJob tries to acquire the mask-job lock by SETNX-ing the
// mask_job key; ok is true when this caller won the lock.
// NOTE(review): no TTL is set on the key, so a crashed holder leaves the
// lock stuck until DelMaskJob runs — confirm that is acceptable.
func (d *Dao) SetnxMaskJob(c context.Context, value string) (ok bool, err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	if ok, err = redis.Bool(conn.Do("SETNX", _maskJobKey, value)); err != nil {
		// fix: log the real method name (was "d.SetnxMask")
		log.Error("d.SetnxMaskJob(value:%s),error(%v)", value, err)
	}
	return
}
// GetMaskJob returns the current mask_job payload.
// NOTE(review): when the key is absent conn.Do yields redis.ErrNil, which
// is logged and propagated here — confirm callers treat ErrNil as
// "no job running" rather than a failure.
func (d *Dao) GetMaskJob(c context.Context) (value string, err error) {
var (
conn = d.dmRds.Get(c)
)
defer conn.Close()
if value, err = redis.String(conn.Do("GET", _maskJobKey)); err != nil {
log.Error("d.GetMaskJob,error(%v)", err)
return
}
return
}
// GetSetMaskJob atomically stores value in the mask_job key and returns
// the previous payload.
func (d *Dao) GetSetMaskJob(c context.Context, value string) (old string, err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	old, err = redis.String(conn.Do("GETSET", _maskJobKey, value))
	if err != nil {
		log.Error("d.GetSetMaskJob(value:%s),error(%v)", value, err)
	}
	return
}
// DelMaskJob releases the mask-job lock by deleting the mask_job key.
func (d *Dao) DelMaskJob(c context.Context) (err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	if _, err = conn.Do("DEL", _maskJobKey); err != nil {
		log.Error("d.DelMaskJob,error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,40 @@
package dao
import (
"context"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// The mask-job tests below exercise the lock lifecycle against the test
// redis: acquire (SETNX), read, swap (GETSET) and release (DEL).
func TestSetnxMaskJob(t *testing.T) {
Convey("", t, func() {
ok, err := testDao.SetnxMaskJob(context.Background(), time.Now().String())
So(err, ShouldBeNil)
t.Logf("ok:%v", ok)
})
}
func TestGetMaskJob(t *testing.T) {
Convey("", t, func() {
value, err := testDao.GetMaskJob(context.Background())
So(err, ShouldBeNil)
t.Logf("ok:%v", value)
})
}
func TestGetSetMaskJob(t *testing.T) {
Convey("", t, func() {
value, err := testDao.GetSetMaskJob(context.Background(), time.Now().String())
So(err, ShouldBeNil)
t.Logf("ok:%v", value)
})
}
func TestDelMaskJob(t *testing.T) {
Convey("", t, func() {
err := testDao.DelMaskJob(context.Background())
So(err, ShouldBeNil)
})
}

View File

@@ -0,0 +1,89 @@
package dao
import (
"context"
"strconv"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
// key prefix of an up's recent-dm sorted set (member = marshaled dm,
// score = dmid)
_prefixRecent = "dm_rct_"
)
// keyRecent returns the recent-dm redis key for up mid.
func keyRecent(mid int64) string {
return _prefixRecent + strconv.FormatInt(mid, 10)
}
// AddRecentDM add recent dm of up to redis.
// Pipeline of four commands: dedupe any entry scored with the same dmid
// (ZREMRANGEBYSCORE), ZADD the marshaled dm with score=dm.ID, refresh the
// TTL, then ZCARD — whose reply is returned as count so the caller can
// trim overflow.
func (d *Dao) AddRecentDM(c context.Context, mid int64, dm *model.DM) (count int64, err error) {
var (
conn = d.dmRctRds.Get(c)
key = keyRecent(mid)
value []byte
)
defer conn.Close()
if value, err = dm.Marshal(); err != nil {
log.Error("dm.Marshal(%v) error(%v)", dm, err)
return
}
if err = conn.Send("ZREMRANGEBYSCORE", key, dm.ID, dm.ID); err != nil {
log.Error("conn.Do(ZREMRANGEBYSCORE %s) error(%v)", key, err)
return
}
if err = conn.Send("ZADD", key, dm.ID, value); err != nil {
log.Error("conn.Send(ZADD %v) error(%v)", dm, err)
return
}
if err = conn.Send("EXPIRE", key, d.dmRctExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Send("ZCARD", key); err != nil {
log.Error("conn.Send(ZCARD %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
// drain the first three replies; the fourth is the ZCARD result below
for i := 0; i < 3; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
if count, err = redis.Int64(conn.Receive()); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
return
}
// ZRemRecentDM removes the entry scored with dmid from mid's recent-dm
// sorted set (score == dmid, see AddRecentDM).
func (d *Dao) ZRemRecentDM(c context.Context, mid, dmid int64) (err error) {
	var (
		conn = d.dmRctRds.Get(c)
		key  = keyRecent(mid)
	)
	defer conn.Close()
	if _, err = conn.Do("ZREMRANGEBYSCORE", key, dmid, dmid); err != nil {
		// fix: log the actual error — the old code passed dmid in the %v
		// slot and dropped err entirely
		log.Error("conn.Do(ZREMRANGEBYSCORE %s,%d) error(%v)", key, dmid, err)
	}
	return
}
// TrimRecentDM drops the count lowest-ranked entries from mid's
// recent-dm sorted set.
func (d *Dao) TrimRecentDM(c context.Context, mid, count int64) (err error) {
	conn := d.dmRctRds.Get(c)
	defer conn.Close()
	key := keyRecent(mid)
	if _, err = conn.Do("ZREMRANGEBYRANK", key, 0, count-1); err != nil {
		log.Error("conn.Do(ZREMRANGEBYRANK %s) error(%v)", key, err)
	}
	return
}

View File

@@ -0,0 +1,26 @@
package dao
import (
"context"
"testing"
)
// The recent-dm tests rely on a package-level `dm` fixture declared in the
// shared dao test setup.
func TestAddRecentDM(t *testing.T) {
count, err := testDao.AddRecentDM(context.TODO(), 123, dm)
if err != nil {
t.Fatal(err)
}
t.Logf("count:%d", count)
}
func TestZRemRecentDM(t *testing.T) {
if err := testDao.ZRemRecentDM(context.TODO(), 123, 719150142); err != nil {
t.Fatal(err)
}
}
func TestTrimRecentDM(t *testing.T) {
if err := testDao.TrimRecentDM(context.TODO(), 123, 1); err != nil {
t.Fatal(err)
}
}

View File

@@ -0,0 +1,282 @@
package dao
import (
"context"
"encoding/xml"
"fmt"
"go-common/app/job/main/dm2/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
_keyIdx = "i_%d_%d_%d_%d" // normal dm segment sortedset(s_type_oid_cnt_n, ctime, dmid)
_keyIdxSub = "s_%d_%d" // subtitle dm sortedset(s_type_oid, progress, dmid)
_keyIdxContent = "c_%d_%d" // dm content hash(d_type_oid, dmid, xml)
)
// keyIdx returns the dmid sorted-set key of segment n (of cnt total) of oid.
func keyIdx(tp int32, oid, cnt, n int64) string {
return fmt.Sprintf(_keyIdx, tp, oid, cnt, n)
}
// keyIdxSub return dm idx key.
func keyIdxSub(tp int32, oid int64) string {
return fmt.Sprintf(_keyIdxSub, tp, oid)
}
// keyIdxContent return key of different dm.
func keyIdxContent(tp int32, oid int64) string {
return fmt.Sprintf(_keyIdxContent, tp, oid)
}
// ExpireDMID set expire time of index.
// ok reports whether the segment key existed.
func (d *Dao) ExpireDMID(c context.Context, tp int32, oid, cnt, n int64) (ok bool, err error) {
	conn := d.dmSegRds.Get(c)
	defer conn.Close()
	key := keyIdx(tp, oid, cnt, n)
	ok, err = redis.Bool(conn.Do("EXPIRE", key, d.dmSegExpire))
	if err != nil {
		log.Error("conn.Do(EXPIRE %s) error(%v)", key, err)
	}
	return
}
// DMIDCache returns every dmid stored in segment (cnt, n) of oid.
// The limit parameter is currently unused (the whole set is read) but is
// kept for interface compatibility with callers.
func (d *Dao) DMIDCache(c context.Context, tp int32, oid int64, cnt, n, limit int64) (dmids []int64, err error) {
	var (
		conn = d.dmSegRds.Get(c)
		key  = keyIdx(tp, oid, cnt, n)
	)
	defer conn.Close()
	if dmids, err = redis.Int64s(conn.Do("ZRANGE", key, 0, -1)); err != nil {
		// fix: log the command actually issued (message claimed ZRANGEBYSCORE)
		log.Error("DMIDCache.conn.Do(ZRANGE %s) error(%v)", key, err)
	}
	return
}
// AddDMIDCache add dmid(normal and special) to segment redis.
// Each dmid is ZADDed with itself as score; all ZADDs plus the EXPIRE are
// pipelined, so len(dmids)+1 replies are drained.
func (d *Dao) AddDMIDCache(c context.Context, tp int32, oid, cnt, n int64, dmids ...int64) (err error) {
key := keyIdx(tp, oid, cnt, n)
conn := d.dmSegRds.Get(c)
defer conn.Close()
for _, dmid := range dmids {
if err = conn.Send("ZADD", key, dmid, dmid); err != nil {
log.Error("conn.Send(ZADD %s) error(%v)", key, err)
return
}
}
if err = conn.Send("EXPIRE", key, d.dmSegExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
for i := 0; i < len(dmids)+1; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// DelDMIDCache drops the dmid sorted set of segment (cnt, n) of oid.
func (d *Dao) DelDMIDCache(c context.Context, tp int32, oid, cnt, n int64) (err error) {
	conn := d.dmSegRds.Get(c)
	defer conn.Close()
	key := keyIdx(tp, oid, cnt, n)
	if _, err = conn.Do("DEL", key); err != nil {
		log.Error("conn.Do(DEL %s) err(%v)", key, err)
	}
	return
}
// ExpireDMIDSubtitle renews the TTL of oid's subtitle-dmid sorted set;
// ok reports whether the key existed.
func (d *Dao) ExpireDMIDSubtitle(c context.Context, tp int32, oid int64) (ok bool, err error) {
	conn := d.dmSegRds.Get(c)
	defer conn.Close()
	key := keyIdxSub(tp, oid)
	ok, err = redis.Bool(conn.Do("EXPIRE", key, d.dmSegExpire))
	if err != nil {
		log.Error("conn.Do(EXPIRE %s) error(%v)", key, err)
	}
	return
}
// DMIDSubtitleCache get subtitle dmid.
// It reads up to limit dmids whose progress score falls in [ps, pe].
func (d *Dao) DMIDSubtitleCache(c context.Context, tp int32, oid int64, ps, pe, limit int64) (dmids []int64, err error) {
var (
conn = d.dmSegRds.Get(c)
key = keyIdxSub(tp, oid)
)
defer conn.Close()
if dmids, err = redis.Int64s(conn.Do("ZRANGEBYSCORE", key, ps, pe, "LIMIT", 0, limit)); err != nil {
log.Error("conn.DO(ZRANGEBYSCORE %s) error(%v)", key, err)
}
return
}
// AddDMIDSubtitleCache add subtitle dmid to redis.
// Each dm is ZADDed with its playback progress as score; ZADDs plus the
// EXPIRE are pipelined, so len(dms)+1 replies are drained.
func (d *Dao) AddDMIDSubtitleCache(c context.Context, tp int32, oid int64, dms ...*model.DM) (err error) {
key := keyIdxSub(tp, oid)
conn := d.dmSegRds.Get(c)
defer conn.Close()
for _, dm := range dms {
if err = conn.Send("ZADD", key, dm.Progress, dm.ID); err != nil {
log.Error("conn.Send(ZADD %s) error(%v)", key, err)
return
}
}
if err = conn.Send("EXPIRE", key, d.dmSegExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
for i := 0; i < len(dms)+1; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// DelDMIDSubtitleCache drops oid's subtitle-dmid sorted set.
func (d *Dao) DelDMIDSubtitleCache(c context.Context, tp int32, oid int64) (err error) {
	conn := d.dmSegRds.Get(c)
	defer conn.Close()
	key := keyIdxSub(tp, oid)
	if _, err = conn.Do("DEL", key); err != nil {
		log.Error("conn.Do(DEL %s) error(%v)", key, err)
	}
	return
}
// AddIdxContentCaches add index content cache to redis.
// Each dm's xml fragment is HSET under its dmid in oid's content hash;
// HSETs plus the EXPIRE are pipelined, so len(dms)+1 replies are drained.
func (d *Dao) AddIdxContentCaches(c context.Context, tp int32, oid int64, dms ...*model.DM) (err error) {
var (
conn = d.dmSegRds.Get(c)
key = keyIdxContent(tp, oid)
)
defer conn.Close()
for _, dm := range dms {
if err = conn.Send("HSET", key, dm.ID, dm.ToXMLSeg()); err != nil {
log.Error("conn.Send(HSET %s,%v) error(%v)", key, dm, err)
return
}
}
if err = conn.Send("EXPIRE", key, d.dmSegExpire); err != nil {
log.Error("conn.Send(EXPIRE %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("conn.Flush() error(%v)", err)
return
}
for i := 0; i <= len(dms); i++ {
if _, err = conn.Receive(); err != nil {
log.Error("conn.Receive() error(%v)", err)
return
}
}
return
}
// DelIdxContentCaches removes the given dmids from oid's content hash.
// It is a no-op when dmids is empty: a bare "HDEL key" with no fields is a
// redis protocol error, which the old code would have issued.
func (d *Dao) DelIdxContentCaches(c context.Context, tp int32, oid int64, dmids ...int64) (err error) {
	if len(dmids) == 0 {
		return
	}
	key := keyIdxContent(tp, oid)
	conn := d.dmSegRds.Get(c)
	defer conn.Close()
	// build "HDEL key dmid1 dmid2 ..." argument list
	args := make([]interface{}, 0, len(dmids)+1)
	args = append(args, key)
	for _, dmid := range dmids {
		args = append(args, dmid)
	}
	if _, err = conn.Do("HDEL", args...); err != nil {
		log.Error("conn.Do(HDEL %s) error(%v)", key, err)
	}
	return
}
// IdxContentCache get xml info by dmid.
// It HMGETs the cached xml fragments for dmids and concatenates the hits
// into res; dmids with no cached fragment are returned in missed.
func (d *Dao) IdxContentCache(c context.Context, tp int32, oid int64, dmids []int64) (res []byte, missed []int64, err error) {
var (
k int
dmid int64
values [][]byte
key = keyIdxContent(tp, oid)
args = []interface{}{key}
)
for _, dmid = range dmids {
args = append(args, dmid)
}
conn := d.dmSegRds.Get(c)
defer conn.Close()
if values, err = redis.ByteSlices(conn.Do("HMGET", args...)); err != nil {
log.Error("conn.Do(HMGET %v) error(%v)", args, err)
// treat an empty reply as a plain cache miss (nothing cached, no error)
if err == redis.ErrNil {
return nil, nil, nil
}
return
}
for k, dmid = range dmids {
// an empty value means this field was absent from the hash
if len(values[k]) == 0 {
missed = append(missed, dmid)
continue
}
res = append(res, values[k]...)
}
return
}
// IdxContentCacheV2 get elems info by dmid.
// Like IdxContentCache, but each cached xml fragment is parsed into a
// model.Elem; fragments that are absent or fail to parse go into missed.
func (d *Dao) IdxContentCacheV2(c context.Context, tp int32, oid int64, dmids []int64) (elems []*model.Elem, missed []int64, err error) {
var (
values [][]byte
key = keyIdxContent(tp, oid)
args = []interface{}{key}
)
for _, dmid := range dmids {
args = append(args, dmid)
}
conn := d.dmSegRds.Get(c)
defer conn.Close()
if values, err = redis.ByteSlices(conn.Do("HMGET", args...)); err != nil {
log.Error("conn.Do(HMGET %v) error(%v)", args, err)
// treat an empty reply as a plain cache miss (nothing cached, no error)
if err == redis.ErrNil {
return nil, nil, nil
}
return
}
for k, dmid := range dmids {
if len(values[k]) == 0 {
missed = append(missed, dmid)
continue
}
// unparsable cache entries are treated as misses so the caller can
// refill them from the database
elem, err := d.xmlToElem(values[k])
if err != nil {
missed = append(missed, dmid)
continue
}
elems = append(elems, elem)
}
return
}
// xmlToElem converts a cached xml tag of the form
// <d p="dmid,attr,progress,mode,fontsize,color,ctime,pool,user-hash">content</d>
// into a model.Elem during the cache transition period.
func (d *Dao) xmlToElem(data []byte) (e *model.Elem, err error) {
var v struct {
XMLName xml.Name `xml:"d"`
Attribute string `xml:"p,attr"`
Content string `xml:",chardata"`
}
if err = xml.Unmarshal(data, &v); err != nil {
return
}
e = &model.Elem{Content: v.Content, Attribute: v.Attribute}
return
}

View File

@@ -0,0 +1,53 @@
package dao
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// Segment-cache tests: read/write a segment dmid set, fetch parsed content
// elems, and check the xml-to-Elem conversion helper.
func TestDMIDCache(t *testing.T) {
var (
tp int32 = 1
oid int64 = 1508
cnt int64 = 26
num int64 = 1
c = context.TODO()
)
Convey("", t, func() {
_, err := testDao.DMIDCache(c, tp, oid, cnt, num, 100)
So(err, ShouldBeNil)
})
}
func TestAddDMIDCache(t *testing.T) {
Convey("", t, func() {
err := testDao.AddDMIDCache(c, 1, 1508, 26, 1, 1233333333333)
So(err, ShouldBeNil)
})
}
func TestIdxContentCacheV2(t *testing.T) {
var (
tp int32 = 1
oid int64 = 1508
c = context.TODO()
dmids = []int64{2355015081, 2356915089}
)
Convey("", t, func() {
elems, missed, err := testDao.IdxContentCacheV2(c, tp, oid, dmids)
So(err, ShouldBeNil)
t.Logf("missed dmid:%v", missed)
t.Logf("elems:%+v", elems)
})
}
func TestXMLToElem(t *testing.T) {
Convey("convert xml tag to elem struct", t, func() {
s := []byte(`<d p="1,1,1,1,11,111,11,1,23123123">弹幕内容</d>`)
elem, err := testDao.xmlToElem(s)
So(err, ShouldBeNil)
t.Logf("%+v", elem)
})
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"go-common/library/cache/redis"
"go-common/library/log"
)
const (
_taskJobKey = "task_job"
)
// SetnxTaskJob tries to acquire the task-job lock by SETNX-ing the
// task_job key; ok is true when this caller won the lock.
// NOTE(review): no TTL is set on the key, so a crashed holder leaves the
// lock stuck until DelTaskJob runs — confirm that is acceptable.
func (d *Dao) SetnxTaskJob(c context.Context, value string) (ok bool, err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	if ok, err = redis.Bool(conn.Do("SETNX", _taskJobKey, value)); err != nil {
		// fix: log the real method name (was "d.SetnxMask", copied from the
		// mask-job file)
		log.Error("d.SetnxTaskJob(value:%s),error(%v)", value, err)
	}
	return
}
// GetTaskJob returns the current task-job marker value.
// NOTE(review): a missing key surfaces as redis.ErrNil here and is logged as an
// error; callers must treat that as "not set" — confirm before changing.
func (d *Dao) GetTaskJob(c context.Context) (value string, err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	if value, err = redis.String(conn.Do("GET", _taskJobKey)); err != nil {
		// fix: log line referenced the wrong method name (d.GetMaskJob).
		log.Error("d.GetTaskJob,error(%v)", err)
		return
	}
	return
}
// GetSetTaskJob atomically swaps the task-job marker for value and returns the
// previous value via GETSET.
func (d *Dao) GetSetTaskJob(c context.Context, value string) (old string, err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	old, err = redis.String(conn.Do("GETSET", _taskJobKey, value))
	if err != nil {
		log.Error("d.GetSetTaskJob(value:%s),error(%v)", value, err)
	}
	return
}
// DelTaskJob removes the task-job marker key.
func (d *Dao) DelTaskJob(c context.Context) (err error) {
	conn := d.dmRds.Get(c)
	defer conn.Close()
	if _, err = conn.Do("DEL", _taskJobKey); err != nil {
		log.Error("d.DelTaskJob,error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,65 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoSetnxTaskJob acquires the task-job marker; integration test against the test Redis.
func TestDaoSetnxTaskJob(t *testing.T) {
	convey.Convey("SetnxTaskJob", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			value = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			ok, err := testDao.SetnxTaskJob(c, value)
			ctx.Convey("Then err should be nil.ok should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(ok, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoGetTaskJob reads the marker; result deliberately unchecked (key may be absent).
func TestDaoGetTaskJob(t *testing.T) {
	convey.Convey("GetTaskJob", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.GetTaskJob(c)
		})
	})
}
// TestDaoGetSetTaskJob swaps the marker and asserts the previous value is returned.
func TestDaoGetSetTaskJob(t *testing.T) {
	convey.Convey("GetSetTaskJob", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			value = ""
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			old, err := testDao.GetSetTaskJob(c, value)
			ctx.Convey("Then err should be nil.old should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(old, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoDelTaskJob removes the marker key; DEL is idempotent so it passes even when absent.
func TestDaoDelTaskJob(t *testing.T) {
	convey.Convey("DelTaskJob", t, func(ctx convey.C) {
		var (
			c = context.Background()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := testDao.DelTaskJob(c)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,71 @@
package dao
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
)
var (
	// c is the shared context for the dao tests in this package.
	c = context.TODO()
	// dm is a canned danmaku fixture reused across the cache tests below.
	dm = &model.DM{
		ID:       719150142,
		Oid:      1221,
		Type:     1,
		Mid:      478046,
		Progress: 0,
		State:    0,
		Content: &model.Content{
			ID:       719150142,
			FontSize: 24,
			Mode:     1,
			Msg:      "aaa",
		}}
)
// TestAddDMCache pushes the fixture dm into the list cache; integration test.
func TestAddDMCache(t *testing.T) {
	if err := testDao.AddDMCache(context.TODO(), dm); err != nil {
		t.Error(err)
	}
}
// TestSetDMCache overwrites the list cache with two copies of the fixture.
func TestSetDMCache(t *testing.T) {
	if err := testDao.SetDMCache(c, dm.Type, dm.Oid, []*model.DM{dm, dm}); err != nil {
		t.Error(err)
	}
}
// TestDelDMCache drops the dm list cache for (type=1, oid=1221).
func TestDelDMCache(t *testing.T) {
	if err := testDao.DelDMCache(context.TODO(), 1, 1221); err != nil {
		t.Error(err)
	}
}
// TestExpireDMCache refreshes the TTL on the dm list cache and logs whether the key existed.
func TestExpireDMCache(t *testing.T) {
	ok, err := testDao.ExpireDMCache(context.TODO(), 1, 1221)
	if err != nil {
		t.Error(err)
	}
	t.Log(ok)
}
// TestDMCache reads the cached dm list for the fixture and unmarshals every entry.
func TestDMCache(t *testing.T) {
	values, err := testDao.DMCache(context.TODO(), dm.Type, dm.Oid)
	if err != nil {
		t.Error(err)
	}
	for _, value := range values {
		dmCache := &model.DM{}
		if err = dmCache.Unmarshal(value); err != nil {
			t.Errorf("Unmarshal(%s) error(%v)", value, err)
		}
		t.Log(dmCache)
	}
}
// TestTrimDMCache trims the cached dm list for the fixture oid down to one entry.
func TestTrimDMCache(t *testing.T) {
	if err := testDao.TrimDMCache(context.TODO(), 1, dm.Oid, 1); err != nil {
		t.Error(err)
	}
}

View File

@@ -0,0 +1,230 @@
package dao
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"go-common/app/job/main/dm2/model"
"go-common/library/database/sql"
"go-common/library/log"
"go-common/library/xstr"
)
const (
	// task queries: all tasks in a state / one runnable task (state 3 or 9) /
	// a task by id — each ordered by priority desc then ctime asc.
	_taskInfo = "SELECT id,topic,state,qcount,result,sub,last_index,priority,creator,reviewer,title FROM dm_task WHERE state=? order by priority desc,ctime asc"
	_oneTask  = "SELECT id,topic,state,qcount,result,sub,last_index,priority,creator,reviewer,title FROM dm_task WHERE state in (3,9) order by priority desc,ctime asc limit 1"
	_taskByID = "SELECT id,topic,state,qcount,result,sub,last_index,priority,creator,reviewer,title FROM dm_task WHERE id=?"
	// task / sub-task mutations.
	_uptTask       = "UPDATE dm_task SET state=?,last_index=?,qcount=?,result=? WHERE id=?"
	_uptSubTask    = "UPDATE dm_sub_task SET tcount=tcount+?,end=? WHERE task_id=?"
	_selectSubTask = "SELECT id,operation,rate,tcount,start,end FROM dm_sub_task WHERE task_id=?"
	// dm index tables are sharded; %03d / %02d are the shard suffixes.
	_delDMIndex     = "UPDATE dm_index_%03d SET state=? WHERE oid=? AND id IN (%s)"
	_uptSubCountSQL = "UPDATE dm_subject_%02d SET count=count-? WHERE type=? AND oid=?"
	// _merakMsgURI is the path on the Merak host used for wechat-work messages.
	_merakMsgURI = "/"
)
// TaskInfos returns every dm task in the given state, ordered by priority
// (desc) then creation time (asc).
func (d *Dao) TaskInfos(c context.Context, state int32) (tasks []*model.TaskInfo, err error) {
	rows, err := d.biliDMWriter.Query(c, _taskInfo, state)
	if err != nil {
		log.Error("d.biliDMWriter.Query(query:%s,state:%d) error(%v)", _taskInfo, state, err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		info := new(model.TaskInfo)
		if err = rows.Scan(&info.ID, &info.Topic, &info.State, &info.Count, &info.Result, &info.Sub, &info.LastIndex, &info.Priority, &info.Creator, &info.Reviewer, &info.Title); err != nil {
			log.Error("d.biliDMWriter.Scan(query:%s,state:%d) error(%v)", _taskInfo, state, err)
			return
		}
		tasks = append(tasks, info)
	}
	if err = rows.Err(); err != nil {
		log.Error("d.biliDMWriter.rows.Err() error(%v)", err)
	}
	return
}
// OneTask fetches the single highest-priority task in state 3 or 9.
// Returns (nil, nil) when no such task exists.
func (d *Dao) OneTask(c context.Context) (task *model.TaskInfo, err error) {
	task = new(model.TaskInfo)
	err = d.biliDMWriter.QueryRow(c, _oneTask).Scan(&task.ID, &task.Topic, &task.State, &task.Count, &task.Result, &task.Sub, &task.LastIndex, &task.Priority, &task.Creator, &task.Reviewer, &task.Title)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		log.Error("d.biliDMWriter.Scan(query:%s) error(%v)", _oneTask, err)
	}
	return
}
// TaskInfoByID fetches one task by its primary key.
// Returns (nil, nil) when the id does not exist.
func (d *Dao) TaskInfoByID(c context.Context, id int64) (task *model.TaskInfo, err error) {
	task = new(model.TaskInfo)
	err = d.biliDMWriter.QueryRow(c, _taskByID, id).Scan(&task.ID, &task.Topic, &task.State, &task.Count, &task.Result, &task.Sub, &task.LastIndex, &task.Priority, &task.Creator, &task.Reviewer, &task.Title)
	if err == sql.ErrNoRows {
		return nil, nil
	}
	if err != nil {
		log.Error("d.biliDMWriter.Scan(query:%s,id:%d) error(%v)", _taskByID, id, err)
	}
	return
}
// UpdateTask persists state/last_index/qcount/result for a dm task and
// returns the number of affected rows.
func (d *Dao) UpdateTask(c context.Context, task *model.TaskInfo) (affected int64, err error) {
	res, err := d.biliDMWriter.Exec(c, _uptTask, task.State, task.LastIndex, task.Count, task.Result, task.ID)
	if err != nil {
		log.Error("d.biliDMWriter.Exec(query:%s,task:%+v) error(%v)", _uptTask, task, err)
		return
	}
	return res.RowsAffected()
}
// UptSubTask adds delCount to the sub task's running total and advances its
// end time, returning the number of affected rows.
func (d *Dao) UptSubTask(c context.Context, taskID, delCount int64, end time.Time) (affected int64, err error) {
	res, err := d.biliDMWriter.Exec(c, _uptSubTask, delCount, end, taskID)
	if err != nil {
		log.Error("d.biliDMWriter.Exec(query:%s) error(%v)", _uptSubTask, err)
		return
	}
	return res.RowsAffected()
}
// SubTask loads the sub task attached to task id.
// Returns (nil, nil) when no row exists.
func (d *Dao) SubTask(c context.Context, id int64) (subTask *model.SubTask, err error) {
	// TODO: operation time
	subTask = new(model.SubTask)
	row := d.biliDMWriter.QueryRow(c, _selectSubTask, id)
	if err = row.Scan(&subTask.ID, &subTask.Operation, &subTask.Rate, &subTask.Tcount, &subTask.Start, &subTask.End); err != nil {
		if err == sql.ErrNoRows {
			// fix: a missing row is not an error; previously it still fell
			// through to log.Error with a nil err.
			return nil, nil
		}
		// fix: stray '*' typo removed from the log format ("error*(").
		log.Error("biliDM.Scan(%s, taskID:%d) error(%v)", _selectSubTask, id, err)
		return
	}
	return
}
// DelDMs marks the given dm ids of oid with state (task deletion) on the
// sharded index table, returning the number of affected rows.
func (d *Dao) DelDMs(c context.Context, oid int64, dmids []int64, state int32) (affected int64, err error) {
	query := fmt.Sprintf(_delDMIndex, d.hitIndex(oid), xstr.JoinInts(dmids))
	res, err := d.dmWriter.Exec(c, query, state, oid)
	if err != nil {
		log.Error("d.dmWriter.Exec(query:%s,oid:%d,dmids:%v) error(%v)", _delDMIndex, oid, dmids, err)
		return
	}
	return res.RowsAffected()
}
// UptSubjectCount decrements the subject's dm count by count on the sharded
// subject table, returning the number of affected rows.
func (d *Dao) UptSubjectCount(c context.Context, tp int32, oid, count int64) (affected int64, err error) {
	query := fmt.Sprintf(_uptSubCountSQL, d.hitSubject(oid))
	res, err := d.dmWriter.Exec(c, query, count, tp, oid)
	if err != nil {
		log.Error("dmWriter.Exec(query:%s,oid:%d) error(%v)", _uptSubCountSQL, oid, err)
		return
	}
	return res.RowsAffected()
}
// TaskSearchRes polls the BI search endpoint stored in task.Topic and returns
// the result count, the first hdfs path, and the job status.
// NOTE(review): uses http.Get (default client, no timeout) on purpose — the BI
// query may run long; confirm before adding a deadline.
func (d *Dao) TaskSearchRes(c context.Context, task *model.TaskInfo) (count int64, result string, state int32, err error) {
	var (
		resp *http.Response
		// res mirrors the BI response shape as decoded here; field semantics
		// assumed from the tags — verify against the BI API when changing.
		res struct {
			Code     int64    `json:"code"`
			StatusID int32    `json:"statusId"`
			Path     []string `json:"hdfsPath"`
			Count    int64    `json:"count"`
		}
		bs []byte
	)
	// may costing long time use default transport
	if resp, err = http.Get(task.Topic); err != nil {
		log.Error("http.Get(%s) error(%v)", task.Topic, err)
		return
	}
	defer resp.Body.Close()
	if bs, err = ioutil.ReadAll(resp.Body); err != nil {
		log.Error("ioutil.ReadAll url:%v error(%v)", task.Topic, err)
		return
	}
	if err = json.Unmarshal(bs, &res); err != nil {
		return
	}
	// the BI endpoint signals success with an in-body code of 200.
	if res.Code != 200 {
		err = fmt.Errorf("%v", res)
		log.Error("d.httpClient.Get(%s) code(%d)", task.Topic, res.Code)
		return
	}
	// only a finished search with at least one hdfs path yields a result.
	if res.StatusID == model.TaskSearchSuc && len(res.Path) > 0 {
		result = res.Path[0]
		count = res.Count
	}
	return count, result, res.StatusID, err
}
// SendWechatWorkMsg sends a wechat-work notification through the Merak gateway
// to the given users (deduplicated, empty names dropped).
func (d *Dao) SendWechatWorkMsg(c context.Context, content, title string, users []string) (err error) {
	// de-dup recipients while preserving order.
	userMap := make(map[string]struct{}, len(users))
	unames := make([]string, 0, len(users))
	for _, user := range users {
		if user == "" {
			continue
		}
		if _, ok := userMap[user]; ok {
			continue
		}
		userMap[user] = struct{}{}
		unames = append(unames, user)
	}
	params := url.Values{}
	params.Set("Action", "CreateWechatMessage")
	params.Set("PublicKey", d.conf.TaskConf.MsgPublicKey)
	params.Set("UserName", strings.Join(unames, ","))
	params.Set("Title", title)
	params.Set("Content", content)
	params.Set("Signature", "")
	params.Set("TreeId", "")
	// Merak expects %20 rather than '+' for spaces in the form body.
	// fix: dropped the redundant IndexByte pre-check — strings.Replace is a
	// no-op when no '+' is present.
	paramStr := strings.Replace(params.Encode(), "+", "%20", -1)
	var buffer bytes.Buffer
	buffer.WriteString(paramStr)
	querry := buffer.String()
	res := &struct {
		Code int `json:"RetCode"`
	}{}
	// fix: renamed the local from "url" to uri so it no longer shadows net/url.
	uri := d.conf.Host.MerakHost + _merakMsgURI
	req, err := http.NewRequest("POST", uri, strings.NewReader(querry))
	if err != nil {
		return
	}
	req.Header.Add("content-type", "application/x-www-form-urlencoded; charset=UTF-8")
	if err = d.httpCli.Do(c, req, res); err != nil {
		log.Error("d.SendWechatWorkMsg.client.Do(%v,%v) error(%v)", req, querry, err)
		return
	}
	if res.Code != 0 {
		log.Error("d.SendWechatWorkMsg(%s,%s,%v) res.Code != 0, res(%v)", content, title, users, res)
		err = fmt.Errorf("uri:%s,code:%d", uri+querry, res.Code)
	}
	return
}

View File

@@ -0,0 +1,152 @@
package dao
import (
"context"
"testing"
"time"
"go-common/app/job/main/dm2/model"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoTaskInfos lists tasks in state 1 and logs them; integration test against MySQL.
func TestDaoTaskInfos(t *testing.T) {
	convey.Convey("TaskInfos", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			state = int32(1)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			tasks, err := testDao.TaskInfos(c, state)
			ctx.Convey("Then err should be nil.tasks should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(tasks, convey.ShouldNotBeNil)
				for _, task := range tasks {
					t.Logf("%+v", task)
				}
			})
		})
	})
}
// TestDaoUpdateTask updates a zero-value task (id 0) — expects no error, zero rows affected.
func TestDaoUpdateTask(t *testing.T) {
	convey.Convey("UpdateTask", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			task = &model.TaskInfo{}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			affected, err := testDao.UpdateTask(c, task)
			ctx.Convey("Then err should be nil.affected should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(affected, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoDelDMs marks one concrete dmid on oid 221 as task-deleted (state 12).
func TestDaoDelDMs(t *testing.T) {
	convey.Convey("DelDMs", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			oid   = int64(221)
			dmids = []int64{719182141}
			state = int32(12)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			affected, err := testDao.DelDMs(c, oid, dmids, state)
			ctx.Convey("Then err should be nil.affected should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(affected, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoUptSubTask updates a nonexistent sub task (id 0) — expects no error.
func TestDaoUptSubTask(t *testing.T) {
	convey.Convey("UptSubTask", t, func(ctx convey.C) {
		var (
			c        = context.Background()
			taskID   = int64(0)
			delCount = int64(0)
			end      = time.Now()
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			affected, err := testDao.UptSubTask(c, taskID, delCount, end)
			ctx.Convey("Then err should be nil.affected should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(affected, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoTaskSearchRes hits a hard-coded internal BI URL; only works on the office network.
func TestDaoTaskSearchRes(t *testing.T) {
	convey.Convey("TaskSearchRes", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			task = &model.TaskInfo{
				Topic: "http://berserker.bilibili.co/m-avenger/api/hive/status/query/148/672bc22888af701529e8b3052fd2c4a7/1541066053/1389520",
			}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, result, state, err := testDao.TaskSearchRes(c, task)
			ctx.Convey("Then err should be nil.result,state should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(state, convey.ShouldNotBeNil)
				ctx.So(result, convey.ShouldNotBeNil)
				t.Log(result, state)
			})
		})
	})
}
// TestDaoUptSubjectCount decrements the count on a nonexistent subject (oid 0) — expects no error.
func TestDaoUptSubjectCount(t *testing.T) {
	convey.Convey("UptSubjectCount", t, func(ctx convey.C) {
		var (
			c     = context.Background()
			tp    = int32(1)
			oid   = int64(0)
			count = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			affected, err := testDao.UptSubjectCount(c, tp, oid, count)
			ctx.Convey("Then err should be nil.affected should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(affected, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoSendWechatWorkMsg sends a real message through the Merak gateway; side-effectful.
func TestDaoSendWechatWorkMsg(t *testing.T) {
	convey.Convey("SendWechatWorkMsg", t, func(ctx convey.C) {
		var (
			c       = context.Background()
			content = "test"
			title   = "test"
			users   = []string{"fengduzhen"}
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			err := testDao.SendWechatWorkMsg(c, content, title, users)
			ctx.Convey("Then err should be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}
// TestDaoSubTask loads sub task 32; only the error is asserted (row may be absent).
func TestDaoSubTask(t *testing.T) {
	convey.Convey("SubTask", t, func(ctx convey.C) {
		var (
			c  = context.Background()
			id = int64(32)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			_, err := testDao.SubTask(c, id)
			ctx.Convey("Then err should be nil.subTask should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
			})
		})
	})
}

View File

@@ -0,0 +1,69 @@
package dao
import (
"context"
"fmt"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
const (
	// NOTE(review): _selectTransfer has `limit 1` although Transfers is
	// documented as "get all" — confirm whether one-at-a-time is intended.
	_selectTransfer = "SELECT id,from_cid,to_cid,mid,offset,state,ctime FROM dm_transfer_job WHERE state=? limit 1"
	_updateTransfer = "UPDATE dm_transfer_job SET state=?,dmid=? WHERE id=?"
	// _idxsSQL pages the sharded dm index by id (ascending) for one (type,oid).
	_idxsSQL = "SELECT id,type,oid,mid,progress,state,pool,attr,ctime,mtime FROM dm_index_%03d FORCE INDEX(ix_oid_state) WHERE type=? AND oid=? AND id >? ORDER BY id limit ?"
)
// Transfers returns transfer jobs in the given state (the query currently
// carries `limit 1`, so at most one row is returned).
// fix: the state argument was previously ignored — the query always ran with
// model.StatInit regardless of what the caller passed.
func (d *Dao) Transfers(c context.Context, state int8) (trans []*model.Transfer, err error) {
	rows, err := d.biliDMWriter.Query(c, _selectTransfer, state)
	if err != nil {
		log.Error("d.biliDMWriter.Query(sql:%s) error(%v)", _selectTransfer, err)
		return
	}
	defer rows.Close()
	for rows.Next() {
		t := &model.Transfer{}
		if err = rows.Scan(&t.ID, &t.FromCid, &t.ToCid, &t.Mid, &t.Offset, &t.State, &t.Ctime); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			return
		}
		trans = append(trans, t)
	}
	// fix: surface iteration errors that rows.Next() swallows.
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}
// UpdateTransfer persists a transfer job's state and last processed dmid,
// returning the number of affected rows.
func (d *Dao) UpdateTransfer(c context.Context, t *model.Transfer) (affect int64, err error) {
	res, err := d.biliDMWriter.Exec(c, _updateTransfer, t.State, t.Dmid, t.ID)
	if err != nil {
		log.Error("d.biliDMWriter.Exec(%+v) error(%v)", t, err)
		return
	}
	return res.RowsAffected()
}
// DMIndexs pages dm index rows of (tp,oid) with id > minID, ascending, up to
// limit rows. It returns the rows keyed by id, the id list in scan order, and
// the subset of ids belonging to the special pool.
func (d *Dao) DMIndexs(c context.Context, tp int32, oid, minID, limit int64) (idxMap map[int64]*model.DM, dmids, special []int64, err error) {
	query := fmt.Sprintf(_idxsSQL, d.hitIndex(oid))
	rows, err := d.dmReader.Query(c, query, tp, oid, minID, limit)
	if err != nil {
		log.Error("db.Query() error(%v)", err)
		return
	}
	defer rows.Close()
	idxMap = make(map[int64]*model.DM)
	for rows.Next() {
		idx := new(model.DM)
		if err = rows.Scan(&idx.ID, &idx.Type, &idx.Oid, &idx.Mid, &idx.Progress, &idx.State, &idx.Pool, &idx.Attr, &idx.Ctime, &idx.Mtime); err != nil {
			log.Error("row.Scan() error(%v)", err)
			return
		}
		idxMap[idx.ID] = idx
		dmids = append(dmids, idx.ID)
		if idx.Pool == model.PoolSpecial {
			special = append(special, idx.ID)
		}
	}
	// fix: surface iteration errors that rows.Next() swallows.
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
	}
	return
}

View File

@@ -0,0 +1,52 @@
package dao
import (
"context"
"testing"
"go-common/app/job/main/dm2/conf"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestTransfers lists init-state transfer jobs.
// NOTE(review): builds a fresh Dao via New(conf.Conf) unlike sibling files
// that reuse testDao — confirm whether that duplication is intended.
func TestTransfers(t *testing.T) {
	var (
		d = New(conf.Conf)
		c = context.TODO()
	)
	Convey("test transferJob", t, func() {
		_, err := d.Transfers(c, model.StatInit)
		So(err, ShouldBeNil)
	})
}
// TestDmIndexs pages up to 10 index rows for (type 1, oid 1012) from id 0.
func TestDmIndexs(t *testing.T) {
	var (
		d = New(conf.Conf)
		c = context.TODO()
	)
	Convey("test DmIndexs", t, func() {
		_, _, _, err := d.DMIndexs(c, 1, 1012, 0, 10)
		So(err, ShouldBeNil)
	})
}
// TestUpdateTransfer updates a canned transfer job; result deliberately unchecked.
func TestUpdateTransfer(t *testing.T) {
	var (
		d     = New(conf.Conf)
		c     = context.TODO()
		trans = &model.Transfer{
			ID:      265,
			FromCid: 233,
			ToCid:   1221,
			Dmid:    333,
			Mid:     1,
			Offset:  0.00,
			State:   0,
		}
	)
	Convey("test update job", t, func() {
		d.UpdateTransfer(c, trans)
	})
}

View File

@@ -0,0 +1,56 @@
package dao
import (
"context"
"fmt"
"net/url"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
const (
	// _archiveURL is the videoup archive view path, joined onto conf.Host.Videoup.
	_archiveURL = "/videoup/view"
)
// archiveURI returns the full videoup archive view endpoint URL.
func (d *Dao) archiveURI() string {
	return d.conf.Host.Videoup + _archiveURL
}
// Videos fetches all clips of an archive by aid, including clips that are not
// yet open for viewing, stamping each with the archive owner's mid.
// A missing/empty payload is logged and yields (nil, nil).
func (d *Dao) Videos(c context.Context, aid int64) (videos []*model.Video, err error) {
	var res struct {
		Code    int64  `json:"code"`
		Message string `json:"message"`
		Data    *struct {
			Archive *model.Archive `json:"archive"`
			Videos  []*model.Video `json:"videos"`
		} `json:"data"`
	}
	uri := d.archiveURI()
	params := url.Values{}
	params.Set("aid", fmt.Sprint(aid))
	if err = d.httpCli.Get(c, uri, "", params, &res); err != nil {
		return
	}
	if res.Code != 0 {
		err = fmt.Errorf("aid:%d,res.Code:%d", aid, res.Code)
		log.Error("url(%s) aid(%d) res(%v)", uri+"?"+params.Encode(), aid, res)
		return
	}
	if res.Data == nil || res.Data.Archive == nil {
		log.Error("url(%s) aid(%d) res(%v)", uri+"?"+params.Encode(), aid, res)
		return
	}
	if len(res.Data.Videos) == 0 {
		log.Error("url(%s) aid(%d) videos is empty", uri+"?"+params.Encode(), aid)
		return
	}
	for _, v := range res.Data.Videos {
		v.Mid = res.Data.Archive.Mid
		videos = append(videos, v)
	}
	return
}

View File

@@ -0,0 +1,31 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
// TestDaoarchiveURI checks the endpoint string is built without error.
func TestDaoarchiveURI(t *testing.T) {
	convey.Convey("archiveURI", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			p1 := testDao.archiveURI()
			ctx.Convey("Then p1 should not be nil.", func(ctx convey.C) {
				ctx.So(p1, convey.ShouldNotBeNil)
			})
		})
	})
}
// TestDaoVideos exercises the HTTP call with aid 0; result deliberately unchecked.
func TestDaoVideos(t *testing.T) {
	convey.Convey("Videos", t, func(ctx convey.C) {
		var (
			c   = context.Background()
			aid = int64(0)
		)
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			testDao.Videos(c, aid)
		})
	})
}

View File

@@ -0,0 +1,34 @@
# Bazel targets for app/job/main/dm2/http.
# NOTE: rules are tagged "automanaged" (regenerated by tooling) — avoid hand edits.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)
go_library(
    name = "go_default_library",
    srcs = ["http.go"],
    importpath = "go-common/app/job/main/dm2/http",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/dm2/conf:go_default_library",
        "//app/job/main/dm2/service:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
    ],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,38 @@
package http
import (
"net/http"
"go-common/app/job/main/dm2/conf"
"go-common/app/job/main/dm2/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
// svc is the package-level service handle used by the HTTP handlers below.
var svc *service.Service

// Init starts the blademaster HTTP server for dm2-job and registers routes.
// It panics when the listener cannot start, aborting process boot.
func Init(c *conf.Config, s *service.Service) {
	svc = s
	// init inner router
	engine := bm.DefaultServer(c.HTTPServer)
	innerRouter(engine)
	// init local server
	if err := engine.Start(); err != nil {
		log.Error("bm.DefaultServer error(%v)", err)
		panic(err)
	}
}
// innerRouter registers the internal (ops-facing) routes; only the ping probe for now.
func innerRouter(e *bm.Engine) {
	e.Ping(ping)
}
// ping is the health probe; it replies 503 when the service fails its check.
func ping(c *bm.Context) {
	err := svc.Ping(c)
	if err == nil {
		return
	}
	log.Error("dm2-job service ping error(%v)", err)
	c.AbortWithStatus(http.StatusServiceUnavailable)
}

View File

@@ -0,0 +1,73 @@
# Bazel targets for app/job/main/dm2/model, including the gogo-proto codegen
# for dm.proto. NOTE: rules are tagged "automanaged" — avoid hand edits.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)
load(
    "@io_bazel_rules_go//proto:def.bzl",
    "go_proto_library",
)
proto_library(
    name = "model_proto",
    srcs = ["dm.proto"],
    tags = ["automanaged"],
    deps = ["@gogo_special_proto//github.com/gogo/protobuf/gogoproto"],
)
go_proto_library(
    name = "model_go_proto",
    compilers = ["@io_bazel_rules_go//proto:gogofast_proto"],
    importpath = "go-common/app/job/main/dm2/model",
    proto = ":model_proto",
    tags = ["manual"],
    deps = [
        "//library/time:go_default_library",
        "@com_github_gogo_protobuf//gogoproto:go_default_library",
    ],
)
go_library(
    name = "go_default_library",
    srcs = [
        "action.go",
        "bnj.go",
        "dm.go",
        "dm_seg.go",
        "dm_special.go",
        "rank_list.go",
        "report.go",
        "subject.go",
        "subtitle.go",
        "task.go",
        "transfer.go",
        "videoup.go",
    ],
    embed = [":model_go_proto"],
    importpath = "go-common/app/job/main/dm2/model",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/time:go_default_library",
        "@com_github_gogo_protobuf//gogoproto:go_default_library",
        "@com_github_gogo_protobuf//proto:go_default_library",
    ],
)
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app/job/main/dm2/model/oplog:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,34 @@
package model
import (
"encoding/json"
)
// action from DMAction-T
const (
	ActFlushDM    = "flush"        // flush the dm cache
	ActReportDel  = "report_del"   // delete a reported dm
	ActAddDM      = "dm_add"       // a new dm was added
	ActFlushDMSeg = "dm_seg_flush" // flush the segmented dm cache
)
// Action is the envelope of a databus action message; Data holds the
// action-specific payload, decoded later according to Action.
type Action struct {
	Action string          `json:"action"`
	Data   json.RawMessage `json:"data"`
}
// Flush is the payload of a whole-oid cache flush message.
type Flush struct {
	Type  int32 `json:"type"`
	Oid   int64 `json:"oid"`
	Force bool  `json:"force"` // force a rebuild even when the cache exists
}
// FlushDMSeg is the payload of a segmented dm cache flush message; Page
// identifies which segment to refresh.
type FlushDMSeg struct {
	Type  int32 `json:"type"`
	Oid   int64 `json:"oid"`
	Force bool  `json:"force"`
	Page  *Page `json:"page"`
}

View File

@@ -0,0 +1,29 @@
package model
// Spam error codes and live message types.
// NOTE(review): Spam* meanings inferred from the names only — confirm against
// the filter service before relying on them.
const (
	SpamBlack    = 52001
	SpamOverflow = 52002
	SpamRestrict = 52005
	// LiveDanmuMsgTypeNormal marks an ordinary live danmaku message.
	LiveDanmuMsgTypeNormal = 0
)
// LiveDanmu is one danmaku message consumed from the live room stream.
type LiveDanmu struct {
	RoomID    int64  `json:"room_id"`
	UID       int64  `json:"uid"`
	UName     string `json:"uname"`
	UserLevel int32  `json:"user_level"`
	Color     int32  `json:"color"`
	Content   string `json:"content"`
	Time      int64  `json:"time"`
	MsgType   int32  `json:"msg_type"`
}
// BnjLiveConfig is the bnj (new-year gala) live-to-vod danmaku configuration.
type BnjLiveConfig struct {
	// NOTE(review): field name carries a typo (Dtar → Start) but is exported
	// and json-mapped to danmu_start_time; renaming would break callers.
	DanmuDtarTime string `json:"danmu_start_time"`
	CommentID     int64  `json:"comment_id"`
	RoomID        int64  `json:"room_id"`
}

View File

@@ -0,0 +1,172 @@
package model
import (
"encoding/json"
"fmt"
"hash/crc32"
"strconv"
)
var (
	// XML entity replacements for &, >, <, plus a single space used to
	// substitute control bytes (see xmlReplace).
	bAmp = []byte(`&amp;`)
	bGt  = []byte(`&gt;`)
	bLt  = []byte(`&lt;`)
	bSp  = []byte(` `)
	// <d p="progress,mode,fontsize,color,ctime,pool,userhash,dmid">message</d>
	_xmlFmt = `<d p="%.5f,%d,%d,%d,%d,%d,%s,%d">%s</d>`
	// same as _xmlFmt with the sender mid appended (realname mode)
	_rnameFmt = `<d p="%.5f,%d,%d,%d,%d,%d,%s,%d,%d">%s</d>`
)
// All const variable use in job
const (
	AttrNo      = int32(0) // attribute bit cleared
	AttrYes     = int32(1) // attribute bit set
	AttrProtect = uint(0)  // bit index: protected dm

	StateNormal        = int32(0)  // normal
	StateDelete        = int32(1)  // deleted
	StateHide          = int32(2)  // hidden
	StateBlock         = int32(3)  // blocked
	StateFilter        = int32(4)  // filtered
	StateMonitorBefore = int32(5)  // review before publish
	StateMonitorAfter  = int32(6)  // publish then review
	StateSystemFilter  = int32(7)  // sensitive-word filtered
	StateReportDelete  = int32(8)  // deleted via report
	StateAdminDelete   = int32(9)  // deleted by dm admin
	StateUserDelete    = int32(10) // deleted by the user
	StateScriptDelete  = int32(11) // deleted by the report script
	StateTaskDel       = int32(12) // deleted by a dm task

	// danmaku display modes
	ModeRolling = int32(1)
	ModeBottom  = int32(4)
	ModeTop     = int32(5)
	ModeReverse = int32(6)
	ModeSpecial = int32(7)
	ModeCode    = int32(8)
	ModeBAS     = int32(9)

	PoolNormal   = int32(0) // normal dm pool
	PoolSubtitle = int32(1) // subtitle dm pool
	PoolSpecial  = int32(2) // special dm pool

	MaskPriorityHgih = int32(1) // dm mask: high priority
	MaskPriorityLow  = int32(0) // dm mask: low priority
	NotFound         = int64(-1)
)
// BinlogMsg binlog msg produced by canal; New/Old carry the row images and are
// decoded lazily per table.
type BinlogMsg struct {
	Action string          `json:"action"`
	Table  string          `json:"table"`
	New    json.RawMessage `json:"new"`
	Old    json.RawMessage `json:"old"`
}
// AttrVal returns the value (0 or 1) of the attribute bit at index bit.
func (d *DM) AttrVal(bit uint) int32 {
	return (d.Attr >> bit) & int32(1)
}
// AttrSet sets the attribute bit at index bit to v (0 or 1).
func (d *DM) AttrSet(v int32, bit uint) {
	d.Attr = d.Attr&(^(1 << bit)) | (v << bit)
}
// DMSlice is a sort.Interface over dm pointers, ordered by ascending ID.
type DMSlice []*DM

func (d DMSlice) Len() int           { return len(d) }
func (d DMSlice) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }
func (d DMSlice) Less(i, j int) bool { return d[i].ID < d[j].ID }
// ToXML renders the dm as a legacy XML <d> tag. With realname the sender mid is
// appended as the final attribute field. Returns "" when there is no message.
func (d *DM) ToXML(realname bool) (s string) {
	if d.Content == nil {
		return
	}
	// the special-content body overrides the normal one when present.
	msg := d.Content.Msg
	if d.ContentSpe != nil {
		msg = d.ContentSpe.Msg
	}
	if len(msg) == 0 {
		return
	}
	if realname {
		// attribute order: progress(s),mode,fontsize,color,ctime,pool,userhash,dmid,mid
		s = fmt.Sprintf(_rnameFmt, float64(d.Progress)/1000.0, d.Content.Mode, d.Content.FontSize, d.Content.Color, d.Ctime, d.Pool, hash(d.Mid, uint32(d.Content.IP)), d.ID, d.Mid, xmlReplace([]byte(msg)))
	} else {
		// attribute order: progress(s),mode,fontsize,color,ctime,pool,userhash,dmid
		s = fmt.Sprintf(_xmlFmt, float64(d.Progress)/1000.0, d.Content.Mode, d.Content.FontSize, d.Content.Color, d.Ctime, d.Pool, hash(d.Mid, uint32(d.Content.IP)), d.ID, xmlReplace([]byte(msg)))
	}
	return
}
// xmlReplace escapes &, > and < for XML, drops NUL bytes, and turns the
// remaining C0/DEL control bytes into spaces; all other bytes pass through.
func xmlReplace(bi []byte) (bo []byte) {
	for _, b := range bi {
		switch {
		case b == 0:
			// NUL bytes are dropped entirely
		case b == '&':
			bo = append(bo, bAmp...)
		case b == '>':
			bo = append(bo, bGt...)
		case b == '<':
			bo = append(bo, bLt...)
		case (b >= 0x01 && b <= 0x08) || (b >= 0x0b && b <= 0x0c) || (b >= 0x0e && b <= 0x1f) || b == 0x7f:
			bo = append(bo, bSp...)
		default:
			bo = append(bo, b)
		}
	}
	return
}
// hash returns the legacy user hash: hex CRC32 of the decimal mid when the
// sender is known, otherwise "D" + hex CRC32 of the decimal ip.
func hash(mid int64, ip uint32) string {
	if mid != 0 {
		sum := crc32.ChecksumIEEE([]byte(strconv.FormatInt(mid, 10)))
		return strconv.FormatInt(int64(sum), 16)
	}
	sum := crc32.ChecksumIEEE([]byte(strconv.FormatInt(int64(ip), 10)))
	return "D" + strconv.FormatInt(int64(sum), 16)
}
// GetSpecialSeg returns the special-pool message body (ContentSpe when set,
// else Content), or "" when the dm has no content or is not in the special pool.
func (d *DM) GetSpecialSeg() (msg string) {
	if d.Content == nil || d.Pool != PoolSpecial {
		return
	}
	msg = d.Content.Msg
	if d.ContentSpe != nil {
		msg = d.ContentSpe.Msg
	}
	return
}
// NeedDisplay reports whether this dm should be shown to viewers:
// normal state, or publish-then-review state.
func (d *DM) NeedDisplay() bool {
	return d.State == StateNormal || d.State == StateMonitorAfter
}
// NeedUpdateSpecial reports whether the special-danmaku asset must be rebuilt:
// either the dm moved into/out of the special pool, or a special-pool dm
// flipped its display state.
func (d *DM) NeedUpdateSpecial(old *DM) bool {
	movedPool := d.Pool != old.Pool && (d.Pool == PoolSpecial || old.Pool == PoolSpecial)
	flippedDisplay := d.Pool == PoolSpecial && d.NeedDisplay() != old.NeedDisplay()
	return movedPool || flippedDisplay
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,54 @@
syntax = "proto3";
package model;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.goproto_enum_prefix_all) = false;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
// DM is a danmaku index record plus optional content bodies.
message DM {
    int64 ID = 1 [(gogoproto.jsontag) = "id"];
    int32 Type = 2 [(gogoproto.jsontag) = "type"];
    int64 Oid = 3 [(gogoproto.jsontag) = "oid"];
    int64 Mid = 4 [(gogoproto.jsontag) = "mid"];
    int32 Progress = 5 [(gogoproto.jsontag) = "progress"];
    int32 Pool = 6 [(gogoproto.jsontag) = "pool"];
    int32 Attr = 7 [(gogoproto.jsontag) = "attr"];
    int32 State = 8 [(gogoproto.jsontag) = "state"];
    int64 Ctime = 9 [(gogoproto.jsontag) = "ctime", (gogoproto.casttype) = "stime"];
    int64 Mtime = 10 [(gogoproto.jsontag) = "mtime", (gogoproto.casttype) = "stime"];
    Content Content = 11 [(gogoproto.jsontag) = "content,omitempty"];
    ContentSpecial ContentSpe = 12 [(gogoproto.jsontag) = "content_special,omitempty"];
}
// Content is the normal dm message body and its display attributes.
message Content {
    int64 ID = 1 [(gogoproto.jsontag) = "id"];
    int32 FontSize = 2 [(gogoproto.jsontag) = "fontsize"];
    int64 Color = 3 [(gogoproto.jsontag) = "color"];
    int32 Mode = 4 [(gogoproto.jsontag) = "mode"];
    int64 IP = 5 [(gogoproto.jsontag) = "ip"];
    int32 Plat = 6 [(gogoproto.jsontag) = "plat"];
    string Msg = 7 [(gogoproto.jsontag) = "msg"];
    int64 Ctime = 8 [(gogoproto.jsontag) = "ctime", (gogoproto.casttype) = "go-common/library/time.Time"];
    int64 Mtime = 9 [(gogoproto.jsontag) = "mtime", (gogoproto.casttype) = "go-common/library/time.Time"];
}
// ContentSpecial is the special-pool dm message body.
message ContentSpecial {
    int64 ID = 1 [(gogoproto.jsontag) = "id"];
    string Msg = 2 [(gogoproto.jsontag) = "msg"];
    int64 Ctime = 3 [(gogoproto.jsontag) = "ctime", (gogoproto.casttype) = "go-common/library/time.Time"];
    int64 Mtime = 4 [(gogoproto.jsontag) = "mtime", (gogoproto.casttype) = "go-common/library/time.Time"];
}
// Elem is one serialized dm: a comma-joined attribute string plus its message.
message Elem {
    string Attribute = 1 [(gogoproto.jsontag) = "attribute,omitempty"];
    string Content = 2 [(gogoproto.jsontag) = "content,omitempty"];
}
// DMSeg is one segment of dm elements plus bfs URLs of special dm payloads.
message DMSeg {
    repeated Elem Elems = 1 [(gogoproto.jsontag) = "dms"];
    repeated string SpecialURL = 2 [(gogoproto.jsontag) = "special_url,omitempty"];
}

View File

@@ -0,0 +1,138 @@
package model
import (
"fmt"
)
var (
	// segmentLength is the segment span used to page danmaku by video time, in ms.
	segmentLength = int64(6 * 60 * 1000)
	// _defaultSeg is the single segment used when the video duration is unknown.
	_defaultSeg = &Segment{Start: 0, End: DefaultVideoEnd, Cnt: 1, Num: 1, Duration: 0}
	// <d p="dmid,attr,progress,mode,fontsize,color,ctime,pool,userhash">message</d>
	_xmlSegFmt = `<d p="%d,%d,%d,%d,%d,%d,%d,%d,%s">%s</d>`
	// DefaultPage default page info
	DefaultPage = &Page{Num: 1, Size: DefaultVideoEnd, Total: 1}
	// _xmlSegHeader is the XML prologue carrying oid and segment bounds.
	_xmlSegHeader = `<?xml version="1.0" encoding="UTF-8"?><i><oid>%d</oid><ps>%d</ps><pe>%d</pe><pc>%d</pc><pn>%d</pn><state>%d</state><real_name>%d</real_name>`
)
// const variable
const (
	// DefaultVideoEnd is the fallback video end time (ms) when the duration is
	// missing or zero.
	DefaultVideoEnd = 10 * 60 * 60 * 1000
	// DefaultPageSize is the default segment length (ms).
	DefaultPageSize = 60 * 6 * 1000
)
// Page dm page info: current page number, page size (ms) and total pages.
type Page struct {
	Num   int64 `json:"num"`
	Size  int64 `json:"size"`
	Total int64 `json:"total"`
}
// Segment dm segment struct (all times in milliseconds).
type Segment struct {
	Start    int64 `json:"ps"`       // segment start time
	End      int64 `json:"pe"`       // segment end time
	Cnt      int64 `json:"cnt"`      // total number of segments
	Num      int64 `json:"num"`      // index of this segment (1-based)
	Duration int64 `json:"duration"` // total video duration
}
// ToXMLHeader renders this segment as the XML prologue for oid, embedding the
// subject state and realname flag. The closing </i> is appended by the caller.
func (s *Segment) ToXMLHeader(oid int64, state, realname int32) string {
	return fmt.Sprintf(_xmlSegHeader, oid, s.Start, s.End, s.Cnt, s.Num, state, realname)
}
// SegmentInfo computes the segment containing start time ps for a video of the
// given duration (ms). A zero duration yields the shared default segment; a ps
// beyond the last segment leaves Num/End at zero, matching legacy behavior.
func SegmentInfo(ps, duration int64) (s *Segment) {
	if duration == 0 {
		return _defaultSeg
	}
	cnt := duration / DefaultPageSize
	if duration%DefaultPageSize > 0 {
		cnt++
	}
	var num, pe int64
	for i := int64(0); i < cnt; i++ {
		if ps >= i*DefaultPageSize && ps < (i+1)*DefaultPageSize {
			ps = i * DefaultPageSize
			pe = (i + 1) * DefaultPageSize
			num = i + 1
			break
		}
	}
	if pe > duration {
		pe = duration
	}
	return &Segment{
		Start:    ps,
		End:      pe,
		Cnt:      cnt,
		Num:      num,
		Duration: duration,
	}
}
// ToXMLSeg renders the dm as one <d> element of a segmented xml document.
// Returns "" when there is no content or the message is empty; special-pool
// dm keep their body on bfs, so their inline message is blanked.
func (d *DM) ToXMLSeg() (s string) {
	c := d.Content
	if c == nil {
		return
	}
	msg := c.Msg
	if spe := d.ContentSpe; spe != nil {
		msg = spe.Msg
	}
	if msg == "" {
		return
	}
	// special-pool bodies are served from bfs; emit an empty inline body
	if d.Pool == PoolSpecial {
		msg = ""
	}
	s = fmt.Sprintf(_xmlSegFmt, d.ID, d.Attr, d.Progress, c.Mode, c.FontSize, c.Color, d.Ctime, d.Pool, hash(d.Mid, uint32(c.IP)), xmlReplace([]byte(msg)))
	return
}
// ToElem converts the dm into an Elem whose Attribute field carries the same
// csv payload as the xml <d p="..."> attribute, with the message separate.
// Returns nil when there is no content or the message is empty; special-pool
// dm keep their body on bfs, so their inline message is blanked.
func (d *DM) ToElem() (e *Elem) {
	c := d.Content
	if c == nil {
		return
	}
	msg := c.Msg
	if spe := d.ContentSpe; spe != nil {
		msg = spe.Msg
	}
	if msg == "" {
		return
	}
	// special-pool bodies are served from bfs; emit an empty inline body
	if d.Pool == PoolSpecial {
		msg = ""
	}
	// dmid,attr,progress,mode,fontsize,color,ctime,pool,user hash
	attr := fmt.Sprintf(`%d,%d,%d,%d,%d,%d,%d,%d,%s`, d.ID, d.Attr, d.Progress, c.Mode, c.FontSize, c.Color, d.Ctime, d.Pool, hash(d.Mid, uint32(c.IP)))
	e = &Elem{
		Attribute: attr,
		Content:   msg,
	}
	return
}
// SegmentPoint computes the [ps, pe) boundaries (ms) of segment num for a
// video of the given duration. A zero duration maps to the single default
// segment [0, DefaultVideoEnd); otherwise the end is clipped to the duration
// and the start is clamped at 0.
func SegmentPoint(num, duration int64) (ps, pe int64) {
	if duration == 0 {
		return 0, DefaultVideoEnd
	}
	ps = (num - 1) * segmentLength
	pe = num * segmentLength
	if pe > duration {
		pe = duration
	}
	if ps < 0 {
		ps = 0
	}
	return
}

View File

@@ -0,0 +1,47 @@
package model
import (
"fmt"
"regexp"
"strings"
)
var (
	// _regFmt matches a bfs url whose file name is "<sum>.xml"; the sha1 sum
	// is substituted via fmt.Sprintf before the pattern is compiled (see Find).
	_regFmt = `.*/bfs/([\S]+)/%s.xml`
)

// DmSpecialContent is the payload of one special dm: its id and body.
type DmSpecialContent struct {
	ID      int64  `json:"id"`
	Content string `json:"content"`
}

// DmSpecial special dm bfs location: where the special-dm files of one
// object (Type/Oid) are stored.
type DmSpecial struct {
	ID        int64
	Type      int32
	Oid       int64
	Locations string // comma-separated list of bfs urls
}
// Split returns Locations as a slice (stored comma-separated).
func (d *DmSpecial) Split() []string {
	return strings.Split(d.Locations, ",")
}
// Join stores s back into Locations as a comma-separated string.
func (d *DmSpecial) Join(s []string) {
	d.Locations = strings.Join(s, ",")
}
// Find find url if exist: returns the first stored bfs location whose file
// name matches sha1Sum, or "" when absent.
// The sum is escaped with regexp.QuoteMeta before being spliced into the
// pattern, so a value containing regex metacharacters cannot corrupt the
// compiled expression (a plain hex sha1 is unaffected).
func (d *DmSpecial) Find(sha1Sum string) string {
	reg := regexp.MustCompile(fmt.Sprintf(_regFmt, regexp.QuoteMeta(sha1Sum)))
	for _, location := range d.Split() {
		if reg.MatchString(location) {
			return location
		}
	}
	return ""
}

View File

@@ -0,0 +1,29 @@
# Bazel build definitions for the dm2 job model/oplog package.
# All rules are tagged "automanaged": they are maintained by the BUILD-file
# generator, so hand edits here are liable to be overwritten.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["oplog.go"],
    importpath = "go-common/app/job/main/dm2/model/oplog",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# package-srcs / all-srcs expose this package's sources to parent filegroups.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,79 @@
package oplog
import (
"go-common/library/log"
)
// Infoc operation log for administrator: one audit-trail record describing a
// change applied to dm of one object.
type Infoc struct {
	Oid           int64        `json:"oid"`         // object id the dm belong to
	Type          int          `json:"type"`        // subject type
	DMIds         []int64      `json:"dmids"`       // affected dm ids
	Subject       string       `json:"subject"`     // field/aspect being changed
	OriginVal     string       `json:"origin_val"`  // value before the change
	CurrentVal    string       `json:"current_val"` // value after the change
	OperationTime string       `json:"optime"`
	OperatorType  OperatorType `json:"operator_type"`
	Operator      int64        `json:"operator"` // operator id
	Source        Source       `json:"source"`
	Remark        string       `json:"remark"`
}

// Source enum integer value
type Source int

// Source enum definition list; iota makes 0 an unused sentinel so valid
// sources start at 1.
const (
	_             Source = iota
	SourceManager        // admin/operations backend (运营后台)
	SourceUp             // creative center (创作中心)
	SourcePlayer         // player (播放器)
)
// String returns the human-readable (Chinese) description of the Source enum;
// unrecognized values are logged and rendered as "unknown source".
func (source Source) String() string {
	switch source {
	case SourceManager:
		return "运营后台"
	case SourceUp:
		return "创作中心"
	case SourcePlayer:
		return "播放器"
	default:
		log.Warn("String() Unknow Source, warn(%+v)", int(source))
		return "未知来源"
	}
}
// OperatorType enum integer value
type OperatorType int

// OperatorType enum definition list; iota makes 0 an unused sentinel so valid
// operator types start at 1.
const (
	_              OperatorType = iota
	OperatorAdmin               // administrator (管理员)
	OperatorMember              // ordinary member (用户)
	OperatorUp                  // uploader (UP主)
	OperatorSystem              // automated system (系统)
)
// String returns the human-readable (Chinese) description of the OperatorType
// enum; unrecognized values are logged and rendered as "unknown source".
func (opType OperatorType) String() string {
	switch opType {
	case OperatorAdmin:
		return "管理员"
	case OperatorMember:
		return "用户"
	case OperatorUp:
		return "UP主"
	case OperatorSystem:
		return "系统"
	default:
		log.Warn("String() Unknow Source, warn(%+v)", int(opType))
		return "未知来源"
	}
}

View File

@@ -0,0 +1,15 @@
package model
// RankRecentRegion is one entry of the recent regional (分区) ranking:
// an archive, its uploader, and related archives.
type RankRecentRegion struct {
	Aid    int64               `json:"aid"`
	Mid    int64               `json:"mid"`
	Others []*RankRecentRegion `json:"others"`
}

// RankRecentResp is the ranking-service response envelope.
type RankRecentResp struct {
	Code int32               `json:"code"`
	List []*RankRecentRegion `json:"list"`
	Num  int32               `json:"num"`
}

View File

@@ -0,0 +1,8 @@
package model
// ReportAction report message: a dm-report event consumed from the queue.
type ReportAction struct {
	Cid      int64 `json:"cid"`       // video cid
	Did      int64 `json:"dmid"`      // dm id
	HideTime int64 `json:"hide_time"` // deadline until which the dm stays hidden
}

View File

@@ -0,0 +1,94 @@
package model
import (
"database/sql/driver"
"strconv"
"time"
)
// All const variable used in job
const (
	SubTypeVideo   = int32(1) // subject type: video
	SubStateOpen   = int32(0) // subject open (dm allowed)
	SubStateClosed = int32(1) // subject closed
	// bit positions inside Subject.Attr (see AttrVal/AttrSet):
	AttrSubGuest         = uint(0) // allow guest dm
	AttrSubSpolier       = uint(1) // allow spoiler dm
	AttrSubMission       = uint(2) // allow mission/activity dm
	AttrSubAdvance       = uint(3) // allow advanced dm
	AttrSubMonitorBefore = uint(4) // review before publish
	AttrSubMonitorAfter  = uint(5) // publish first, review after
	AttrSubMaskOpen      = uint(6) // mask enabled
	AttrSubMaskReady     = uint(7) // mask generation finished
	// mask platform selector:
	MaskPlatWeb int8 = 0   // web
	MaskPlatMbl int8 = 1   // mobile
	MaskPlatAll int8 = 100 // all platforms
)

// Subject dm_subject: the per-object dm container row.
type Subject struct {
	ID        int64 `json:"id"`
	Type      int32 `json:"type"`
	Oid       int64 `json:"oid"` // object id (video cid for SubTypeVideo)
	Pid       int64 `json:"pid"` // parent id (archive aid) — presumably; confirm against dao
	Mid       int64 `json:"mid"` // uploader mid
	State     int32 `json:"state"`
	Attr      int32 `json:"attr"` // bit flags, see AttrSub* above
	ACount    int64 `json:"acount"`
	Count     int64 `json:"count"`
	MCount    int64 `json:"mcount"` // incremented for monitor-state dm (see TxIncrSubMCount callers)
	MoveCnt   int64 `json:"move_count"`
	Maxlimit  int64 `json:"maxlimit"` // max dm kept on screen/in cache
	Childpool int32 `json:"childpool"`
	Ctime     stime `json:"ctime"`
	Mtime     stime `json:"mtime"`
}
// ConvertStime converts a time.Time to the unix-seconds based stime.
func ConvertStime(t time.Time) stime {
	return stime(t.Unix())
}

// stime is a unix timestamp in seconds that can scan from DB time/string
// columns, marshal to a driver value, and unmarshal from JSON.
type stime int64
// Scan implements sql.Scanner: a time.Time column or a decimal string of unix
// seconds is accepted; any other source type leaves the value unchanged.
func (st *stime) Scan(src interface{}) (err error) {
	switch v := src.(type) {
	case time.Time:
		*st = stime(v.Unix())
	case string:
		var sec int64
		sec, err = strconv.ParseInt(v, 10, 64)
		*st = stime(sec)
	}
	return
}
// Value implements driver.Valuer: the stored unix seconds become a time.Time.
func (st stime) Value() (driver.Value, error) {
	return time.Unix(int64(st), 0), nil
}
// UnmarshalJSON implements the json.Unmarshaler interface: the payload may be
// a bare unix-seconds integer or a quoted "2006-01-02 15:04:05" local time.
func (st *stime) UnmarshalJSON(data []byte) error {
	if sec, err := strconv.ParseInt(string(data), 10, 64); err == nil {
		*st = stime(sec)
		return nil
	}
	// not an integer: fall back to the quoted date-time layout
	t, err := time.ParseInLocation(`"2006-01-02 15:04:05"`, string(data), time.Local)
	*st = stime(t.Unix())
	return err
}
// AttrVal return val of subject'attr: the single bit at position bit (0 or 1).
func (s *Subject) AttrVal(bit uint) int32 {
	return (s.Attr >> bit) & int32(1)
}

// AttrSet set val of subject'attr: clears the bit at position bit, then ORs in
// v shifted to that position (v is expected to be 0 or 1).
func (s *Subject) AttrSet(v int32, bit uint) {
	s.Attr = s.Attr&(^(1 << bit)) | (v << bit)
}

View File

@@ -0,0 +1,64 @@
package model
// SubtitleStatus is the lifecycle state of a subtitle draft/publication.
type SubtitleStatus uint8

// SubtitleStatus values, in workflow order.
const (
	SubtitleStatusUnknown SubtitleStatus = iota
	SubtitleStatusDraft
	SubtitleStatusToAudit
	SubtitleStatusAuditBack
	SubtitleStatusRemove
	SubtitleStatusPublish
	SubtitleStatusCheckToAudit
	SubtitleStatusCheckPublish
)

// Subtitle is one subtitle record for a video (oid) in one language.
type Subtitle struct {
	ID            int64          `json:"id"`
	Oid           int64          `json:"oid"`
	Type          int32          `json:"type"`
	Lan           uint8          `json:"lan"` // language code
	Aid           int64          `json:"aid"`
	Mid           int64          `json:"mid"`    // author mid
	UpMid         int64          `json:"up_mid"` // uploader mid of the archive
	Status        SubtitleStatus `json:"status"`
	SubtitleURL   string         `json:"subtitle_url"` // bfs url of the subtitle body
	PubTime       int64          `json:"pub_time"`
	RejectComment string         `json:"reject_comment"`
}

// SubtitlePub marks which subtitle is published (or withdrawn) per oid/lan.
type SubtitlePub struct {
	Oid        int64
	Type       int32
	Lan        uint8
	SubtitleID int64
	IsDelete   bool
}

// SubtitleItem is a single timed caption line.
type SubtitleItem struct {
	From     float64 `json:"from"` // start time, seconds
	To       float64 `json:"to"`   // end time, seconds
	Location int8    `json:"location"`
	Content  string  `json:"content"`
}

// SubtitleBody is the full subtitle document stored on bfs.
// NOTE(review): the "Stroke" json tag is capitalized unlike the other tags —
// confirm consumers expect that exact key before normalizing.
type SubtitleBody struct {
	FontSize        float64         `json:"font_size,omitempty"`
	FontColor       string          `json:"font_color,omitempty"`
	BackgroundAlpha float64         `json:"background_alpha,omitempty"`
	BackgroundColor string          `json:"background_color,omitempty"`
	Stroke          string          `json:"Stroke,omitempty"`
	Bodys           []*SubtitleItem `json:"body"`
}

// SubtitleAuditMsg is the queue message that triggers a subtitle audit.
type SubtitleAuditMsg struct {
	SubtitleID int64 `json:"subtitle_id"`
	Oid        int64 `json:"oid"`
}

View File

@@ -0,0 +1,47 @@
package model
import "time"
// const var
const (
	// dm-task states:
	TaskStateSearch = int32(2) // search in progress
	TaskStateDelDM  = int32(3) // deletion in progress
	TaskStateFail   = int32(4) // execution failed
	TaskStatePause  = int32(5) // task paused
	TaskStateSuc    = int32(6) // executed successfully
	TaskStateDel    = int32(8) // task deleted
	TaskStateWait   = int32(9) // waiting for deletion to run
	// search states reported back by the data platform:
	TaskSearchSuc  = int32(1) // search finished
	TaskSearchFail = int32(2) // search failed
	// enterprise-wechat alert sent when a task deletes too many dm:
	TaskNoticeTitle   = "弹幕任务删除过多告警"
	TaskNoticeContent = "弹幕任务(id:%d, title:%s)已删除%d条弹幕已经被暂停请前往管理后台查看"
)

// TaskInfo is one bulk dm task (search + delete) driven by this job.
type TaskInfo struct {
	ID        int64
	Topic     string
	State     int32
	Count     int64
	Result    string
	Sub       int32
	LastIndex int32
	Priority  int64
	Title     string
	Creator   string
	Reviewer  string
}

// SubTask is one sub task of a dm task, rate-limited over a time window.
type SubTask struct {
	ID        int64
	Operation int32
	Rate      int32
	Tcount    int64 // total number of deleted dm
	Start     time.Time
	End       time.Time
}

View File

@@ -0,0 +1,26 @@
package model
import (
"time"
)
// dm transfer state
const (
	StatInit      = 0 // task created, not started
	StatFinished  = 1 // transfer finished
	StatFailed    = 2 // transfer failed
	StatTransfing = 3 // transfer in progress
)

// Transfer dm transfer task: copy dm from one cid to another, shifting their
// progress by Offset.
type Transfer struct {
	ID      int64
	FromCid int64
	ToCid   int64
	Mid     int64
	Offset  float64 // progress shift applied during the move
	State   int8    // one of the Stat* values above
	Dmid    int64
	Ctime   time.Time
	Mtime   time.Time
}

View File

@@ -0,0 +1,40 @@
package model
const (
	// RouteSecondRound 稿件二审消息 — archive second-round review message.
	RouteSecondRound = "second_round"
	// RouteAutoOpen — archive automatically opened for viewing.
	RouteAutoOpen = "auto_open"
	// RouteForceSync — archive force-synced.
	RouteForceSync = "force_sync"
	// RouteDelayOpen — archive opened for viewing at a scheduled time.
	RouteDelayOpen = "delay_open"
	// VideoStatusOpen — video open for viewing.
	VideoStatusOpen = int32(0)
	// VideoXcodeHDFinish — HD transcode finished.
	VideoXcodeHDFinish = int32(4)
	// VideoXcodeFinish — transcode finished.
	VideoXcodeFinish = int32(2)
)

// VideoupMsg second round msg from VideoupBvc.
type VideoupMsg struct {
	Route string `json:"route"` // one of the Route* constants above
	Aid   int64  `json:"aid"`
}

// Archive archive info.
type Archive struct {
	Aid int64 `json:"aid"`
	Mid int64 `json:"mid"`
}

// Video video info.
type Video struct {
	Aid        int64 `json:"aid"`
	Cid        int64 `json:"cid"`
	Mid        int64 `json:"mid"`
	Duration   int64 `json:"duration"` // seconds
	Status     int32 `json:"status"`
	XCodeState int32 `json:"xcode_state"` // compared against VideoXcode* above
}

View File

@@ -0,0 +1,90 @@
# Bazel build definitions for the dm2 job service package.
# All rules are tagged "automanaged": they are maintained by the BUILD-file
# generator, so hand edits here are liable to be overwritten.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = [
        "action_test.go",
        "archive_test.go",
        "dm_seg_test.go",
        "dm_sepcial_test.go",
        "dm_test.go",
        "mask_test.go",
        "service_test.go",
        "subject_test.go",
        "subtitle_test.go",
        "task_test.go",
        "track_test.go",
        "transfer_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/job/main/dm2/conf:go_default_library",
        "//app/job/main/dm2/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "action.go",
        "archive.go",
        "bnj.go",
        "dm.go",
        "dm_seg.go",
        "dm_special.go",
        "mask.go",
        "service.go",
        "subject.go",
        "subtitle.go",
        "task.go",
        "track.go",
        "transfer.go",
    ],
    importpath = "go-common/app/job/main/dm2/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/job/main/dm2/conf:go_default_library",
        "//app/job/main/dm2/dao:go_default_library",
        "//app/job/main/dm2/model:go_default_library",
        "//app/job/main/dm2/model/oplog:go_default_library",
        "//app/service/main/archive/api:go_default_library",
        "//app/service/main/archive/api/gorpc:go_default_library",
        "//app/service/main/archive/model/archive:go_default_library",
        "//app/service/main/filter/api/grpc/v1:go_default_library",
        "//app/service/main/seq-server/model:go_default_library",
        "//app/service/main/seq-server/rpc/client:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/log/infoc:go_default_library",
        "//library/net/metadata:go_default_library",
        "//library/queue/databus:go_default_library",
        "//library/sync/pipeline/fanout:go_default_library",
        "//library/time:go_default_library",
        "//library/xstr:go_default_library",
    ],
)

# package-srcs / all-srcs expose this package's sources to parent filegroups.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,210 @@
package service
import (
"context"
"encoding/json"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// actionAct dispatches one action message consumed from databus:
//   - ActFlushDM: queue an async rebuild of the whole-video dm cache
//   - ActFlushDMSeg: queue an async rebuild of one segment's dm cache
//   - ActAddDM: persist a new dm, then refresh every cache that contains it
func (s *Service) actionAct(c context.Context, act *model.Action) (err error) {
	switch act.Action {
	case model.ActFlushDM:
		fc := new(model.Flush)
		if err = json.Unmarshal(act.Data, &fc); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		s.asyncAddFlushDM(c, fc)
	case model.ActFlushDMSeg:
		fc := new(model.FlushDMSeg)
		if err = json.Unmarshal(act.Data, &fc); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		// a segment flush without page info cannot be executed
		if fc.Page == nil {
			log.Error("s.ActFlushDMSeg(+%v) error page nil", fc)
			return
		}
		// async flush cache
		s.asyncAddFlushDMSeg(c, fc)
	case model.ActAddDM:
		var (
			dm  = &model.DM{}
			sub *model.Subject
		)
		if err = json.Unmarshal(act.Data, &dm); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", act.Data, err)
			return
		}
		if sub, err = s.subject(c, dm.Type, dm.Oid); err != nil {
			return
		}
		// persist index+content transactionally and bump subject counters
		if err = s.actionAddDM(c, sub, dm); err != nil {
			log.Error("s.actionAddDM(+%v) error(%v)", dm, err)
			return
		}
		// only dm that are actually visible refresh the read caches
		if dm.State == model.StateNormal || dm.State == model.StateMonitorAfter {
			// 1. recent dm list (latest 1000) shown in the creative center
			s.asyncAddRecent(c, dm)
			// 2. refresh the whole-video dm cache; NOTE redis errors are ignored
			if ok, _ := s.dao.ExpireDMCache(c, dm.Type, dm.Oid); ok {
				s.dao.AddDMCache(c, dm)
			}
			s.asyncAddFlushDM(c, &model.Flush{
				Type:  dm.Type,
				Oid:   dm.Oid,
				Force: false,
			})
			// 3. refresh the segmented dm caches; NOTE redis errors are ignored
			var p *model.Page
			if p, err = s.pageinfo(c, sub.Pid, dm); err != nil {
				return
			}
			switch dm.Pool {
			case model.PoolNormal:
				if ok, _ := s.dao.ExpireDMID(c, dm.Type, dm.Oid, p.Total, p.Num); ok {
					s.dao.AddDMIDCache(c, dm.Type, dm.Oid, p.Total, p.Num, dm.ID)
				}
			case model.PoolSubtitle:
				if ok, _ := s.dao.ExpireDMIDSubtitle(c, dm.Type, dm.Oid); ok {
					s.dao.AddDMIDSubtitleCache(c, dm.Type, dm.Oid, dm)
				}
			case model.PoolSpecial:
				// special dm bodies live on bfs; regenerate the location list
				if err = s.specialLocationUpdate(c, dm.Type, dm.Oid); err != nil {
					return
				}
				// TODO add cache
			default:
				return
			}
			s.dao.AddIdxContentCaches(c, dm.Type, dm.Oid, dm)
			s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
				Type:  dm.Type,
				Oid:   dm.Oid,
				Force: false,
				Page:  p,
			})
		}
		// bnj event: mirror the dm onto the bnj archive counters
		s.bnjDmCount(c, sub, dm)
	}
	return
}
// actionFlushDM rebuilds the whole-video xml dm blob for one object and stores
// the flate-compressed result in cache. When force is set the dm redis cache
// is dropped first so the xml is regenerated from the database.
func (s *Service) actionFlushDM(c context.Context, tp int32, oid int64, force bool) (err error) {
	sub, err := s.subject(c, tp, oid)
	if err != nil {
		return
	}
	if force {
		s.dao.DelDMCache(c, tp, oid) // delete redis cache, ignore error
	}
	xml, err := s.genXML(c, sub) // generate xml from redis or database
	if err != nil {
		log.Error("s.genXML(%d) error(%v)", oid, err)
		return
	}
	// compress with flate level 4
	data, err := s.gzflate(xml, 4)
	if err != nil {
		log.Error("s.gzflate(type:%d,oid:%d) error(%v)", tp, oid, err)
		return
	}
	if err = s.dao.AddXMLCache(c, sub.Oid, data); err != nil {
		return
	}
	log.Info("actionFlushDM type:%d,oid:%d fore:%v", tp, oid, force)
	return
}
// actionAddDM add dm index and content to db by transaction, then maintains
// the subject counters: monitor-state dm bump the moderation count, and
// visible/hidden dm bump the visible count; non-normal-pool dm mark the
// subject's child pool.
//
// On a failed step the transaction is rolled back and the STEP's error is
// returned. (Previously the rollback result — typically nil — was returned
// instead, which silently swallowed the real failure.)
func (s *Service) actionAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	tx, err := s.dao.BeginTran(c)
	if err != nil {
		return
	}
	// rollback aborts the tx but preserves the causing error; the rollback
	// error itself is only logged.
	rollback := func(e error) error {
		if rerr := tx.Rollback(); rerr != nil {
			log.Error("tx.Rollback() error(%v)", rerr)
		}
		return e
	}
	// special dm keep their body in a separate table
	if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
		if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
			return rollback(err)
		}
	}
	if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
		return rollback(err)
	}
	// dm awaiting moderation add to the subject's monitor count
	if dm.State == model.StateMonitorBefore || dm.State == model.StateMonitorAfter {
		if _, err = s.dao.TxIncrSubMCount(tx, dm.Type, dm.Oid); err != nil {
			return rollback(err)
		}
	}
	var count int64
	if dm.State == model.StateNormal || dm.State == model.StateMonitorAfter || dm.State == model.StateHide {
		count = 1
		// first non-normal-pool dm flips the subject's child pool flag
		if sub.Childpool == model.PoolNormal && dm.Pool != model.PoolNormal {
			sub.Childpool = 1
		}
	}
	// acount +1 always; visible count +count
	if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, count, sub.Childpool); err != nil {
		return rollback(err)
	}
	return tx.Commit()
}
// actionFlushXMLDmSeg flush xml dm seg: rebuilds the xml cache of one dm
// segment. When force is set the segment's dmid cache (and, if the subject has
// a child pool, the subtitle dmid cache) is dropped first so ids come from the
// database. The segment boundaries are recomputed from the page number and the
// video duration before rendering.
func (s *Service) actionFlushXMLDmSeg(c context.Context, tp int32, oid int64, p *model.Page, force bool) (err error) {
	var (
		sub      *model.Subject
		duration int64
		seg      *model.Segment
	)
	if sub, err = s.subject(c, tp, oid); err != nil {
		return
	}
	if force {
		if err = s.dao.DelDMIDCache(c, tp, oid, p.Total, p.Num); err != nil {
			return
		}
		if sub.Childpool > 0 {
			s.dao.DelDMIDSubtitleCache(c, tp, oid)
		}
	}
	if duration, err = s.videoDuration(c, sub.Pid, sub.Oid); err != nil {
		return
	}
	// translate the page number into a start point, then into a segment
	ps, _ := model.SegmentPoint(p.Num, duration)
	if seg, err = s.segmentInfo(c, sub.Pid, sub.Oid, ps, duration); err != nil {
		return
	}
	res, err := s.dmSegXML(c, sub, seg)
	if err != nil {
		return
	}
	if err = s.dao.SetXMLSegCache(c, tp, oid, seg.Cnt, seg.Num, res); err != nil {
		return
	}
	log.Info("actionFlushXMLDmSeg type:%d,oid:%d,seg:%+v", tp, oid, seg)
	return
}
// flushDmSegCache rebuilds one segment's xml cache; a flush request without
// page info is silently ignored.
func (s *Service) flushDmSegCache(c context.Context, fc *model.FlushDMSeg) (err error) {
	if fc.Page == nil {
		return
	}
	err = s.actionFlushXMLDmSeg(c, fc.Type, fc.Oid, fc.Page, fc.Force)
	return
}
// flushDmCache rebuilds the whole-video xml cache and then drops the ajax dm
// cache so readers refill it from the fresh data.
func (s *Service) flushDmCache(c context.Context, fc *model.Flush) (err error) {
	if err = s.actionFlushDM(c, fc.Type, fc.Oid, fc.Force); err != nil {
		return
	}
	err = s.dao.DelAjaxDMCache(c, fc.Oid)
	return
}

View File

@@ -0,0 +1,45 @@
package service
import (
"context"
"encoding/json"
"testing"
"time"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestActionAddDM pushes a hand-built ActAddDM action through svr.actionAct.
// It requires the full service environment (db, caches) set up by the package
// tests, and uses a nanosecond timestamp as a unique dm id.
func TestActionAddDM(t *testing.T) {
	id := int64(time.Now().UnixNano())
	dm := &model.DM{
		ID:       id,
		Type:     1,
		Oid:      1221,
		Mid:      4780461,
		Progress: 111,
		State:    0,
		Pool:     0,
		Ctime:    1533804859,
		Content: &model.Content{
			ID:       id,
			Mode:     4,
			IP:       123,
			FontSize: 25,
			Color:    12345,
			Msg:      "testtddddddddddddd",
			Ctime:    1533804859,
		},
	}
	Convey("", t, func() {
		data, err := json.Marshal(dm)
		So(err, ShouldBeNil)
		act := &model.Action{
			Action: model.ActAddDM,
			Data:   data,
		}
		err = svr.actionAct(context.TODO(), act)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,42 @@
package service
import (
"context"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/model/archive"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/net/metadata"
)
// videoDuration return video duration (milliseconds) for cid, trying the cache
// first and falling back to the archive RPC. Cache read errors are swallowed
// (err reset to nil) but disable the write-back; a NothingFound archive is
// treated as duration 0.
func (s *Service) videoDuration(c context.Context, aid, cid int64) (duration int64, err error) {
	var cache = true
	if duration, err = s.dao.DurationCache(c, cid); err != nil {
		log.Error("dao.Duration(cid:%d) error(%v)", cid, err)
		err = nil
		cache = false
	} else if duration != model.NotFound {
		// cache hit
		return
	}
	arg := &archive.ArgVideo2{Aid: aid, Cid: cid, RealIP: metadata.String(c, metadata.RemoteIP)}
	page, err := s.arcRPC.Video3(c, arg)
	if err != nil {
		if ecode.Cause(err).Code() == ecode.NothingFound.Code() {
			duration = 0
			err = nil
			log.Warn("acvSvc.Video3(%v) error(duration not exist)", arg)
		} else {
			log.Error("acvSvc.Video3(%v) error(%v)", arg, err)
		}
	} else {
		// the archive stores seconds; dm works in milliseconds
		duration = page.Duration * 1000
	}
	if cache {
		// async write-back
		// NOTE(review): on a transient RPC error this still caches duration=0
		// alongside the returned error — confirm that is intended.
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.SetDurationCache(ctx, cid, duration)
		})
	}
	return
}

View File

@@ -0,0 +1,20 @@
package service
import (
"context"
"testing"
)
// TestVideoDuration exercises videoDuration against the live archive RPC; it
// requires the full service environment (svr) set up by the package tests.
func TestVideoDuration(t *testing.T) {
	var (
		aid int64 = 10097265
		oid int64 = 1508
		c         = context.TODO()
	)
	d, err := svr.videoDuration(c, aid, oid)
	if err != nil {
		t.Errorf("s.videoDuration(%d %d) error(%v)", aid, oid, err)
		t.FailNow()
	}
	t.Logf("oid:%d duration:%d", oid, d)
}

View File

@@ -0,0 +1,419 @@
package service
import (
"context"
"encoding/json"
"math/rand"
"regexp"
"strings"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/api"
arcMdl "go-common/app/service/main/archive/model/archive"
filterMdl "go-common/app/service/main/filter/api/grpc/v1"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/net/metadata"
"go-common/library/queue/databus"
xtime "go-common/library/time"
)
var (
	// msgRegex matches messages made entirely of whitespace: ascii blanks or
	// full-width spaces (U+3000, utf-8 bytes E3 80 80).
	msgRegex     = regexp.MustCompile(`^(\s|\xE3\x80\x80)*$`)
	_bnjDmMsgLen = 100                   // max bnj dm length, in runes
	_dateFormat  = "2006-01-02 15:04:05" // layout of configured start times
)

// seed the global rand source once so the per-danmu sampling varies per run
func init() {
	rand.Seed(time.Now().Unix())
}
// initBnj wires up the bnj (拜年祭 festival) features from configuration: the
// counter sub-archives, the archive's video list, and the live-room danmu
// consumer with its sampling/timing parameters. It is a no-op unless BNJ.Aid
// is set, and panics on an unparsable start time (config error: fail fast).
func (s *Service) initBnj() {
	var err error
	if s.conf.BNJ.Aid <= 0 {
		return
	}
	s.bnjAid = s.conf.BNJ.Aid
	//bnj count: the sub-archive aids whose dm feed the main archive's counters
	if s.conf.BNJ.BnjCounter != nil {
		bnjSubAids := make(map[int64]struct{})
		for _, aid := range s.conf.BNJ.BnjCounter.SubAids {
			bnjSubAids[aid] = struct{}{}
		}
		s.bnjSubAids = bnjSubAids
	}
	// bnj danmu: load once now, then refresh the video list and the live
	// config every 30s for the lifetime of the process
	s.bnjVideos(context.TODO())
	s.bnjLiveConfig(context.TODO())
	go func() {
		ticker := time.NewTicker(time.Second * 30)
		for range ticker.C {
			s.bnjVideos(context.TODO())
			s.bnjLiveConfig(context.TODO())
		}
	}()
	s.bnjIgnoreRate = s.conf.BNJ.BnjLiveDanmu.IgnoreRate
	s.bnjIgnoreBeginTime = time.Duration(s.conf.BNJ.BnjLiveDanmu.IgnoreBegin)
	s.bnjIgnoreEndTime = time.Duration(s.conf.BNJ.BnjLiveDanmu.IgnoreEnd)
	s.bnjliveRoomID = s.conf.BNJ.BnjLiveDanmu.RoomID
	s.bnjUserLevel = s.conf.BNJ.BnjLiveDanmu.Level
	if s.bnjStart, err = time.ParseInLocation(_dateFormat, s.conf.BNJ.BnjLiveDanmu.Start, time.Now().Location()); err != nil {
		panic(err)
	}
	// consume the live-room danmu stream and replay it as video dm
	s.bnjCsmr = databus.New(s.conf.Databus.BnjCsmr)
	log.Info("bnj init start:%v room_id:%v", s.bnjStart.String(), s.conf.BNJ.BnjLiveDanmu.RoomID)
	go s.bnjProc()
}
// bnjProc drains the bnj live-danmu databus consumer: each message is decoded,
// replayed as a video dm via bnjLiveDanmu, and only then committed — decode or
// replay failures skip the commit so the message can be redelivered. The loop
// exits when the consumer channel is closed.
func (s *Service) bnjProc() {
	var (
		err error
		c   = context.Background()
	)
	for {
		msg, ok := <-s.bnjCsmr.Messages()
		if !ok {
			log.Error("bnj bnjProc consumer exit")
			return
		}
		log.Info("bnj partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.LiveDanmu{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		if err = s.bnjLiveDanmu(c, m); err != nil {
			log.Error("bnj bnjLiveDanmu(msg:%+v),error(%v)", m, err)
			continue
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// bnjVideos reloads the bnj archive's video list (capped at the first 4
// parts), ensures every part has a dm subject via syncBnjVideo, and caches the
// list on the service for pickBnjVideo.
func (s *Service) bnjVideos(c context.Context) (err error) {
	var (
		videos []*model.Video
	)
	if videos, err = s.dao.Videos(c, s.bnjAid); err != nil {
		log.Error("bnj bnjVideos(aid:%v) error(%v)", s.bnjAid, err)
		return
	}
	// only the first 4 parts take part in the event
	if len(videos) >= 4 {
		videos = videos[:4]
	}
	for _, video := range videos {
		if err = s.syncBnjVideo(c, model.SubTypeVideo, video); err != nil {
			log.Error("bnj syncBnjVideo(video:%+v) error(%v)", video, err)
			return
		}
	}
	s.bnjArcVideos = videos
	return
}
// syncBnjVideo makes the dm subject of one bnj video consistent with the
// video row: a missing subject is created once the HD transcode has finished,
// and an existing subject has its mid realigned when the video owner changed.
func (s *Service) syncBnjVideo(c context.Context, tp int32, v *model.Video) (err error) {
	var sub *model.Subject
	if sub, err = s.dao.Subject(c, tp, v.Cid); err != nil {
		return
	}
	switch {
	case sub == nil:
		// no subject yet: create one once the HD transcode is done
		if v.XCodeState >= model.VideoXcodeHDFinish {
			_, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), 0)
		}
	case sub.Mid != v.Mid:
		// owner changed: keep the subject's mid in sync
		_, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid)
	}
	return
}
// bnjDmCount laji bnj count: when a dm lands on one of the configured bnj
// sub-archives, a copy is forked onto a page of the main bnj archive so that
// archive's dm counters grow. The fork is stored as StateAdminDelete with a
// progress one second past the page's end, so it counts but is never served.
func (s *Service) bnjDmCount(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	var (
		dmid     int64
		pages    []*api.Page
		chosen   *api.Page
		choseSub *model.Subject
	)
	// only dm on the configured sub-archives are mirrored
	if _, ok := s.bnjSubAids[sub.Pid]; !ok {
		return
	}
	if pages, err = s.arcRPC.Page3(c, &arcMdl.ArgAid2{
		Aid:    s.bnjAid,
		RealIP: metadata.String(c, metadata.RemoteIP),
	}); err != nil {
		log.Error("bnjDmCount Page3(aid:%v) error(%v)", sub.Pid, err)
		return
	}
	if len(pages) <= 0 {
		return
	}
	// spread forks across the pages using the wall-clock second
	idx := time.Now().Unix() % int64(len(pages))
	if chosen = pages[idx]; chosen == nil {
		return
	}
	if choseSub, err = s.subject(c, model.SubTypeVideo, chosen.Cid); err != nil {
		return
	}
	if dmid, err = s.genDMID(c); err != nil {
		log.Error("bnjDmCount genDMID() error(%v)", err)
		return
	}
	forkDM := &model.DM{
		ID:       dmid,
		Type:     model.SubTypeVideo,
		Oid:      chosen.Cid,
		Mid:      dm.Mid,
		Progress: int32((chosen.Duration + 1) * 1000), // 1s past the end: never rendered
		Pool:     dm.Pool,
		State:    model.StateAdminDelete, // counts, but is never served
		Ctime:    dm.Ctime,
		Mtime:    dm.Mtime,
		Content: &model.Content{
			ID:       dmid,
			FontSize: dm.Content.FontSize,
			Color:    dm.Content.Color,
			Mode:     dm.Content.Mode,
			IP:       dm.Content.IP,
			Plat:     dm.Content.Plat,
			Msg:      dm.Content.Msg,
			Ctime:    dm.Content.Ctime,
			Mtime:    dm.Content.Mtime,
		},
	}
	// carry the special body along when the source dm has one
	if dm.Pool == model.PoolSpecial {
		forkDM.ContentSpe = &model.ContentSpecial{
			ID:    dmid,
			Msg:   dm.ContentSpe.Msg,
			Ctime: dm.ContentSpe.Ctime,
			Mtime: dm.ContentSpe.Mtime,
		}
	}
	if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
		return
	}
	return
}
// bnjAddDM add dm index and content to db by transaction for a forked bnj
// counter dm, bumping the subject's total count (acount +1, visible count +0).
// Only dm already marked StateAdminDelete are accepted — these rows exist
// purely to move counters and are never served.
//
// On a failed step the transaction is rolled back and the STEP's error is
// returned. (Previously the rollback result — typically nil — was returned
// instead, which silently swallowed the real failure.)
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
	if dm.State != model.StateAdminDelete {
		return
	}
	tx, err := s.dao.BeginTran(c)
	if err != nil {
		return
	}
	// rollback aborts the tx but preserves the causing error; the rollback
	// error itself is only logged.
	rollback := func(e error) error {
		if rerr := tx.Rollback(); rerr != nil {
			log.Error("tx.Rollback() error(%v)", rerr)
		}
		return e
	}
	// special dm keep their body in a separate table
	if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
		if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
			return rollback(err)
		}
	}
	if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
		return rollback(err)
	}
	if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
		return rollback(err)
	}
	return tx.Commit()
}
// genDMID fetches a new globally unique dm id from the seq-server RPC.
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
	if dmid, err = s.seqRPC.ID(c, s.seqArg); err != nil {
		log.Error("seqRPC.ID() error(%v)", err)
		return
	}
	return
}
// bnjLiveDanmu laji live to video: replays one live-room danmu onto the bnj
// archive. A message is dropped unless the event has started, it is a normal
// danmu from the configured room, the sender meets the level floor, and it
// survives roughly 1-in-bnjIgnoreRate sampling. Survivors are validated,
// filtered, and injected through the regular ActAddDM path as white rolling
// dm in monitor-after state.
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
	var (
		cid, dmid int64
		progress  float64
	)
	// ignore time before the configured event start
	if time.Since(s.bnjStart) < 0 {
		return
	}
	// limit to normal danmu from the configured live room
	if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
		return
	}
	if liveDanmu.UserLevel < s.bnjUserLevel {
		return
	}
	// keep roughly one out of every bnjIgnoreRate messages
	if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
		return
	}
	if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
		return
	}
	// ignore illegal progress
	if progress <= 0 {
		return
	}
	if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
		log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
		return
	}
	if dmid, err = s.genDMID(c); err != nil {
		log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
		return
	}
	now := time.Now().Unix()
	// white (16777215) rolling dm, published first and audited after
	forkDM := &model.DM{
		ID:       dmid,
		Type:     model.SubTypeVideo,
		Oid:      cid,
		Mid:      liveDanmu.UID,
		Progress: int32(progress * 1000),
		Pool:     model.PoolNormal,
		State:    model.StateMonitorAfter,
		Ctime:    model.ConvertStime(time.Now()),
		Mtime:    model.ConvertStime(time.Now()),
		Content: &model.Content{
			ID:       dmid,
			FontSize: 25,
			Color:    16777215,
			Mode:     model.ModeRolling,
			Plat:     0,
			Msg:      liveDanmu.Content,
			Ctime:    xtime.Time(now),
			Mtime:    xtime.Time(now),
		},
	}
	// the filter service may downgrade the state to StateFilter
	if err = s.bnjCheckFilterService(c, forkDM); err != nil {
		log.Error("s.bnjCheckFilterService(%+v) error(%v)", forkDM, err)
		return
	}
	var (
		bs []byte
	)
	if bs, err = json.Marshal(forkDM); err != nil {
		log.Error("json.Marshal(%+v) error(%v)", forkDM, err)
		return
	}
	act := &model.Action{
		Action: model.ActAddDM,
		Data:   bs,
	}
	if err = s.actionAct(c, act); err != nil {
		log.Error("s.actionAddDM(%+v) error(%v)", liveDanmu, err)
		return
	}
	return
}
// pickBnjVideo maps a live-danmu wall-clock timestamp onto the bnj archive's
// video list: the offset from the event start is walked through the parts'
// durations, returning the part's cid and the in-part progress (seconds).
// DMProgressTooBig is returned when the offset falls outside every part, in
// the ignored opening window of a non-first part, or in any part's ignored
// closing window.
func (s *Service) pickBnjVideo(c context.Context, timestamp int64) (cid int64, progress float64, err error) {
	progress = float64(timestamp - s.bnjStart.Unix())
	for idx, video := range s.bnjArcVideos {
		if progress > float64(video.Duration) {
			// the timestamp falls after this part: carry into the next one
			progress = progress - float64(video.Duration)
			continue
		}
		// ignore p1 start: the opening seconds of every part except the first
		if idx != 0 && progress < s.bnjIgnoreBeginTime.Seconds() {
			err = ecode.DMProgressTooBig
			return
		}
		// and the closing seconds of the part
		if float64(video.Duration)-progress < s.bnjIgnoreEndTime.Seconds() {
			err = ecode.DMProgressTooBig
			return
		}
		if progress >= 0 {
			// sub-second jitter so simultaneous danmu don't stack on one frame.
			// BUGFIX: the previous expression float64(rand.Int31n(1000)/1000)
			// performed integer division and always added 0.
			progress = progress + float64(rand.Int31n(1000))/1000
		}
		cid = video.Cid
		return
	}
	err = ecode.DMProgressTooBig
	return
}
// bnjCheckFilterService runs the dm message through the filter RPC. A hit
// (filter level > 0, or a spam black/overflow verdict) flips the dm state to
// StateFilter rather than returning an error, so the dm is stored but hidden;
// err is only non-nil when the RPC itself fails.
func (s *Service) bnjCheckFilterService(c context.Context, dm *model.DM) (err error) {
	var (
		filterReply *filterMdl.FilterReply
	)
	if filterReply, err = s.filterRPC.Filter(c, &filterMdl.FilterReq{
		Area:    "danmu",
		Message: dm.Content.Msg,
		Id:      dm.ID,
		Oid:     dm.Oid,
		Mid:     dm.Mid,
	}); err != nil {
		log.Error("checkFilterService(dm:%+v),err(%v)", dm, err)
		return
	}
	if filterReply.Level > 0 || filterReply.Limit == model.SpamBlack || filterReply.Limit == model.SpamOverflow {
		dm.State = model.StateFilter
		log.Info("bnj filter service delete(dmid:%d,data:+%v)", dm.ID, filterReply)
	}
	return
}
// checkBnjDmMsg validates a candidate bnj dm body: it must not be whitespace
// only, must not exceed _bnjDmMsgLen runes, and must not contain the literal
// two-character sequences `\n` or `/n`.
func (s *Service) checkBnjDmMsg(c context.Context, msg string) (err error) {
	// blank dm: ascii whitespace and/or full-width spaces only
	if msgRegex.MatchString(msg) {
		err = ecode.DMMsgIlleagel
		return
	}
	// length is measured in runes, not bytes
	if len([]rune(msg)) > _bnjDmMsgLen {
		err = ecode.DMMsgTooLong
		return
	}
	// reject literal backslash-n / slash-n escape sequences
	if strings.Contains(msg, `\n`) || strings.Contains(msg, `/n`) {
		err = ecode.DMMsgIlleagel
		return
	}
	return
}
// bnjLiveConfig pulls the bnj live configuration (archive aid, live room id,
// danmu start time) from the live side and applies it, overriding the static
// file config. Unfetchable, unparsable or incomplete configs are logged and
// skipped, leaving the current settings untouched.
// NOTE(review): "DanmuDtarTime" looks like a typo for "DanmuStartTime" in the
// upstream struct; it is used as the start time here — confirm with its owner.
func (s *Service) bnjLiveConfig(c context.Context) (err error) {
	var (
		bnjConfig *model.BnjLiveConfig
		start     time.Time
	)
	if bnjConfig, err = s.dao.BnjConfig(c); err != nil {
		log.Error("bnjLiveConfig error current:%v err:%+v", time.Now().String(), err)
		return
	}
	if bnjConfig == nil {
		log.Error("bnjLiveConfig error current:%v bnjConfig nil", time.Now().String())
		return
	}
	if start, err = time.ParseInLocation(_dateFormat, bnjConfig.DanmuDtarTime, time.Now().Location()); err != nil {
		log.Error("bnjLiveConfig start time error current:%v config:%+v", time.Now().String(), bnjConfig)
		return
	}
	if bnjConfig.CommentID <= 0 || bnjConfig.RoomID <= 0 {
		log.Info("bnjLiveConfig illegal current:%v config:%+v", time.Now().String(), bnjConfig)
		return
	}
	// apply atomically only after all fields validated
	s.bnjAid = bnjConfig.CommentID
	s.bnjliveRoomID = bnjConfig.RoomID
	s.bnjStart = start
	log.Info("bnjLiveConfig ok current:%v config:%+v", time.Now().String(), bnjConfig)
	return
}

View File

@@ -0,0 +1,271 @@
package service
import (
"bytes"
"compress/flate"
"context"
"fmt"
"math"
"sort"
"go-common/app/job/main/dm2/model"
arcMdl "go-common/app/service/main/archive/model/archive"
"go-common/library/log"
)
// gzflate compresses in with DEFLATE at the given compression level.
// An empty input yields a nil output and a nil error.
func (s *Service) gzflate(in []byte, level int) (out []byte, err error) {
	if len(in) == 0 {
		return
	}
	var buf bytes.Buffer
	var w *flate.Writer
	if w, err = flate.NewWriter(&buf, level); err != nil {
		return
	}
	if _, err = w.Write(in); err != nil {
		return
	}
	// Close flushes the remaining compressed data
	if err = w.Close(); err != nil {
		return
	}
	out = buf.Bytes()
	return
}
// dmsCache reads the whole-video dm list from the redis cache, returning nil
// without error on a miss. While assembling the result it enforces the
// subject's maxlimit: at most maxlimit protected dm, and protected + normal
// together capped at maxlimit (oldest entries dropped); subtitle and special
// dm are always appended. Entries dropped here are also trimmed from redis.
func (s *Service) dmsCache(c context.Context, tp int32, oid, maxlimit int64) (dms []*model.DM, err error) {
	ok, err := s.dao.ExpireDMCache(c, tp, oid)
	if err != nil || !ok {
		return
	}
	values, err := s.dao.DMCache(c, tp, oid)
	if err != nil || len(values) == 0 {
		return
	}
	var (
		start, trimCnt           int
		normal, protect, special []*model.DM
	)
	// bucket the cached entries: protected, plain normal, everything else
	for _, value := range values {
		dm := &model.DM{}
		if err = dm.Unmarshal(value); err != nil {
			log.Error("proto.Unmarshal(%s) error(%v)", value, err)
			return
		}
		if dm.Pool == model.PoolNormal {
			if dm.AttrVal(model.AttrProtect) == model.AttrYes {
				protect = append(protect, dm)
			} else {
				normal = append(normal, dm)
			}
		} else {
			special = append(special, dm)
		}
	}
	// protected dm: keep at most maxlimit of them
	if start = len(protect) - int(maxlimit); start > 0 {
		trimCnt += start
		protect = protect[start:]
	}
	dms = append(dms, protect...)
	// normal dm: protected + normal together must not exceed maxlimit
	if start = len(normal) + len(protect) - int(maxlimit); start > 0 {
		trimCnt += start
		normal = normal[start:]
	}
	dms = append(dms, normal...)
	// subtitle and special dm are exempt from maxlimit
	dms = append(dms, special...)
	if trimCnt > 0 {
		// drop the same number of oldest entries from redis too
		err = s.dao.TrimDMCache(c, tp, oid, int64(trimCnt))
	}
	return
}
// dms loads every dm of one object from the database, joined with their
// content (and special content where present), grouped and sorted per pool,
// and returned in order: protected + plain normal (together capped at
// maxlimit, oldest dropped), then subtitle-pool dm, then special-pool dm.
// NOTE(review): the childpool parameter is currently unused here.
func (s *Service) dms(c context.Context, tp int32, oid, maxlimit int64, childpool int32) (dms []*model.DM, err error) {
	var (
		count         int
		keyprotect    = "kp"
		dmMap         = make(map[string][]*model.DM)
		contentSpeMap = make(map[int64]*model.ContentSpecial)
	)
	idxMap, dmids, spedmids, err := s.dao.Indexs(c, tp, oid)
	if err != nil {
		return
	}
	if len(dmids) == 0 {
		return
	}
	ctsMap, err := s.dao.Contents(c, oid, dmids)
	if err != nil {
		return
	}
	if len(spedmids) > 0 {
		if contentSpeMap, err = s.dao.ContentsSpecial(c, spedmids); err != nil {
			return
		}
	}
	// join content onto the index rows and bucket by pool
	// (protected normal dm get their own bucket keyed "kp")
	for _, content := range ctsMap {
		if dm, ok := idxMap[content.ID]; ok {
			key := fmt.Sprint(dm.Pool)
			dm.Content = content
			if dm.Pool == model.PoolNormal {
				if dm.AttrVal(model.AttrProtect) == model.AttrYes {
					key = keyprotect
				}
			}
			if dm.Pool == model.PoolSpecial {
				contentSpe, ok := contentSpeMap[dm.ID]
				if ok {
					dm.ContentSpe = contentSpe
				}
			}
			dmMap[key] = append(dmMap[key], dm)
		}
	}
	// dm sort
	for _, dmsTmp := range dmMap {
		sort.Sort(model.DMSlice(dmsTmp))
	}
	// pool = 0: protected + normal dm together are capped at maxlimit
	if protect, ok := dmMap[keyprotect]; ok {
		if start := len(protect) - int(maxlimit); start > 0 { // keep at most maxlimit protected dm
			protect = protect[start:]
		}
		dms = append(dms, protect...)
		count = len(protect)
	}
	if normal, ok := dmMap[fmt.Sprint(model.PoolNormal)]; ok {
		start := len(normal) + count - int(maxlimit)
		if start > 0 {
			normal = normal[start:]
		}
		dms = append(dms, normal...)
	}
	// pool = 1: subtitle dm
	if subtitle, ok := dmMap[fmt.Sprint(model.PoolSubtitle)]; ok {
		dms = append(dms, subtitle...)
	}
	// pool = 2: special dm
	if special, ok := dmMap[fmt.Sprint(model.PoolSpecial)]; ok {
		dms = append(dms, special...)
	}
	return
}
// genXML renders the full danmaku XML document for a subject.
// On cache hit it marks the source as "k-v"; on miss it falls back to
// storage ("e-r"), repopulating the redis cache as a side effect.
func (s *Service) genXML(c context.Context, sub *model.Subject) (xml []byte, err error) {
	realname := s.isRealname(c, sub.Pid, sub.Oid)
	buf := new(bytes.Buffer)
	buf.WriteString(`<?xml version="1.0" encoding="UTF-8"?><i>`)
	buf.WriteString(`<chatserver>chat.bilibili.com</chatserver><chatid>`)
	buf.WriteString(fmt.Sprint(sub.Oid))
	buf.WriteString(`</chatid><mission>`)
	buf.WriteString(fmt.Sprint(sub.AttrVal(model.AttrSubMission)))
	buf.WriteString(`</mission><maxlimit>`)
	buf.WriteString(fmt.Sprint(sub.Maxlimit))
	buf.WriteString(`</maxlimit>`)
	buf.WriteString(fmt.Sprintf(`<state>%d</state>`, sub.State))
	if realname {
		buf.WriteString(`<real_name>1</real_name>`)
	} else {
		buf.WriteString(`<real_name>0</real_name>`)
	}
	// a closed subject gets the header only, no danmaku body
	if sub.State == model.SubStateClosed {
		buf.WriteString(`</i>`)
		xml = buf.Bytes()
		return
	}
	dms, err := s.dmsCache(c, sub.Type, sub.Oid, sub.Maxlimit)
	if err != nil {
		return
	}
	if len(dms) > 0 {
		buf.WriteString(`<source>k-v</source>`)
	} else {
		buf.WriteString(`<source>e-r</source>`)
		if dms, err = s.dms(c, sub.Type, sub.Oid, sub.Maxlimit, int32(sub.Childpool)); err != nil {
			return
		}
		if err = s.dao.SetDMCache(c, sub.Type, sub.Oid, dms); err != nil { // add redis cache
			return
		}
	}
	for _, dm := range dms {
		buf.WriteString(dm.ToXML(realname))
	}
	buf.WriteString("</i>")
	xml = buf.Bytes()
	return
}
// isRealname reports whether real-name verification applies to a video.
// Two oids are hard-coded as always-on; otherwise the archive's type id is
// looked up in s.realname and the oid compared against that partition's
// threshold cid.
func (s *Service) isRealname(c context.Context, aid, oid int64) (realname bool) {
	switch oid {
	case 13196688, 290932:
		// hard-coded whitelist of real-name oids
		return true
	}
	arg := &arcMdl.ArgAid2{Aid: aid}
	archive, err := s.arcRPC.Archive3(c, arg)
	if err != nil {
		log.Error("arcRPC.Archive3(%v) error(%v)", arg, err)
		return
	}
	threshold, known := s.realname[int64(archive.TypeID)]
	return known && oid >= threshold
}
// flushXMLSegCache refreshes the per-segment XML cache for a subject.
// NOTE: currently it only deletes each segment's cache entry.
// NOTE(review): the trailing `continue` is a no-op, and a failed delete
// leaves err set so the *last* failure is what gets returned — confirm
// that partial-failure behavior is intended.
func (s *Service) flushXMLSegCache(c context.Context, sub *model.Subject) (err error) {
	duration, err := s.videoDuration(c, sub.Pid, sub.Oid)
	if err != nil {
		return
	}
	seg := model.SegmentInfo(0, duration)
	// delete every segment's cache entry; segments are 1-based
	for i := int64(1); i <= seg.Cnt; i++ {
		if err = s.dao.DelXMLSegCache(c, sub.Type, sub.Oid, seg.Cnt, i); err != nil {
			continue
		}
	}
	return
}
// flushAllDmSegCache enqueues a forced cache refresh for every danmaku
// segment of the given video (oid) by pushing FlushDMSeg jobs onto the
// async flush channels.
func (s *Service) flushAllDmSegCache(c context.Context, oid int64, tp int32) (err error) {
	var (
		sub             *model.Subject
		duration, total int64
	)
	if sub, err = s.subject(c, tp, oid); err != nil {
		return
	}
	if duration, err = s.videoDuration(c, sub.Pid, sub.Oid); err != nil {
		return
	}
	// number of fixed-size pages covering the whole video duration
	total = int64(math.Ceil(float64(duration) / float64(model.DefaultPageSize)))
	for i := int64(1); i <= total; i++ {
		s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
			Type:  tp,
			Oid:   oid,
			Force: true,
			Page: &model.Page{
				Num:   i,
				Size:  model.DefaultPageSize,
				Total: total,
			},
		})
	}
	log.Info("flushAllDmSegCache oid:%v total:%v", oid, total)
	return
}
// asyncAddFlushDM hands a full-list flush request to the worker shard
// selected by oid; when that shard's buffer is full the request is
// dropped with a warning rather than blocking the caller.
func (s *Service) asyncAddFlushDM(c context.Context, fc *model.Flush) {
	shard := s.flushMergeChan[fc.Oid%int64(s.routineSize)]
	select {
	case shard <- fc:
		// enqueued
	default:
		log.Warn("flush merge channel is full,flush(%+v)", fc)
	}
}

View File

@@ -0,0 +1,242 @@
package service
import (
"bytes"
"compress/gzip"
"context"
"math"
"go-common/app/job/main/dm2/model"
"go-common/library/ecode"
"go-common/library/log"
)
// gzip compresses input with the default gzip level and returns the
// compressed bytes. Close is checked because it flushes the final block.
func (s *Service) gzip(input []byte) ([]byte, error) {
	var out bytes.Buffer
	zw := gzip.NewWriter(&out)
	if _, werr := zw.Write(input); werr != nil {
		return nil, werr
	}
	// Close flushes remaining data and writes the gzip footer.
	if cerr := zw.Close(); cerr != nil {
		return nil, cerr
	}
	return out.Bytes(), nil
}
// dmsByid fetches the given danmaku ids from storage, joining index rows
// with their content (and special content for special-pool entries).
func (s *Service) dmsByid(c context.Context, tp int32, oid int64, missed []int64) (dms []*model.DM, err error) {
	idxMap, special, err := s.dao.IndexsByid(c, tp, oid, missed)
	if err != nil || len(idxMap) == 0 {
		return
	}
	ctsMap, err := s.dao.Contents(c, oid, missed)
	if err != nil {
		return
	}
	ctsSpeMap := make(map[int64]*model.ContentSpecial)
	if len(special) > 0 {
		if ctsSpeMap, err = s.dao.ContentsSpecial(c, special); err != nil {
			return
		}
	}
	// assemble full DM records for ids that have both an index and content row
	for _, content := range ctsMap {
		if idx, ok := idxMap[content.ID]; ok {
			dm := &model.DM{
				ID:       idx.ID,
				Type:     idx.Type,
				Oid:      idx.Oid,
				Mid:      idx.Mid,
				Progress: idx.Progress,
				Pool:     idx.Pool,
				Attr:     idx.Attr,
				State:    idx.State,
				Ctime:    idx.Ctime,
				Mtime:    idx.Mtime,
				Content:  content,
			}
			if idx.Pool == model.PoolSpecial {
				if _, ok = ctsSpeMap[dm.ID]; ok {
					dm.ContentSpe = ctsSpeMap[dm.ID]
				}
			}
			dms = append(dms, dm)
		}
	}
	return
}
// dmSeg returns one page (segment) of danmaku elements: normal-pool ids for
// the segment plus, if the subject has a child pool, subtitle ids. Content
// is served from the idx-content cache where possible, with misses loaded
// from storage and written back asynchronously.
func (s *Service) dmSeg(c context.Context, tp int32, oid, limit int64, childpool int32, p *model.Page) (res *model.DMSeg, err error) {
	var (
		ids   []int64
		cache = true
		dmids = make([]int64, 0, limit)
		elems = make([]*model.Elem, 0, limit)
		ps    = (p.Num - 1) * p.Size
		pe    = p.Num * p.Size
	)
	res = new(model.DMSeg)
	if ids, err = s.dmidsSeg(c, tp, oid, p.Total, p.Num, ps, pe, limit); err != nil {
		return
	}
	dmids = append(dmids, ids...)
	if childpool > 0 {
		if ids, err = s.dmidSubtitle(c, tp, oid, ps, pe, limit); err != nil {
			return
		}
		dmids = append(dmids, ids...)
	}
	if len(dmids) <= 0 {
		return
	}
	elemsCache, missed, err := s.dao.IdxContentCacheV2(c, tp, oid, dmids)
	if err != nil {
		// cache unreadable: load everything from storage and skip write-back
		missed = dmids
		cache = false
	} else {
		elems = append(elems, elemsCache...)
	}
	if len(missed) == 0 {
		res.Elems = elems
		return
	}
	dms, err := s.dmsByid(c, tp, oid, missed)
	if err != nil {
		return
	}
	for _, dm := range dms {
		if e := dm.ToElem(); e != nil {
			elems = append(elems, e)
		}
	}
	res.Elems = elems
	// backfill the cache asynchronously only when the cache itself was healthy
	if cache && len(dms) > 0 {
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.AddIdxContentCaches(ctx, tp, oid, dms...)
		})
	}
	return
}
// dmidsSeg returns normal-pool danmaku ids for a segment, reading the id
// cache first and falling back to the index table on miss/error; a DB hit
// repopulates the cache asynchronously.
func (s *Service) dmidsSeg(c context.Context, tp int32, oid, total, num, ps, pe, limit int64) (dmids []int64, err error) {
	if dmids, err = s.dao.DMIDCache(c, tp, oid, total, num, limit); err != nil || len(dmids) == 0 {
		if dmids, err = s.dao.IndexsSegID(c, tp, oid, ps, pe, limit, model.PoolNormal); err != nil {
			return
		}
		if len(dmids) > 0 {
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddDMIDCache(ctx, tp, oid, total, num, dmids...)
			})
		}
	}
	return
}
// dmidSubtitle returns subtitle-pool danmaku ids for a segment, with the
// same cache-first / DB-fallback / async-backfill pattern as dmidsSeg.
func (s *Service) dmidSubtitle(c context.Context, tp int32, oid, ps, pe, limit int64) (dmids []int64, err error) {
	if dmids, err = s.dao.DMIDSubtitleCache(c, tp, oid, ps, pe, limit); err != nil || len(dmids) == 0 {
		var dms []*model.DM
		if dms, dmids, err = s.dao.IndexsSeg(c, tp, oid, ps, pe, limit, model.PoolSubtitle); err != nil {
			return
		}
		if len(dms) > 0 {
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddDMIDSubtitleCache(ctx, tp, oid, dms...)
			})
		}
	}
	return
}
// asyncAddFlushDMSeg enqueues a segment-flush request onto the worker
// shard selected by oid; a full shard drops the request with a warning
// instead of blocking. The returned error is always nil.
func (s *Service) asyncAddFlushDMSeg(c context.Context, fc *model.FlushDMSeg) (err error) {
	shard := s.flushSegChan[fc.Oid%int64(s.routineSize)]
	select {
	case shard <- fc:
		// enqueued
	default:
		log.Warn("segment flush merge channel is full,fc:%+v page:%+v", fc, fc.Page)
	}
	return
}
// pageinfo computes which segment page a danmaku falls into based on its
// progress and the video duration; an unknown duration falls back to the
// default single page, and special-pool danmaku always live in page 1.
func (s *Service) pageinfo(c context.Context, pid int64, dm *model.DM) (p *model.Page, err error) {
	duration, err := s.videoDuration(c, pid, dm.Oid)
	if err != nil {
		return
	}
	if duration != 0 {
		p = &model.Page{
			Num:   int64(math.Ceil(float64(dm.Progress) / float64(model.DefaultPageSize))),
			Size:  model.DefaultPageSize,
			Total: int64(math.Ceil(float64(duration) / float64(model.DefaultPageSize))),
		}
		if p.Num == 0 { // fix progress == 0
			p.Num = 1
		}
	} else { // duration not exist
		p = model.DefaultPage
	}
	// NOTE PoolSpecial store in the first segment
	if dm.Pool == model.PoolSpecial {
		p.Num = 1
	}
	return
}
// dmSegXML renders one segment of danmaku as gzip-compressed XML.
// The XML body is assembled from the idx-content cache where possible;
// misses are loaded from storage and backfilled asynchronously.
func (s *Service) dmSegXML(c context.Context, sub *model.Subject, seg *model.Segment) (res []byte, err error) {
	var (
		cache                        = true
		buf                          = new(bytes.Buffer)
		dms                          []*model.DM
		dmids, normalIds, subtitleIds []int64
	)
	buf.WriteString(seg.ToXMLHeader(sub.Oid, sub.State, 0))
	// close the document and gzip it on the way out (only on success)
	defer func() {
		if err == nil {
			buf.WriteString(`</i>`)
			res, err = s.gzip(buf.Bytes())
		}
	}()
	// fetch up to 2*Maxlimit ids per pool for this segment
	if normalIds, err = s.dmidsSeg(c, sub.Type, sub.Oid, seg.Cnt, seg.Num, seg.Start, seg.End, 2*sub.Maxlimit); err != nil {
		return
	}
	dmids = append(dmids, normalIds...)
	if sub.Childpool > 0 {
		if subtitleIds, err = s.dmidSubtitle(c, sub.Type, sub.Oid, seg.Start, seg.End, 2*sub.Maxlimit); err != nil {
			return
		}
		dmids = append(dmids, subtitleIds...)
	}
	if len(dmids) <= 0 {
		return
	}
	content, missed, err := s.dao.IdxContentCache(c, sub.Type, sub.Oid, dmids)
	if err != nil {
		// cache unreadable: load everything from storage, skip write-back
		missed = dmids
		cache = false
	} else {
		buf.Write(content)
	}
	if len(missed) > 0 {
		if dms, err = s.dmsByid(c, sub.Type, sub.Oid, missed); err != nil {
			return
		}
		for _, dm := range dms {
			buf.WriteString(dm.ToXMLSeg())
		}
	}
	if cache && len(dms) > 0 {
		s.cache.Do(c, func(ctx context.Context) {
			s.dao.AddIdxContentCaches(ctx, sub.Type, sub.Oid, dms...)
		})
	}
	return
}
// segmentInfo returns segment metadata for the position ps within oid.
// A ps at or beyond a known duration yields ecode.NotModified.
func (s *Service) segmentInfo(c context.Context, aid, oid, ps int64, duration int64) (seg *model.Segment, err error) {
	if duration != 0 && ps >= duration {
		log.Warn("oid:%d ps:%d larger than duration:%d", oid, ps, duration)
		err = ecode.NotModified
		return
	}
	seg = model.SegmentInfo(ps, duration)
	return
}

View File

@@ -0,0 +1,51 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestDmsByid exercises dmsByid against live test infrastructure
// (requires the configured dao backends to be reachable).
func TestDmsByid(t *testing.T) {
	var (
		tp     int32 = 1
		oid    int64 = 1221
		missed       = []int64{719150141, 719150142}
	)
	Convey("", t, func() {
		dms, err := svr.dmsByid(context.TODO(), tp, oid, missed)
		So(err, ShouldBeNil)
		So(dms, ShouldNotBeEmpty)
		for _, dm := range dms {
			t.Log(dm)
		}
	})
}
// TestDMSeg exercises dmSeg for one page of a known test oid.
func TestDMSeg(t *testing.T) {
	var (
		tp        int32 = 1
		oid       int64 = 1221
		childpool int32 = 1
		limit     int64 = 10
		p               = &model.Page{Num: 1, Size: model.DefaultVideoEnd, Total: 1}
	)
	Convey("", t, func() {
		res, err := svr.dmSeg(context.TODO(), tp, oid, limit, childpool, p)
		So(err, ShouldBeNil)
		So(res, ShouldNotBeNil)
		t.Logf("%v,length:%d", res, len(res.Elems))
	})
}
// TestPageInfo checks page computation for a special-pool danmaku.
func TestPageInfo(t *testing.T) {
	Convey("", t, func() {
		dm := &model.DM{ID: 719182141, Type: 1, Oid: 1221, Progress: 0, Pool: 2}
		p, err := svr.pageinfo(context.TODO(), 12345, dm)
		So(err, ShouldBeNil)
		t.Log(p)
	})
}

View File

@@ -0,0 +1,40 @@
package service
import (
"context"
"fmt"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// func TestSpecialDmRemove(t *testing.T) {
// Convey("", t, func() {
// dmid := int64(920249977)
// dm := &model.DM{
// ID: dmid,
// Type: 1,
// Oid: 19,
// Mid: 1,
// State: 1,
// Pool: 2,
// Progress: 10,
// }
// _, err := testSvc.dao.UpdateDM(context.TODO(), dm)
// if err != nil {
// fmt.Println(err)
// }
// So(err, ShouldBeNil)
// })
// }
// TestSpecialLocationUpdate exercises the special-danmaku BFS upload path.
func TestSpecialLocationUpdate(t *testing.T) {
	Convey("", t, func() {
		err := svr.specialLocationUpdate(context.TODO(), 1, 19)
		if err != nil {
			fmt.Println(err)
		}
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,133 @@
package service
import (
"context"
"encoding/json"
"fmt"
"sort"
"go-common/app/job/main/dm2/model"
)
const (
	// _bfsMaxSize caps one uploaded JSON blob at the MediumText limit.
	_bfsMaxSize = 16 * 1024 * 1024 // size MediumText
	// per-item JSON overhead for `{"id":,"content":""},`
	_specialJSONItemSize = 20 + 1
	// minimal JSON payload: the surrounding `[]`
	_specialJSONAtLeastSize = 2
)
// speicalDms (sic: "special") loads all special-pool danmaku for (tp, oid),
// joins content and special content, and returns them sorted by progress.
func (s *Service) speicalDms(c context.Context, tp int32, oid int64) (dms []*model.DM, err error) {
	var (
		dmids        []int64
		spContentMap map[int64]*model.ContentSpecial
		contentMap   map[int64]*model.Content
	)
	if dms, dmids, err = s.dao.IndexsByPool(c, tp, oid, model.PoolSpecial); err != nil {
		return
	}
	if len(dmids) == 0 {
		return
	}
	if contentMap, err = s.dao.Contents(c, oid, dmids); err != nil {
		return
	}
	if spContentMap, err = s.dao.ContentsSpecial(c, dmids); err != nil {
		return
	}
	// attach content rows to their index records
	for _, dm := range dms {
		if v, ok := contentMap[dm.ID]; ok {
			dm.Content = v
		}
		if v, ok := spContentMap[dm.ID]; ok {
			dm.ContentSpe = v
		}
	}
	sort.Slice(dms, func(i, j int) bool {
		return dms[i].Progress < dms[j].Progress
	})
	return
}
// buildSpecialDms serializes special danmaku into one or more JSON blobs,
// splitting whenever the estimated encoded size would exceed _bfsMaxSize.
// NOTE(review): the size estimate uses raw content length and does not
// account for JSON string escaping, so a blob could in theory exceed the
// limit slightly — confirm whether content can contain characters needing
// escapes.
func (s *Service) buildSpecialDms(c context.Context, dms []*model.DM) (bss [][]byte, err error) {
	var (
		dmSpecialContents []*model.DmSpecialContent
		bs                []byte
		length            int
	)
	if len(dms) == 0 {
		return
	}
	dmSpecialContents = make([]*model.DmSpecialContent, 0, len(dms))
	length = _specialJSONAtLeastSize
	for _, dm := range dms {
		if len(dm.GetSpecialSeg()) == 0 {
			continue
		}
		// estimated on-the-wire size of this item
		itemSize := len(fmt.Sprint(dm.ID)) + len(dm.GetSpecialSeg()) + _specialJSONItemSize
		if length+itemSize > _bfsMaxSize {
			// flush the current batch and start a new one
			if bs, err = json.Marshal(dmSpecialContents); err != nil {
				return
			}
			bss = append(bss, bs)
			dmSpecialContents = make([]*model.DmSpecialContent, 0, len(dms))
			length = _specialJSONAtLeastSize
		}
		length += itemSize
		dmSpecialContents = append(dmSpecialContents, &model.DmSpecialContent{
			ID:      dm.ID,
			Content: dm.GetSpecialSeg(),
		})
	}
	// flush the final partial batch
	if len(dmSpecialContents) > 0 {
		if bs, err = json.Marshal(dmSpecialContents); err != nil {
			return
		}
		bss = append(bss, bs)
	}
	return
}
// updateSpecualDms (sic: "special") uploads each JSON blob to BFS and
// upserts the joined location list for (tp, oid).
func (s *Service) updateSpecualDms(c context.Context, tp int32, oid int64, bss [][]byte) (err error) {
	var (
		location  string
		locations []string
		ds        *model.DmSpecial
	)
	for _, bs := range bss {
		if len(bs) == 0 {
			continue
		}
		if location, err = s.dao.BfsDmUpload(c, "", bs); err != nil {
			return
		}
		locations = append(locations, location)
	}
	ds = &model.DmSpecial{
		Type: tp,
		Oid:  oid,
	}
	ds.Join(locations)
	if err = s.dao.UpsertDmSpecialLocation(c, ds.Type, ds.Oid, ds.Locations); err != nil {
		return
	}
	return
}
// specialLocationUpdate rebuilds the BFS-hosted special-danmaku blobs for
// (tp, oid): load → serialize into size-capped batches → upload + upsert.
func (s *Service) specialLocationUpdate(c context.Context, tp int32, oid int64) (err error) {
	var (
		dms []*model.DM
		bss [][]byte
	)
	if dms, err = s.speicalDms(c, tp, oid); err != nil {
		return
	}
	if bss, err = s.buildSpecialDms(c, dms); err != nil {
		return
	}
	if err = s.updateSpecualDms(c, tp, oid, bss); err != nil {
		return
	}
	return
}

View File

@@ -0,0 +1,18 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestDMSCache exercises dms against live test infrastructure.
// Fix: the original asserted So(len(dms), ShouldNotBeEmpty) — goconvey's
// ShouldNotBeEmpty expects a collection/string/channel, not an int, so the
// assertion could never meaningfully pass; assert on the slice itself.
func TestDMSCache(t *testing.T) {
	Convey("should return dms and nil", t, func() {
		dms, err := svr.dms(context.TODO(), 1, 1221, 1, 0)
		So(err, ShouldBeNil)
		Convey("dms shoule not be empty", func() {
			So(dms, ShouldNotBeEmpty)
		})
	})
}

View File

@@ -0,0 +1,188 @@
package service
import (
"context"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/service/main/archive/api"
archiveMdl "go-common/app/service/main/archive/model/archive"
"go-common/library/log"
)
var (
	// _maskJobDay lists the ranking windows (in days) queried for mask jobs.
	_maskJobDay = []int32{3, 7}
)
// maskProc periodically runs the danmaku-mask scheduling job. It is a
// long-lived goroutine; it returns immediately when mask scheduling is
// not configured.
func (s *Service) maskProc() {
	var (
		err    error
		c      = context.Background()
		ticker *time.Ticker
	)
	if s.conf.MaskCate == nil {
		return
	}
	ticker = time.NewTicker(time.Duration(s.conf.MaskCate.Interval))
	for range ticker.C {
		if err = s.maskSchedule(c); err != nil {
			log.Error("maskProc.error(%v)", err)
			continue
		}
	}
}
// maskSchedule elects a single instance to run the mask job using a redis
// lock whose value is an RFC3339 expiry time: SETNX wins outright; if the
// key exists, an instance may take over only when the stored expiry has
// passed and its GETSET observes the same stale value (loser detection).
func (s *Service) maskSchedule(c context.Context) (err error) {
	var (
		ok                                bool
		now                               = time.Now()
		expire                            = now.Add(time.Duration(s.conf.MaskCate.Interval))
		expireStr                         = expire.Format(time.RFC3339)
		oldExpireStr, oldExpireGetSetStr  string
		oldExpire                         time.Time
	)
	if ok, err = s.dao.SetnxMaskJob(c, expireStr); err != nil {
		return
	}
	// key absent in redis: we hold the lock, run the job
	if ok {
		if err = s.maskJob(c); err != nil {
			s.dao.DelMaskJob(c)
			log.Error("maskJob,error(%v)", err)
			return
		}
		return
	}
	// key already present in redis:
	// check whether the stored expiry has already passed
	if oldExpireStr, err = s.dao.GetMaskJob(c); err != nil {
		return
	}
	if oldExpire, err = time.Parse(time.RFC3339, oldExpireStr); err != nil {
		return
	}
	if oldExpire.Sub(now) > 0 {
		return
	}
	// stale lock: GETSET our expiry; only the instance that saw the old
	// value unchanged proceeds (others lost the race)
	if oldExpireGetSetStr, err = s.dao.GetSetMaskJob(c, expireStr); err != nil {
		return
	}
	if oldExpireGetSetStr != oldExpireStr {
		return
	}
	if err = s.maskJob(c); err != nil {
		s.dao.DelMaskJob(c)
		log.Error("maskJob,error(%v)", err)
		return
	}
	return
}
// maskJob runs the mask-generation job for every configured category id,
// aborting on the first category that fails.
func (s *Service) maskJob(c context.Context) (err error) {
	for _, tid := range s.conf.MaskCate.Tids {
		if err = s.maskOneCate(c, tid); err != nil {
			log.Error("maskOneCate(tid:%v),error(%v)", tid, err)
			return
		}
	}
	return
}
// maskOneCate collects the top-ranked archives of one category over each
// configured ranking window and triggers mask generation for each archive;
// per-archive failures are logged and skipped.
func (s *Service) maskOneCate(c context.Context, tid int64) (err error) {
	var (
		err1 error
		resp *model.RankRecentResp
		aids []int64
	)
	for _, day := range _maskJobDay {
		if resp, err = s.dao.RankList(c, tid, day); err != nil {
			log.Error("RankList(tid:%v,day:%v),error(%v)", tid, day, err)
			return
		}
		// take at most Limit ranked entries, including their grouped "others"
		for idx, recentRegion := range resp.List {
			if idx >= s.conf.MaskCate.Limit {
				break
			}
			aids = append(aids, recentRegion.Aid)
			for _, other := range recentRegion.Others {
				aids = append(aids, other.Aid)
			}
		}
	}
	for _, aid := range aids {
		if err1 = s.maskOneArchive(c, aid); err1 != nil {
			log.Error("maskOneArchive.err aid:%v,error(%v)", aid, err1)
			continue
		}
		log.Info("maskOneArchive.ok aid:%v", aid)
	}
	return
}
// maskOneArchive triggers mask generation for every video page (cid) of an
// archive, stopping at the first page that fails.
func (s *Service) maskOneArchive(c context.Context, aid int64) (err error) {
	var (
		pages []*api.Page
	)
	if pages, err = s.arcRPC.Page3(c, &archiveMdl.ArgAid2{Aid: aid}); err != nil {
		log.Error("s.arcRPC.Page3(aid:%v),error(%v)", aid, err)
		return
	}
	for _, page := range pages {
		if err = s.maskOneVideo(c, page.Cid); err != nil {
			log.Error("maskOneVideo(oid:%v),error(%v)", page.Cid, err)
			return
		}
	}
	return
}
// maskOneVideo requests mask generation for a single video (oid) unless its
// subject already has the mask-open attribute, then marks the attribute on
// the subject. Archive duration/type lookup is best-effort.
func (s *Service) maskOneVideo(c context.Context, oid int64) (err error) {
	var (
		subject  *model.Subject
		archive3 *api.Arc
		err1     error
		duration int64
		typeID   int32
	)
	if subject, err = s.subject(c, model.SubTypeVideo, oid); err != nil {
		log.Error("s.subject(oid:%v),error(%v)", oid, err)
		return
	}
	// already opened: nothing to do
	if subject.AttrVal(model.AttrSubMaskOpen) == model.AttrYes {
		return
	}
	// best-effort enrichment; failure leaves duration/typeID at zero values
	if archive3, err1 = s.arcRPC.Archive3(c, &archiveMdl.ArgAid2{Aid: subject.Pid}); err1 == nil && archive3 != nil {
		duration = archive3.Duration
		typeID = archive3.TypeID
	}
	if err = s.dao.GenerateMask(c, oid, subject.Mid, model.MaskPlatAll, model.MaskPriorityLow, subject.Pid, duration, typeID); err != nil {
		log.Error("GenerateMask(oid:%v),error(%v)", oid, err)
		return
	}
	// persist the mask-open flag on the subject
	subject.AttrSet(model.AttrYes, model.AttrSubMaskOpen)
	if _, err = s.dao.UpdateSubAttr(c, subject.Type, subject.Oid, subject.Attr); err != nil {
		log.Error("UpdateSubAttr(oid:%v,attr:%v),error(%v)", oid, subject.Attr, err)
		return
	}
	return
}
// maskMidProc refreshes the in-memory list of mask-enabled uploader mids
// every five minutes. Long-lived goroutine; lookup failures keep the
// previous list.
// NOTE(review): s.maskMid is replaced without synchronization while readers
// may access it concurrently — confirm this relies on atomic slice-header
// assignment being acceptable here.
func (s *Service) maskMidProc() {
	var (
		c    = context.Background()
		mids []int64
		err  error
	)
	ticker := time.NewTicker(time.Minute * 5)
	defer ticker.Stop()
	for range ticker.C {
		if mids, err = s.dao.MaskMids(c); err != nil {
			continue
		}
		s.maskMid = mids
		log.Info("update mask mid(%v)", s.maskMid)
	}
}

View File

@@ -0,0 +1,32 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestMaskOneVideo exercises mask generation for a single known cid.
func TestMaskOneVideo(t *testing.T) {
	Convey("mask one video", t, func() {
		err := svr.maskOneVideo(context.TODO(), 8936701)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}
// TestMaskOneArchive exercises mask generation across one archive's pages.
func TestMaskOneArchive(t *testing.T) {
	Convey("mask one archive", t, func() {
		err := svr.maskOneArchive(context.TODO(), 10098039)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}
// TestMaskOneCate exercises mask generation for one category id.
func TestMaskOneCate(t *testing.T) {
	Convey("mask one cate", t, func() {
		err := svr.maskOneCate(context.TODO(), 185)
		So(err, ShouldBeNil)
		t.Logf("err:%v", err)
	})
}

View File

@@ -0,0 +1,457 @@
package service
import (
"context"
"encoding/json"
"fmt"
"regexp"
"time"
"go-common/app/job/main/dm2/conf"
"go-common/app/job/main/dm2/dao"
"go-common/app/job/main/dm2/model"
"go-common/app/job/main/dm2/model/oplog"
arcCli "go-common/app/service/main/archive/api/gorpc"
filterCli "go-common/app/service/main/filter/api/grpc/v1"
seqMdl "go-common/app/service/main/seq-server/model"
seqCli "go-common/app/service/main/seq-server/rpc/client"
"go-common/library/log"
"go-common/library/log/infoc"
"go-common/library/queue/databus"
"go-common/library/sync/pipeline/fanout"
"go-common/library/xstr"
)
const (
	// fallback worker-shard count when config sets none
	_routineSizeDefault = 10
	// per-shard channel buffer size
	_chanSize = 10240
	// flush batch size before a forced drain
	_batchSize = 1000
	// cap on entries in an uploader's "recent danmaku" list
	_maxUpRecent = 1000
)
// Service service struct: holds config, dao, RPC clients, databus
// consumers, worker channels and in-memory state for the dm2 job.
type Service struct {
	conf   *conf.Config
	dao    *dao.Dao
	cache  *fanout.Fanout
	arcRPC *arcCli.Service2
	// seq server
	seqArg            *seqMdl.ArgBusiness
	seqRPC            *seqCli.Service2
	indexCsmr         *databus.Databus
	subjectCsmr       *databus.Databus
	actionCsmr        *databus.Databus
	reportCsmr        *databus.Databus
	videoupCsmr       *databus.Databus
	subtitleAuditCsmr *databus.Databus
	flushMergeChan    []chan *model.Flush
	flushSegChan      []chan *model.FlushDMSeg
	dmRecentChan      []chan *model.DM
	routineSize       int
	realname          map[int64]int64 // key: partition (type) id, value: cid; videos in that partition with oid >= cid require real-name
	// filter service
	filterRPC         filterCli.FilterClient
	maskMid           []int64
	dmOperationLogSvc *infoc.Infoc
	opsLogCh          chan *oplog.Infoc
	// bnj (Spring Festival Gala) special-event state
	bnjAid             int64
	bnjSubAids         map[int64]struct{}
	bnjCsmr            *databus.Databus
	bnjliveRoomID      int64
	bnjStart           time.Time
	bnjIgnoreBeginTime time.Duration
	bnjIgnoreEndTime   time.Duration
	bnjArcVideos       []*model.Video
	bnjIgnoreRate      int64
	bnjUserLevel       int32
}
// New new service: wires up dao, RPC clients and databus consumers, then
// starts every background consumer/worker goroutine.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		conf:              c,
		dao:               dao.New(c),
		cache:             fanout.New("cache", fanout.Worker(1), fanout.Buffer(1024)),
		arcRPC:            arcCli.New2(c.ArchiveRPC),
		seqArg:            &seqMdl.ArgBusiness{BusinessID: c.Seq.BusinessID, Token: c.Seq.Token},
		seqRPC:            seqCli.New2(c.SeqRPC),
		subjectCsmr:       databus.New(c.Databus.SubjectCsmr),
		indexCsmr:         databus.New(c.Databus.IndexCsmr),
		actionCsmr:        databus.New(c.Databus.ActionCsmr),
		reportCsmr:        databus.New(c.Databus.ReportCsmr),
		videoupCsmr:       databus.New(c.Databus.VideoupCsmr),
		subtitleAuditCsmr: databus.New(c.Databus.SubtitleAuditCsmr),
		routineSize:       c.RoutineSize,
		realname:          make(map[int64]int64),
		dmOperationLogSvc: infoc.New(c.Infoc2),
		opsLogCh:          make(chan *oplog.Infoc, 1024),
	}
	if c.RoutineSize <= 0 {
		s.routineSize = _routineSizeDefault
	}
	s.flushMergeChan = make([]chan *model.Flush, s.routineSize)
	s.flushSegChan = make([]chan *model.FlushDMSeg, s.routineSize)
	s.dmRecentChan = make([]chan *model.DM, s.routineSize)
	filterRPC, err := filterCli.NewClient(c.FliterRPC)
	if err != nil {
		panic(err)
	}
	s.filterRPC = filterRPC
	// expand the "partition ids -> threshold cid" real-name config
	for idStr, cid := range conf.Conf.Realname {
		ids, err := xstr.SplitInts(idStr)
		if err != nil {
			panic(err)
		}
		for _, id := range ids {
			if _, ok := s.realname[id]; !ok {
				s.realname[id] = cid
			}
		}
	}
	// bnj (Spring Festival Gala) special handling, if configured
	if s.conf.BNJ != nil {
		// bnj counters
		s.initBnj()
	}
	// consume DMReport-T messages
	go s.reportCsmproc()
	// consume DMAction-T messages
	go s.actionCsmproc()
	// consume DMSubject-T messages
	go s.subjectCsmproc()
	// consume DMMeta-T messages
	go s.indexCsmproc()
	// consume Videoup2Bvc messages
	go s.videoupCsmrproc()
	// consume subtitle submission messages
	go s.subtitleAuditProc()
	// workers that refresh the full danmaku cache
	for i := 0; i < s.routineSize; i++ {
		flushChan := make(chan *model.Flush, _chanSize)
		s.flushMergeChan[i] = flushChan
		go s.flushmergeproc(flushChan)
	}
	// workers that refresh per-segment danmaku caches
	for i := 0; i < s.routineSize; i++ {
		flushSegChan := make(chan *model.FlushDMSeg, _chanSize)
		s.flushSegChan[i] = flushSegChan
		go s.flushSegproc(flushSegChan)
	}
	// workers that maintain the creator-center "recent danmaku" cache
	for i := 0; i < s.routineSize; i++ {
		recentChan := make(chan *model.DM, _chanSize)
		s.dmRecentChan[i] = recentChan
		go s.dmRecentproc(recentChan)
	}
	go s.transferProc()
	// generate danmaku masks for hot second-level category videos
	go s.maskProc()
	// seed and keep refreshing the mask-enabled mid list
	s.maskMid, _ = s.dao.MaskMids(context.TODO())
	log.Info("update mask mid(%v)", s.maskMid)
	go s.maskMidProc()
	// dm task
	go s.taskResProc()
	go s.taskDelProc()
	// oplog
	go s.oplogproc()
	return
}
// Ping checks third-party resource health via the dao.
func (s *Service) Ping(c context.Context) (err error) {
	return s.dao.Ping(c)
}
// subjectCsmproc consumes dm_subject binlog messages and applies them via
// trackSubject. Long-lived goroutine; failed messages are not committed.
func (s *Service) subjectCsmproc() {
	var (
		err          error
		c            = context.TODO()
		regexSubject = regexp.MustCompile("dm_subject_[0-9]+")
	)
	for {
		msg, ok := <-s.subjectCsmr.Messages()
		if !ok {
			log.Error("subject binlog consumer exit")
			return
		}
		m := &model.BinlogMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		log.Info("%s", m)
		// only sharded dm_subject_NN tables are tracked
		if regexSubject.MatchString(m.Table) {
			if err = s.trackSubject(c, m); err != nil {
				log.Error("s.trackSubject(%s) error(%v)", m, err)
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// indexCsmproc consumes dm_index binlog messages and applies them via
// trackIndex. Long-lived goroutine; failed messages are not committed.
func (s *Service) indexCsmproc() {
	var (
		err        error
		c          = context.TODO()
		regexIndex = regexp.MustCompile("dm_index_[0-9]+")
	)
	for {
		msg, ok := <-s.indexCsmr.Messages()
		if !ok {
			log.Error("index binlog consumer exit")
			return
		}
		m := &model.BinlogMsg{}
		if err = json.Unmarshal(msg.Value, &m); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		log.Info("%s", m)
		// only sharded dm_index_NN tables are tracked
		if regexIndex.MatchString(m.Table) {
			if err = s.trackIndex(c, m); err != nil {
				log.Error("s.traceIndex(%s) error(%v)", m, err)
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// flushmergeproc is a worker loop that coalesces full-list flush requests
// by oid and drains the batch either when it reaches _batchSize or on a
// 60-second tick.
func (s *Service) flushmergeproc(flushChan chan *model.Flush) {
	var (
		flushs = make(map[int64]*model.Flush)
		ticker = time.NewTicker(60 * time.Second)
		err    error
	)
	for {
		select {
		case flush, ok := <-flushChan:
			if !ok {
				log.Error("action channel closed")
				return
			}
			if _, ok := flushs[flush.Oid]; !ok || flush.Force { // keep first request per oid, unless forced
				flushs[flush.Oid] = flush
			}
			if len(flushs) < _batchSize {
				continue
			}
		case <-ticker.C:
		}
		// drain the accumulated batch
		if len(flushs) > 0 {
			for _, flush := range flushs {
				if err = s.flushDmCache(context.TODO(), flush); err != nil {
					log.Error("action:flushmergeproc,flush:%+v,error(%v)", flush, err)
				}
			}
			flushs = make(map[int64]*model.Flush)
		}
	}
}
// keySegFlush builds the dedup key "f_<type>_<oid>_<total>_<num>" used to
// merge segment flush requests.
func keySegFlush(tp int32, oid, total, num int64) string {
	return "f_" + fmt.Sprint(tp) + "_" + fmt.Sprint(oid) + "_" + fmt.Sprint(total) + "_" + fmt.Sprint(num)
}
// flushSegproc is a worker loop that coalesces per-segment flush requests
// keyed by (type, oid, total, num) and drains the batch when it reaches
// _batchSize or on a 60-second tick.
func (s *Service) flushSegproc(ch chan *model.FlushDMSeg) {
	var (
		key    string
		merge  = make(map[string]*model.FlushDMSeg)
		ticker = time.NewTicker(60 * time.Second)
		err    error
	)
	for {
		select {
		case msg, ok := <-ch:
			if !ok {
				log.Error("action channel closed")
				return
			}
			key = keySegFlush(msg.Type, msg.Oid, msg.Page.Total, msg.Page.Num)
			if _, ok := merge[key]; !ok || msg.Force { // keep first request per key, unless forced
				merge[key] = msg
			}
			if len(merge) < _batchSize {
				continue
			}
		case <-ticker.C:
		}
		// drain the accumulated batch
		if len(merge) > 0 {
			for _, v := range merge {
				if err = s.flushDmSegCache(context.TODO(), v); err != nil {
					log.Error("action:flushSegproc,data:%+v,error(%v)", v, err)
					continue
				}
			}
			merge = make(map[string]*model.FlushDMSeg)
		}
	}
}
// actionCsmproc consumes DMAction-T messages and dispatches each action via
// actionAct; failed messages are logged and left uncommitted.
func (s *Service) actionCsmproc() {
	for {
		msg, ok := <-s.actionCsmr.Messages()
		if !ok {
			log.Error("action consumer exit")
			return
		}
		act := &model.Action{}
		err := json.Unmarshal(msg.Value, &act)
		if err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		start := time.Now()
		if err = s.actionAct(context.TODO(), act); err != nil {
			log.Error("action:%s,data:%s,error(%v)", act.Action, act.Data, err)
			continue
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s costing:%+v", msg.Partition, msg.Offset, msg.Key, msg.Value, time.Since(start))
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// dmRecentproc is a worker loop that maintains the uploader's
// "recent danmaku" sorted-set cache: removed/invisible danmaku are evicted,
// visible ones are added (lazily loading content) and the set is trimmed
// to _maxUpRecent entries.
// Fix: every error path previously fell through with a bare `continue`,
// silently dropping failures; they are now logged like the rest of the
// file's consumers.
func (s *Service) dmRecentproc(dmChan chan *model.DM) {
	var (
		count int64
		c     = context.TODO()
	)
	for {
		dm, ok := <-dmChan
		if !ok {
			log.Error("recent dm channel is closed")
			return
		}
		sub, err := s.subject(c, dm.Type, dm.Oid)
		if err != nil {
			log.Error("dmRecentproc s.subject(type:%d,oid:%d) error(%v)", dm.Type, dm.Oid, err)
			continue
		}
		if dm.State != model.StateNormal && dm.State != model.StateHide && dm.State != model.StateMonitorAfter {
			// danmaku is no longer visible: evict it from the recent list
			if err = s.dao.ZRemRecentDM(c, sub.Mid, dm.ID); err != nil {
				log.Error("dmRecentproc ZRemRecentDM(mid:%d,dmid:%d) error(%v)", sub.Mid, dm.ID, err)
				continue
			}
		} else {
			// lazily load content (and special content) before caching
			if dm.Content == nil {
				if dm.Content, err = s.dao.Content(c, dm.Oid, dm.ID); err != nil {
					log.Error("dmRecentproc Content(oid:%d,dmid:%d) error(%v)", dm.Oid, dm.ID, err)
					continue
				}
			}
			if dm.Pool == model.PoolSpecial && dm.ContentSpe == nil {
				if dm.ContentSpe, err = s.dao.ContentSpecial(c, dm.ID); err != nil {
					log.Error("dmRecentproc ContentSpecial(dmid:%d) error(%v)", dm.ID, err)
					continue
				}
			}
			if count, err = s.dao.AddRecentDM(c, sub.Mid, dm); err != nil {
				log.Error("dmRecentproc AddRecentDM(mid:%d,dmid:%d) error(%v)", sub.Mid, dm.ID, err)
				continue
			}
			// cap the recent list at _maxUpRecent entries
			if trimCnt := count - _maxUpRecent; trimCnt > 0 {
				if err = s.dao.TrimRecentDM(c, sub.Mid, trimCnt); err != nil {
					log.Error("dmRecentproc TrimRecentDM(mid:%d,cnt:%d) error(%v)", sub.Mid, trimCnt, err)
					continue
				}
			}
		}
	}
}
// asyncAddRecent hands a danmaku to the recent-list worker shard selected
// by oid; a full shard drops the item with a warning instead of blocking.
func (s *Service) asyncAddRecent(c context.Context, dm *model.DM) {
	shard := s.dmRecentChan[dm.Oid%int64(s.routineSize)]
	select {
	case shard <- dm:
		// enqueued
	default:
		log.Warn("dm recent channel is full,dm(%+v)", dm)
	}
}
// reportCsmproc consumes DMReport-T messages: when an action's HideTime is
// in the future it sleeps until then, then clears the danmaku's hide state.
// NOTE(review): the sleep blocks this partition's consumption until the
// hide time passes — confirm that delay is acceptable for this topic.
func (s *Service) reportCsmproc() {
	for {
		msg, ok := <-s.reportCsmr.Messages()
		if !ok {
			log.Error("report consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,value:%s", msg.Partition, msg.Offset, msg.Value)
		act := &model.ReportAction{}
		err := json.Unmarshal(msg.Value, &act)
		if err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", msg.Value, err)
			continue
		}
		if diff := act.HideTime - time.Now().Unix(); diff > 0 {
			log.Info("action:%+v will be processed after %d seconds", act, diff)
			time.Sleep(time.Duration(diff) * time.Second)
		}
		if _, err = s.dao.DelDMHideState(context.TODO(), 1, act.Cid, act.Did); err != nil {
			log.Error("DelDMHideState(%+v) error(%v)", act, err)
		} else {
			log.Info("DelDMHideState(%+v) success ", act)
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// videoupCsmrproc consumes Videoup2Bvc messages and tracks the archive for
// routes that (re)open a video; failed messages are left uncommitted.
func (s *Service) videoupCsmrproc() {
	var (
		err error
		c   = context.TODO()
	)
	for {
		msg, ok := <-s.videoupCsmr.Messages()
		if !ok {
			log.Error("videoup consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.VideoupMsg{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		// only routes that publish/open the archive are interesting here
		if m.Route == model.RouteSecondRound || m.Route == model.RouteAutoOpen ||
			m.Route == model.RouteForceSync || m.Route == model.RouteDelayOpen {
			if err = s.trackVideoup(c, m.Aid); err != nil {
				continue
			}
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}
// subtitleAuditProc consumes subtitle-submission messages and runs
// sensitive-word filtering on each subtitle; failures are left uncommitted.
func (s *Service) subtitleAuditProc() {
	var (
		err error
		c   = context.Background()
	)
	for {
		msg, ok := <-s.subtitleAuditCsmr.Messages()
		if !ok {
			log.Error("subtitle_audit consumer exit")
			return
		}
		log.Info("partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
		m := &model.SubtitleAuditMsg{}
		if err = json.Unmarshal(msg.Value, m); err != nil {
			log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
			continue
		}
		if err = s.SubtitleFilter(c, m.Oid, m.SubtitleID); err != nil {
			log.Error("SubtitleFilter(oid:%v,subtitleID:%v),error(%v)", m.Oid, m.SubtitleID, err)
			continue
		}
		if err = msg.Commit(); err != nil {
			log.Error("commit offset(%v) error(%v)", msg, err)
		}
	}
}

View File

@@ -0,0 +1,39 @@
package service
import (
"context"
"flag"
"os"
"path/filepath"
"testing"
"go-common/app/job/main/dm2/conf"
. "github.com/smartystreets/goconvey/convey"
)
var (
svr *Service
)
// TestMain boots the service from the local config so tests can use svr.
func TestMain(m *testing.M) {
	var (
		err error
	)
	dir, _ := filepath.Abs("../cmd/dm2-job.toml")
	if err = flag.Set("conf", dir); err != nil {
		panic(err)
	}
	if err = conf.Init(); err != nil {
		panic(err)
	}
	svr = New(conf.Conf)
	os.Exit(m.Run())
}
// TestPing checks backend connectivity via Service.Ping.
func TestPing(t *testing.T) {
	Convey("", t, func() {
		err := svr.Ping(context.TODO())
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,39 @@
package service
import (
"context"
"go-common/app/job/main/dm2/model"
"go-common/library/ecode"
"go-common/library/log"
)
// subject returns the danmaku subject for (tp, oid), cache first with DB
// fallback. A DB miss yields a placeholder with ID == 0, which (whether
// cached or freshly built) is reported as ecode.NothingFound. The cache is
// only repopulated when the cache read itself succeeded (avoids hammering
// a broken cache).
func (s *Service) subject(c context.Context, tp int32, oid int64) (sub *model.Subject, err error) {
	var cache = true
	if sub, err = s.dao.SubjectCache(c, tp, oid); err != nil {
		err = nil
		cache = false
	}
	if sub == nil {
		if sub, err = s.dao.Subject(c, tp, oid); err != nil {
			return
		}
		if sub == nil {
			// negative-cache placeholder: ID stays 0
			sub = &model.Subject{
				Type: tp,
				Oid:  oid,
			}
		}
		if cache {
			s.cache.Do(c, func(ctx context.Context) {
				s.dao.AddSubjectCache(ctx, sub)
			})
		}
	}
	if sub.ID == 0 {
		err = ecode.NothingFound
		log.Error("subject not exist,type:%d,oid:%d", tp, oid)
		return
	}
	return
}

View File

@@ -0,0 +1,17 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestSubject checks subject lookup for a known test oid.
func TestSubject(t *testing.T) {
	Convey("", t, func() {
		sub, err := svr.subject(context.TODO(), 1, 1221)
		So(err, ShouldBeNil)
		So(sub, ShouldNotBeNil)
		t.Logf("subject:%+v", sub)
	})
}

View File

@@ -0,0 +1,242 @@
package service
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"go-common/app/job/main/dm2/model"
filterMdl "go-common/app/service/main/filter/api/grpc/v1"
"go-common/library/database/sql"
"go-common/library/ecode"
"go-common/library/log"
)
const (
	// filter-service area name for subtitle text
	_filterAreaSubtitle = "subtitle"
	// initial capacity for batched filter requests
	_filterCapacity = 5
	// max characters allowed in a single subtitle line
	_contentSizeLimit = 300
)
// SubtitleFilter runs sensitive-word checking on one subtitle.
// 1. Only rows in "check before audit" / "check before publish" states are handled.
// 2. Check-before-audit: on pass the status moves to pending-audit; the draft
//    cache and the subtitle cache are dropped.
// 3. On a hit the status moves to audit-rejected with the reject reason set,
//    and caches are dropped.
// 4. Check-before-publish: on pass the status moves to published (DB + logical
//    publish table updated) and the subtitle cache is dropped.
// 5. On a hit the status moves to audit-rejected with the reason set; caches dropped.
// 6. A missing row is tolerated so a lost message does not wedge the consumer.
func (s *Service) SubtitleFilter(c context.Context, oid int64, subtitleID int64) (err error) {
	var (
		subtitle *model.Subtitle
	)
	if subtitle, err = s.dao.GetSubtitle(c, oid, subtitleID); err != nil {
		log.Error("params(oid::%v,subtitleID:%v)", oid, subtitleID)
		return
	}
	if subtitle == nil {
		log.Error("params(oid:%v,subtitleID:%v not found)", oid, subtitleID)
		return
	}
	switch subtitle.Status {
	case model.SubtitleStatusCheckToAudit:
		if err = s.checkToAudit(c, subtitle); err != nil {
			log.Error("checkToAudit.params(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
	case model.SubtitleStatusCheckPublish:
		if err = s.checkToPublish(c, subtitle); err != nil {
			log.Error("checkToPublish.params(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
	default:
		return
	}
	return
}
// checkToAudit screens a check-before-audit subtitle against the word filter,
// moves it to the to-audit state (or audit-rejected when hits are found),
// persists it and drops the related caches.
func (s *Service) checkToAudit(c context.Context, subtitle *model.Subtitle) (err error) {
	hits, err := s.checkBfsData(c, subtitle)
	if err != nil {
		log.Error("checkBfsData(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	nextStatus := model.SubtitleStatusToAudit
	if len(hits) > 0 {
		// sensitive words found: reject and record them in the comment
		subtitle.RejectComment = "敏感词:" + strings.Join(hits, ",")
		nextStatus = model.SubtitleStatusAuditBack
		subtitle.PubTime = time.Now().Unix()
	}
	subtitle.Status = nextStatus
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("UpdateSubtitleStatus(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	s.dao.DelSubtitleDraftCache(c, subtitle.Oid, subtitle.Type, subtitle.Mid, subtitle.Lan)
	s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID)
	return
}
// checkToPublish screens a check-before-publish subtitle against the word
// filter and either publishes it or rejects it back to audit.
// A SubtitleSizeLimit error from checkBfsData is not fatal: it is converted
// into a rejection with an explanatory comment.
func (s *Service) checkToPublish(c context.Context, subtitle *model.Subtitle) (err error) {
	var (
		status = model.SubtitleStatusPublish
		hits   []string
	)
	// only the size-limit error falls through; every other error aborts
	if hits, err = s.checkBfsData(c, subtitle); err != nil && err != ecode.SubtitleSizeLimit {
		log.Error("checkBfsData(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	if err == ecode.SubtitleSizeLimit {
		// a single subtitle item exceeded the allowed length
		subtitle.RejectComment = "单条字幕数超过限制"
		status = model.SubtitleStatusAuditBack
	}
	if len(hits) > 0 {
		// sensitive words found: rejection takes precedence over publishing
		subtitle.RejectComment = "敏感词:" + strings.Join(hits, ",")
		status = model.SubtitleStatusAuditBack
	}
	subtitle.Status = status
	// err may still hold SubtitleSizeLimit here; the assignment below
	// overwrites it, so a successful update deliberately clears it
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("UpdateSubtitleStatus(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	if status == model.SubtitleStatusPublish {
		if err = s.auditPass(c, subtitle); err != nil {
			log.Error("auditPass(subtitle:%+v),error(%v)", subtitle, err)
			return
		}
		return
	}
	if err = s.auditReject(c, subtitle); err != nil {
		log.Error("auditReject(subtitle:%+v),error(%v)", subtitle, err)
		return
	}
	return
}
// checkBfsData downloads the subtitle body from BFS, decodes it, and returns
// the sensitive-word hits reported by the filter service.
func (s *Service) checkBfsData(c context.Context, subtitle *model.Subtitle) (hits []string, err error) {
	raw, err := s.dao.BfsData(c, subtitle.SubtitleURL)
	if err != nil {
		log.Error("BfsData.params(SubtitleURL:%v),error(%v)", subtitle.SubtitleURL, err)
		return
	}
	body := new(model.SubtitleBody)
	if err = json.Unmarshal(raw, body); err != nil {
		log.Error("checkToAudit.Unmarshal,error(%v)", err)
		return
	}
	if hits, err = s.checkFilter(c, body); err != nil {
		log.Error("checkFilter(body:%+v),error(%v)", body, err)
		return
	}
	return
}
// checkFilter batches subtitle items into filter-service requests
// (_filterCapacity items per RPC) and returns the deduplicated set of
// sensitive words that were hit.
// It fails with ecode.SubtitleSizeLimit as soon as one item is too long.
func (s *Service) checkFilter(c context.Context, body *model.SubtitleBody) (hits []string, err error) {
	var batches []map[string]string
	batch := make(map[string]string)
	for idx, item := range body.Bodys {
		if len(item.Content) > _contentSizeLimit {
			err = ecode.SubtitleSizeLimit
			return
		}
		// keys carry the global item index so hits can be traced back
		batch[fmt.Sprint(idx)] = item.Content
		if (idx+1)%_filterCapacity == 0 {
			// batch full: queue it and start a fresh one
			batches = append(batches, batch)
			batch = make(map[string]string)
		}
	}
	if len(batch) > 0 {
		batches = append(batches, batch)
	}
	seen := make(map[string]struct{})
	for _, msgs := range batches {
		var reply *filterMdl.MHitReply
		if reply, err = s.filterRPC.MHit(c, &filterMdl.MHitReq{
			Area:   _filterAreaSubtitle,
			MsgMap: msgs,
		}); err != nil {
			log.Error("checkFilter(msgMap:%+v),error(%v)", msgs, err)
			return
		}
		for _, rl := range reply.GetRMap() {
			for _, hit := range rl.GetHits() {
				seen[hit] = struct{}{}
			}
		}
	}
	for word := range seen {
		hits = append(hits, word)
	}
	return
}
// auditReject marks a subtitle as audit-rejected, persists it, and drops the
// draft and subtitle caches.
func (s *Service) auditReject(c context.Context, subtitle *model.Subtitle) (err error) {
	subtitle.Status = model.SubtitleStatusAuditBack
	if err = s.dao.UpdateSubtitle(c, subtitle); err != nil {
		log.Error("params(%+v).error(%v)", subtitle, err)
		return
	}
	// use the caller's context (previously context.Background()) so cache
	// invalidation carries the request's trace/deadline metadata, matching
	// checkToAudit's cache handling
	s.dao.DelSubtitleDraftCache(c, subtitle.Oid, subtitle.Type, subtitle.Mid, subtitle.Lan)
	s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID)
	return
}
// auditPass publishes a subtitle: inside one transaction it updates the
// subtitle row and inserts the logical publish row, then invalidates the
// subtitle and video-subtitle caches.
//
// Bug fix: the rollback/commit defer used to be registered BEFORE the
// transaction was opened, so a failure in BeginBiliDMTran left tx == nil and
// the deferred tx.Rollback() panicked with a nil-pointer dereference. The
// transaction is now opened first, then the defer is installed.
func (s *Service) auditPass(c context.Context, subtitle *model.Subtitle) (err error) {
	var (
		tx          *sql.Tx
		subtitlePub *model.SubtitlePub
	)
	subtitle.RejectComment = ""
	if tx, err = s.dao.BeginBiliDMTran(c); err != nil {
		log.Error("error(%v)", err)
		return
	}
	// from here on tx is non-nil, so rollback/commit in the defer is safe
	defer func() {
		if err != nil {
			tx.Rollback()
			log.Error("params(subtitle:%+v).err(%v)", subtitle, err)
			return
		}
		if err = tx.Commit(); err != nil {
			log.Error("params(subtitle:%+v).err(%v)", subtitle, err)
			return
		}
	}()
	if err = s.dao.TxUpdateSubtitle(tx, subtitle); err != nil {
		log.Error("params(%+v).error(%v)", subtitle, err)
		return
	}
	subtitlePub = &model.SubtitlePub{
		Oid:        subtitle.Oid,
		Type:       subtitle.Type,
		Lan:        subtitle.Lan,
		SubtitleID: subtitle.ID,
	}
	if err = s.dao.TxAddSubtitlePub(tx, subtitlePub); err != nil {
		log.Error("params(%+v).error(%v)", subtitlePub, err)
		return
	}
	if err = s.dao.DelSubtitleCache(c, subtitle.Oid, subtitle.ID); err != nil {
		log.Error("DelSubtitleCache.params(subtitle:%+v).err(%v)", subtitle, err)
		return
	}
	if err = s.dao.DelVideoSubtitleCache(c, subtitle.Oid, subtitle.Type); err != nil {
		log.Error("DelVideoSubtitleCache.params(subtitle:%+v).err(%v)", subtitle, err)
		return
	}
	return
}

View File

@@ -0,0 +1,53 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
. "github.com/smartystreets/goconvey/convey"
)
// TestSubtitle runs the full SubtitleFilter pipeline against a fixed oid and
// subtitle id that must exist in the test environment.
func TestSubtitle(t *testing.T) {
	var (
		oid        int64 = 10109227
		subtitleID int64 = 1
	)
	Convey("", t, func() {
		err := svr.SubtitleFilter(context.Background(), oid, subtitleID)
		So(err, ShouldBeNil)
	})
}
// TestSubtitleFilter feeds a hand-built subtitle body (mixing phrases the
// filter service is expected to hit with harmless ones) through checkFilter
// and logs which words were reported.
func TestSubtitleFilter(t *testing.T) {
	body := &model.SubtitleBody{
		Bodys: []*model.SubtitleItem{
			{
				From:    0,
				To:      10,
				Content: "习近平",
			},
			{
				From:    0,
				To:      10,
				Content: "习大大",
			},
			{
				From:    0,
				To:      10,
				Content: "不要哇",
			},
			{
				From:    0,
				To:      10,
				Content: "呀咩爹",
			},
		},
	}
	Convey("subtitle filter", t, func() {
		hits, err := svr.checkFilter(context.Background(), body)
		So(err, ShouldBeNil)
		t.Logf("hits:%v", hits)
	})
}

View File

@@ -0,0 +1,277 @@
package service
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"time"
"go-common/app/job/main/dm2/model"
"go-common/app/job/main/dm2/model/oplog"
"go-common/library/log"
)
// taskResProc is a long-running loop: every ResInterval it polls tasks that
// are waiting on search results, fetches each result from the search
// platform, and advances the task state machine (fail / wait / success).
// It never returns; run it in its own goroutine.
func (s *Service) taskResProc() {
	var (
		c     = context.Background()
		tasks []*model.TaskInfo
		err   error
	)
	ticker := time.NewTicker(time.Duration(s.conf.TaskConf.ResInterval))
	defer ticker.Stop()
	for range ticker.C {
		if tasks, err = s.dao.TaskInfos(c, model.TaskStateSearch); err != nil {
			log.Error("s.dao.TaskInfos error(%v)", err)
			continue
		}
		for _, task := range tasks {
			count, url, state, err := s.dao.TaskSearchRes(c, task)
			if err != nil {
				log.Error("s.dao.TaskSearchRes(%+v) error(%v)", task, err)
				continue
			}
			if state == model.TaskSearchFail {
				task.State = model.TaskStateFail
			} else if state == model.TaskSearchSuc {
				task.Result = url
				task.Count = count
				if task.Sub > 0 {
					// task has a sub-task: hold in the wait state
					task.State = model.TaskStateWait
				} else {
					task.State = model.TaskStateSuc
				}
			}
			// best-effort persist; a failure is retried on the next tick
			s.dao.UpdateTask(c, task)
		}
	}
}
// taskDelProc is a long-running loop that, every DelInterval, tries to become
// the cluster-wide deletion worker and run one pending deletion job.
// It never returns; run it in its own goroutine.
func (s *Service) taskDelProc() {
	ctx := context.Background()
	ticker := time.NewTicker(time.Duration(s.conf.TaskConf.DelInterval))
	defer ticker.Stop()
	for range ticker.C {
		if err := s.taskSchedule(ctx); err != nil {
			log.Error("taskDelProc error(%v)", err)
		}
	}
}
// taskSchedule elects a single deletion worker via a redis SETNX lease whose
// value is an RFC3339 expiry timestamp. A stale lease is taken over with
// GETSET so that even if several instances observe the expiry at once, only
// the one whose GETSET returns the old value proceeds to run taskDelJob.
func (s *Service) taskSchedule(c context.Context) (err error) {
	var (
		ok bool
		now = time.Now()
		expire = now.Add(time.Duration(s.conf.TaskConf.DelInterval))
		expireStr = expire.Format(time.RFC3339)
		oldExpireStr, oldExpireGetSetStr string
		oldExpire time.Time
	)
	if ok, err = s.dao.SetnxTaskJob(c, expireStr); err != nil {
		return
	}
	// key absent in redis: this instance acquired the lease
	if ok {
		if err = s.taskDelJob(c); err != nil {
			// release the lease on failure so another instance can retry
			s.dao.DelTaskJob(c)
			log.Error("taskDelJob,error(%v)", err)
			return
		}
		return
	}
	// key already present in redis:
	// check whether the current lease has expired
	if oldExpireStr, err = s.dao.GetTaskJob(c); err != nil {
		return
	}
	if oldExpire, err = time.Parse(time.RFC3339, oldExpireStr); err != nil {
		return
	}
	if oldExpire.Sub(now) > 0 {
		// lease still valid: another instance is working
		return
	}
	if oldExpireGetSetStr, err = s.dao.GetSetTaskJob(c, expireStr); err != nil {
		return
	}
	if oldExpireGetSetStr != oldExpireStr {
		// someone else swapped the lease first: they won the takeover
		return
	}
	if err = s.taskDelJob(c); err != nil {
		s.dao.DelTaskJob(c)
		log.Error("taskDelJob,error(%v)", err)
		return
	}
	return
}
// TODO: operation_time && operation_rate
// taskDelJob picks one pending deletion task, marks it as deleting, runs the
// batched deletion, and persists the resulting state plus the sub-task
// deletion counter.
func (s *Service) taskDelJob(c context.Context) (err error) {
	var (
		task *model.TaskInfo
	)
	if task, err = s.dao.OneTask(c); err != nil || task == nil {
		// nothing to do (or the fetch failed)
		return
	}
	task.State = model.TaskStateDelDM
	s.dao.UpdateTask(c, task)
	var delCount int64
	if delCount, task.LastIndex, task.State, err = s.taskDelDM(c, task); err != nil {
		return
	}
	if task.State == model.TaskStateDelDM {
		// the deletion ran to completion without being preempted or paused
		task.State = model.TaskStateSuc
	}
	if _, err = s.dao.UptSubTask(c, task.ID, delCount, time.Now()); err != nil {
		return
	}
	_, err = s.dao.UpdateTask(c, task)
	return
}
// taskDelDM downloads the search-result file of eTask and deletes the listed
// danmaku in batches of conf.TaskConf.DelNum lines, starting at
// eTask.LastIndex so an interrupted task resumes where it stopped. After each
// batch it re-reads the task to honor preemption by a higher-priority task,
// external state changes, and the deletion safety limit.
//
// Fixes over the previous version:
//   - the nil check after TaskInfoByID tested the wrong variable (`task`
//     instead of the re-fetched `eTask`), which could nil-panic on the next
//     line when the task row had disappeared;
//   - per-line parse failures and per-oid delete failures no longer leak a
//     stale value into the named return err, which used to make a fully
//     successful run report the last transient error.
func (s *Service) taskDelDM(c context.Context, eTask *model.TaskInfo) (delCount int64, lastIndex, state int32, err error) {
	taskDelNum := s.conf.TaskConf.DelNum
	taskResFieldLen := s.conf.TaskConf.ResFieldLen
	res, err := http.Get(eTask.Result)
	if err != nil {
		log.Error("s.taskDelDM.HttpGet(%s) error(%v)", eTask.Result, err)
		return
	}
	resp, err := ioutil.ReadAll(res.Body)
	res.Body.Close()
	if err != nil {
		log.Error("s.taskDelDM.ioutilRead error(%v)", err)
		return
	}
	lines := bytes.Split(resp, []byte("\n"))
	total := len(lines)
	n := (total-1)/taskDelNum + 1
	for i := int(eTask.LastIndex); i < n; i++ {
		var (
			task    *model.TaskInfo
			subTask *model.SubTask
		)
		start := i * taskDelNum
		end := (i + 1) * taskDelNum
		if end > total {
			end = total
		}
		OidDMid := make(map[int64][]int64)
		for _, line := range lines[start:end] {
			fields := bytes.Split(line, []byte("\001"))
			if len(fields) < taskResFieldLen {
				log.Error("fields lenth too small:%d", len(fields))
				continue
			}
			// parse errors are logged and skipped; they must not leak into
			// the named return err
			dmid, perr := strconv.ParseInt(string(fields[0]), 10, 64)
			if perr != nil {
				log.Error("ParseInt(%s) error(%v)", string(fields[0]), perr)
				continue
			}
			oid, perr := strconv.ParseInt(string(fields[1]), 10, 64)
			if perr != nil {
				log.Error("ParseInt(%s) error(%v)", string(fields[1]), perr)
				continue
			}
			OidDMid[oid] = append(OidDMid[oid], dmid)
		}
		for oid, dmids := range OidDMid {
			affected, derr := s.dao.DelDMs(c, oid, dmids, model.StateTaskDel)
			if derr != nil {
				log.Error("dm task(id:%d) del dm(oid:%d,dmids:%v) error(%v)", eTask.ID, oid, dmids, derr)
				continue
			}
			if affected > 0 {
				s.OpLog(c, oid, 0, time.Now().Unix(), int(model.SubTypeVideo), dmids, "status", "", strconv.FormatInt(int64(model.StateTaskDel), 10), "弹幕任务删除", oplog.SourceManager, oplog.OperatorSystem)
				delCount += affected
				if _, derr = s.dao.UptSubjectCount(c, model.SubTypeVideo, oid, affected); derr != nil {
					log.Error("dm task update count(oid:%d,affected:%d) error(%v)", oid, affected, derr)
				}
			}
			// throttle deletions to limit DB pressure
			time.Sleep(50 * time.Millisecond)
		}
		if len(OidDMid) > 0 {
			log.Warn("dm task(id:%d) del dm(oid,dmids:%+v)", eTask.ID, OidDMid)
		}
		lastIndex = int32(i + 1)
		// preemption check: yield when a different, higher-priority task appears
		task, err = s.dao.OneTask(c)
		if err == nil && task != nil && task.ID != eTask.ID && task.Priority > eTask.Priority {
			state = model.TaskStateWait
			return
		}
		// re-read our own task; on failure keep going with the stale copy
		// (fixed: previously checked task == nil instead of eTask == nil)
		if eTask, err = s.dao.TaskInfoByID(c, eTask.ID); err != nil || eTask == nil {
			continue
		}
		state = eTask.State
		if state != model.TaskStateDelDM {
			// the task was paused/stopped externally
			return
		}
		if subTask, err = s.dao.SubTask(c, eTask.ID); err != nil || subTask == nil {
			continue
		}
		tCount := subTask.Tcount + delCount
		if tCount >= s.conf.TaskConf.DelLimit && subTask.Tcount < s.conf.TaskConf.DelLimit {
			// crossing the safety limit pauses the task and notifies owners
			log.Warn("task(id:%d) del dm reach limit(count:%d)", eTask.ID, tCount)
			s.sendWechatWorkMsg(c, eTask, tCount)
			state = model.TaskStatePause
			return
		}
	}
	return
}
// sendWechatWorkMsg notifies the task creator, the reviewer, and the
// configured CC list that a deletion task reached the safety limit.
// The recipient slice is built in a fresh allocation: appending directly to
// s.conf.TaskConf.MsgCC could reuse (and mutate) its backing array across
// calls — classic slice aliasing on shared config state.
func (s *Service) sendWechatWorkMsg(c context.Context, task *model.TaskInfo, count int64) (err error) {
	content := fmt.Sprintf(model.TaskNoticeContent, task.ID, task.Title, count)
	cc := s.conf.TaskConf.MsgCC
	users := make([]string, 0, len(cc)+2)
	users = append(users, cc...)
	users = append(users, task.Creator, task.Reviewer)
	return s.dao.SendWechatWorkMsg(c, content, model.TaskNoticeTitle, users)
}
// OpLog builds an infoc-format operation log entry and enqueues it on the
// ops channel without blocking; when the channel is full the entry is dropped
// and an error is returned.
func (s *Service) OpLog(c context.Context, cid, operator, OperationTime int64, typ int, dmids []int64, subject, originVal, currentVal, remark string, source oplog.Source, operatorType oplog.OperatorType) (err error) {
	entry := new(oplog.Infoc)
	entry.Oid = cid
	entry.Type = typ
	entry.DMIds = dmids
	entry.Subject = subject
	entry.OriginVal = originVal
	entry.CurrentVal = currentVal
	entry.OperationTime = strconv.FormatInt(OperationTime, 10)
	entry.Source = source
	entry.OperatorType = operatorType
	entry.Operator = operator
	entry.Remark = remark
	select {
	case s.opsLogCh <- entry:
	default:
		err = fmt.Errorf("opsLogCh full")
		log.Error("opsLogCh full (%v)", entry)
	}
	return
}
// oplogproc drains the ops-log channel, discarding entries that fail basic
// validation and emitting one infoc record per valid dmid.
// Idiom fix: the redundant `else` after `continue` was removed (the loop body
// below only runs for valid entries anyway).
func (s *Service) oplogproc() {
	for opLog := range s.opsLogCh {
		if len(opLog.Subject) == 0 || len(opLog.CurrentVal) == 0 || opLog.Source <= 0 ||
			opLog.Operator < 0 || opLog.OperatorType <= 0 {
			// NOTE(review): opLog.Subject is logged twice here; the second
			// argument was probably meant to be a different field — confirm.
			log.Warn("oplogproc() it is an illegal log, warn(%v, %v, %v)", opLog.Subject, opLog.Subject, opLog.CurrentVal)
			continue
		}
		for _, dmid := range opLog.DMIds {
			if dmid > 0 {
				s.dmOperationLogSvc.Info(opLog.Subject, strconv.FormatInt(opLog.Oid, 10), strconv.Itoa(opLog.Type),
					strconv.FormatInt(dmid, 10), opLog.Source.String(), opLog.OriginVal,
					opLog.CurrentVal, strconv.FormatInt(opLog.Operator, 10), opLog.OperatorType.String(),
					opLog.OperationTime, opLog.Remark)
			} else {
				log.Warn("oplogproc() it is an illegal log, for dmid value, warn(%d, %+v)", dmid, opLog)
			}
		}
	}
}

View File

@@ -0,0 +1,50 @@
package service
import (
"context"
"testing"
"go-common/app/job/main/dm2/model"
"github.com/smartystreets/goconvey/convey"
)
// TestServicetaskResProc invokes taskResProc directly.
// NOTE(review): taskResProc ranges over a ticker forever and never returns,
// so this test blocks indefinitely — confirm it is intended for manual runs.
func TestServicetaskResProc(t *testing.T) {
	convey.Convey("taskResProc", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			svr.taskResProc()
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}
// TestServicetaskDelProc invokes taskDelProc directly.
// NOTE(review): taskDelProc ranges over a ticker forever and never returns,
// so this test blocks indefinitely — confirm it is intended for manual runs.
func TestServicetaskDelProc(t *testing.T) {
	convey.Convey("taskDelProc", t, func(ctx convey.C) {
		ctx.Convey("When everything goes positive", func(ctx convey.C) {
			svr.taskDelProc()
			ctx.Convey("No return values", func(ctx convey.C) {
			})
		})
	})
}
// TestServicetaskDelDM runs one deletion pass against a recorded
// search-result URL on the internal berserker service and logs the outcome.
func TestServicetaskDelDM(t *testing.T) {
	convey.Convey("taskDelDM", t, func(ctx convey.C) {
		var (
			c    = context.Background()
			task = &model.TaskInfo{
				Result: "http://berserker.bilibili.co/avenger/download/hdfs?path=/api/hive/query/148/672bc22888af701529e8b3052fd2c4a7/1543546463/1547966/result",
			}
		)
		ctx.Convey("When everything gose positive", func(ctx convey.C) {
			delCount, lastIndex, state, err := svr.taskDelDM(c, task)
			ctx.Convey("Then err should be nil.delCount,pause should not be nil.", func(ctx convey.C) {
				ctx.So(err, convey.ShouldBeNil)
				ctx.So(state, convey.ShouldNotBeNil)
				ctx.So(delCount, convey.ShouldNotBeNil)
				t.Log(delCount, lastIndex, state)
			})
		})
	})
}

View File

@@ -0,0 +1,204 @@
package service
import (
"context"
"encoding/json"
"time"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// trackSubject applies a dm_subject binlog event to the subject cache:
// inserts/updates refresh the cache, deletes drop it, and updates that change
// Childpool, Maxlimit or State additionally force-refresh the whole-range and
// segmented danmaku caches.
// Idiom fix: json.Unmarshal was given &nw although nw is already a
// *model.Subject; the extra level of indirection (**Subject) worked but was
// needless — the pointer is now passed directly.
func (s *Service) trackSubject(c context.Context, m *model.BinlogMsg) (err error) {
	nw := &model.Subject{}
	if err = json.Unmarshal(m.New, nw); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.New, err)
		return
	}
	switch m.Action {
	case "insert":
		if err = s.dao.AddSubjectCache(c, nw); err != nil {
			log.Error("s.dao.AddSubjectCache(%v) error(%v)", nw, err)
			return
		}
	case "delete":
		if err = s.dao.DelSubjectCache(c, nw.Type, nw.Oid); err != nil {
			log.Error("s.dao.DelSubjectCahce(%v) error(%v)", nw, err)
			return
		}
	case "update":
		old := model.Subject{}
		if err = json.Unmarshal(m.Old, &old); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", m.Old, err)
			return
		}
		if err = s.dao.AddSubjectCache(c, nw); err != nil { // cache the full subject
			log.Error("s.dao.AddSubjectCache(%v) error(%v)", nw, err)
			return
		}
		if nw.Childpool != old.Childpool || nw.Maxlimit != old.Maxlimit || nw.State != old.State {
			// refresh the whole-range danmaku cache immediately
			flush := &model.Flush{Oid: nw.Oid, Type: nw.Type, Force: true}
			s.flushDmCache(c, flush)
			// refresh the segmented danmaku cache immediately
			s.flushXMLSegCache(c, nw)
		}
	}
	return
}
// trackIndex applies a dm_index binlog update: it refreshes the uploader's
// recent-danmaku list, schedules whole-range and per-segment cache flushes,
// drops the content cache of the changed row, and — when the special flag
// changed — rebuilds the special-danmaku location list.
func (s *Service) trackIndex(c context.Context, m *model.BinlogMsg) (err error) {
	if m.Action != "update" {
		return
	}
	dm := &model.DM{}
	old := &model.DM{}
	if err = json.Unmarshal(m.New, &dm); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.New, err)
		return
	}
	if err = json.Unmarshal(m.Old, &old); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", m.Old, err)
		return
	}
	s.asyncAddRecent(c, dm) // refresh the uploader's latest-1000 danmaku list
	s.asyncAddFlushDM(c, &model.Flush{
		Type:  dm.Type,
		Oid:   dm.Oid,
		Force: true,
	}) // flush the whole-range danmaku cache
	sub, err := s.subject(c, dm.Type, dm.Oid)
	if err != nil {
		return
	}
	p, err := s.pageinfo(c, sub.Pid, dm)
	if err != nil {
		return
	}
	if dm.NeedUpdateSpecial(old) {
		if err = s.specialLocationUpdate(c, dm.Type, dm.Oid); err != nil {
			return
		}
	}
	s.dao.DelIdxContentCaches(c, dm.Type, dm.Oid, dm.ID) // drop the content cache
	s.asyncAddFlushDMSeg(c, &model.FlushDMSeg{
		Type:  dm.Type,
		Oid:   dm.Oid,
		Force: true,
		Page:  p,
	})
	return
}
// trackVideoup reloads every video of an archive (retrying transient
// failures) and syncs each one's dm subject.
func (s *Service) trackVideoup(c context.Context, aid int64) (err error) {
	const retry = 5
	tp := model.SubTypeVideo
	var videos []*model.Video
	for attempt := 0; attempt < retry; attempt++ {
		if videos, err = s.dao.Videos(c, aid); err == nil {
			break
		}
		time.Sleep(time.Second)
	}
	if err != nil {
		log.Error("track video failed,aid(%d),error(%v)", aid, err)
		return
	}
	for _, v := range videos {
		for attempt := 0; attempt < retry; attempt++ {
			if err = s.syncVideo(c, tp, v); err == nil {
				break
			}
			time.Sleep(time.Second)
		}
	}
	return
}
// syncVideo reconciles one video with its dm subject row:
//   - no subject yet and HD transcode finished: optionally trigger danmaku
//     mask generation for whitelisted uploaders, then create the subject with
//     a duration-based danmaku limit;
//   - subject exists but the uploader (mid) changed: update the subject mid
//     and re-own all subtitles of the video.
func (s *Service) syncVideo(c context.Context, tp int32, v *model.Video) (err error) {
	log.Info("sync video:%+v", v)
	sub, err := s.dao.Subject(c, tp, v.Cid)
	if err != nil {
		return
	}
	if sub == nil {
		if v.XCodeState >= model.VideoXcodeHDFinish {
			// generate the danmaku mask for whitelisted uploaders
			var attr int32
			for _, mid := range s.maskMid {
				if mid == v.Mid {
					if err = s.dao.GenerateMask(c, v.Cid, mid, model.MaskPlatAll, model.MaskPriorityHgih, v.Aid, 0, 0); err != nil {
						break
					}
					// mark the subject attribute as mask-enabled
					attr = attr | (model.AttrYes << model.AttrSubMaskOpen)
					break
				}
			}
			if _, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), attr); err != nil {
				return
			}
		}
	} else {
		if sub.Mid != v.Mid {
			if _, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid); err != nil {
				return
			}
			if err = s.updateSubtilte(c, tp, v); err != nil {
				log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
				return
			}
		}
	}
	return
}
// updateSubtilte re-owns every subtitle of a video after its uploader (mid)
// changed, then drops the affected subtitle caches.
func (s *Service) updateSubtilte(c context.Context, tp int32, v *model.Video) (err error) {
	subtitles, err := s.dao.GetSubtitles(c, tp, v.Cid)
	if err != nil {
		log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
		return
	}
	for _, st := range subtitles {
		st.UpMid = v.Mid
		if err = s.dao.UpdateSubtitle(c, st); err != nil {
			log.Error("updateSubtilte(params:%+v),error(%v)", v, err)
			return
		}
		s.dao.DelSubtitleCache(c, v.Cid, st.ID)
		// drafts and to-audit subtitles also live in the draft cache
		if st.Status == model.SubtitleStatusDraft || st.Status == model.SubtitleStatusToAudit {
			s.dao.DelSubtitleDraftCache(c, v.Cid, tp, st.Mid, st.Lan)
		}
	}
	s.dao.DelVideoSubtitleCache(c, v.Cid, tp)
	return
}
// maxlimit maps a video duration (seconds) to the maximum number of danmaku
// retained for that video; a duration of 0 (unknown) gets the default 1500,
// and anything at or below 30 seconds gets the floor of 100.
func (s *Service) maxlimit(duration int64) (limit int64) {
	if duration == 0 {
		return 1500
	}
	// exclusive lower bounds, checked from longest to shortest
	steps := []struct {
		above int64
		cap   int64
	}{
		{3600, 8000},
		{2400, 6000},
		{900, 3000},
		{600, 1500},
		{150, 1000},
		{60, 500},
		{30, 300},
	}
	for _, st := range steps {
		if duration > st.above {
			return st.cap
		}
	}
	return 100
}

View File

@@ -0,0 +1,15 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestTrackVideoup syncs all videos of a fixed archive id against live
// dependencies.
func TestTrackVideoup(t *testing.T) {
	Convey("", t, func() {
		err := svr.trackVideoup(context.TODO(), 10114205)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,157 @@
package service
import (
"context"
"time"
"go-common/app/job/main/dm2/model"
"go-common/library/log"
)
// transferProc is a long-running loop: once a minute it tries to grab the
// transfer lock and processes every pending danmaku transfer.
// It never returns; run it in its own goroutine.
// Fixes: the error from Transfers was swallowed silently (now logged), and
// the redundant time.Duration(time.Second * 60) conversion was simplified —
// time.Second * 60 is already a Duration.
func (s *Service) transferProc() {
	var (
		c        = context.TODO()
		interval = 60 * time.Second
	)
	for {
		time.Sleep(interval)
		if !s.dao.AddTransferLock(c) {
			// another instance holds the lock
			continue
		}
		trans, err := s.dao.Transfers(c, model.StatInit)
		if err != nil {
			// previously dropped silently; log so stuck transfers are visible
			log.Error("s.dao.Transfers(%d) error(%v)", model.StatInit, err)
			continue
		}
		if len(trans) == 0 {
			continue
		}
		for _, t := range trans {
			log.Info("dm transfer(%+v) start", t)
			s.transfer(c, t)
		}
	}
}
// transfer copies all danmaku of t.FromCid onto t.ToCid page by page,
// assigning fresh dmids via the sequence service, applying the configured
// time offset, and recording progress in t.Dmid so a restart resumes where it
// stopped. On completion the target's danmaku caches are force-refreshed.
//
// Bug fix: the error log for the origin-subject lookup printed t.ToCid even
// though t.FromCid was the cid actually queried; it now logs t.FromCid.
func (s *Service) transfer(c context.Context, t *model.Transfer) {
	var (
		err     error
		limit   int64 = 500
		startID       = t.Dmid
		tp            = model.SubTypeVideo
	)
	t.State = model.StatTransfing
	if _, err = s.dao.UpdateTransfer(c, t); err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
		return
	}
	// release the scheduling lock early; progress is tracked in the DB row
	if err = s.dao.DelTransferLock(c); err != nil {
		log.Error("s.dao.DelTransferLock() error")
	}
	targetSub, err := s.dao.Subject(c, tp, t.ToCid)
	if err != nil || targetSub == nil {
		log.Error("s.dao.Subject(cid:%d) error(%v)", t.ToCid, err)
		s.transerFailNow(c, t)
		return
	}
	originSub, err := s.dao.Subject(c, tp, t.FromCid)
	if err != nil || originSub == nil {
		log.Error("s.dao.Subject(cid:%d) error(%v)", t.FromCid, err)
		s.transerFailNow(c, t)
		return
	}
	for {
		// get transfer dm per page
		var dms []*model.DM
		if dms, err = s.transferDMS(c, tp, originSub.Oid, startID, limit); err != nil {
			time.Sleep(1 * time.Second)
			continue
		}
		if len(dms) == 0 {
			break
		}
		for _, dm := range dms {
			if dm.ID <= startID {
				continue
			} else {
				startID = dm.ID
			}
			var id int64
			if id, err = s.seqRPC.ID(c, s.seqArg); err != nil {
				log.Error("seqRPC.ID() error(%v)", err)
				time.Sleep(100 * time.Millisecond)
				continue
			}
			if dm.Pool == model.PoolSpecial {
				dm.ContentSpe.ID = id
			}
			dm.Oid = targetSub.Oid // repoint this dm's primary key and oid
			dm.ID = id
			dm.Content.ID = id
			if t.Offset != 0 {
				dm.Progress = dm.Progress + int32(t.Offset*1000)
			}
			if err = s.actionAddDM(c, targetSub, dm); err != nil {
				continue
			}
			t.Dmid = startID // record the dmid transferred so far
		}
		s.dao.UpdateTransfer(c, t)
		time.Sleep(1 * time.Second)
	}
	t.State = model.StatFinished
	if _, err = s.dao.UpdateTransfer(c, t); err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
	}
	// refresh the danmaku caches of the target cid
	s.flushDmCache(c, &model.Flush{Oid: t.ToCid, Type: tp, Force: true})
	s.flushAllDmSegCache(c, t.ToCid, tp)
}
// transerFailNow marks a transfer as failed and persists the state change.
func (s *Service) transerFailNow(c context.Context, t *model.Transfer) {
	t.State = model.StatFailed
	_, err := s.dao.UpdateTransfer(c, t)
	if err != nil {
		log.Error("s.dao.UpdateTransfer(%+v) error(%v)", t, err)
	}
}
// transferDMS loads up to limit danmaku of (tp, oid) whose id is greater than
// minID, joining index rows with their content rows (and special-content rows
// for special-pool danmaku). Rows whose content is missing are skipped
// silently.
func (s *Service) transferDMS(c context.Context, tp int32, oid, minID, limit int64) (dms []*model.DM, err error) {
	contentSpec := make(map[int64]*model.ContentSpecial)
	idxMap, dmids, special, err := s.dao.DMIndexs(c, tp, oid, minID, limit)
	if err != nil {
		log.Error("s.dao.DMIndexs(oid:%d mindID:%d) error(%v)", oid, minID, err)
		return
	}
	if len(dmids) == 0 {
		return
	}
	contents, err := s.dao.Contents(c, oid, dmids)
	if err != nil {
		log.Error("s.dao.Contents(oid:%d dmids:%v) error(%v)", oid, dmids, err)
		return
	}
	if len(special) > 0 {
		if contentSpec, err = s.dao.ContentsSpecial(c, special); err != nil {
			log.Error("s.dao.ContentSpecials(oid:%d special:%v) error(%v)", oid, special, err)
			return
		}
	}
	for _, dmid := range dmids {
		dm, ok := idxMap[dmid]
		if !ok {
			continue
		}
		content, ok := contents[dmid]
		if !ok {
			// index row without a content row: skip
			continue
		}
		dm.Content = content
		if dm.Pool == model.PoolSpecial {
			contentspe, ok := contentSpec[dm.ID]
			if !ok {
				continue
			}
			dm.ContentSpe = contentspe
		}
		dms = append(dms, dm)
	}
	return
}

View File

@@ -0,0 +1,33 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/job/main/dm2/model"
)
// TestTransferDMS loads a page of danmaku from a known test cid and expects a
// non-empty result.
func TestTransferDMS(t *testing.T) {
	Convey("test NewCommentList", t, func() {
		ll, err := svr.transferDMS(context.TODO(), 1, 1012, 0, 10)
		So(err, ShouldBeNil)
		So(ll, ShouldNotBeEmpty)
	})
}
// TestTransfer runs one full transfer of danmaku between two fixed test cids;
// it exercises side effects only and makes no assertions.
func TestTransfer(t *testing.T) {
	trans := &model.Transfer{
		ID:      265,
		FromCid: 1012,
		ToCid:   1211,
		Mid:     0,
		Dmid:    123,
		Offset:  0,
		State:   0,
	}
	Convey("transfer", t, func() {
		svr.transfer(context.TODO(), trans)
	})
}