Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions


@ -0,0 +1,23 @@
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/service/main/feed/cmd:all-srcs",
"//app/service/main/feed/conf:all-srcs",
"//app/service/main/feed/dao:all-srcs",
"//app/service/main/feed/http:all-srcs",
"//app/service/main/feed/model:all-srcs",
"//app/service/main/feed/rpc/client:all-srcs",
"//app/service/main/feed/rpc/server:all-srcs",
"//app/service/main/feed/service:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,92 @@
### Feed-Service
##### Version 1.13.2
> 1. Add unit tests
##### Version 1.13.1
> 1. Use the new RPC server
##### Version 1.13.0
> 1. Use blademaster (bm)
##### Version 1.12.8
> 1. Add the register endpoint
##### Version 1.12.7
> 1. Use account-service v7
##### Version 1.12.6
> 1. Troubleshoot an online issue
##### Version 1.12.5
> 1. Refactor with protobuf; switch to the archives3 interface
##### Version 1.12.4
> 1. Upgrade the HTTP client
##### Version 1.12.3
> 1. Change the uploader cache key to avoid dirty data caused by the archive service in the previous version
##### Version 1.12.1
> 1. Code cleanup: drop the memcache time cache and store the time in the redis score instead
> 2. The unread count no longer queries the archives2 interface
> 3. Feed generation only queries the archive data it needs instead of doing a full query
> 4. For the uploader cache, switch reads from EXPIRE to TTL to guard against possible cache inconsistency
##### Version 1.11.9
> 1. Re-merge into the monorepo to fix lost commits
##### Version 1.11.1
> 1. Fix a panic caused by a missing entry in the archive map
##### Version 1.11.0
> 1. Merge into the monorepo
> 2. Fix lost uploader archives
##### Version 1.10.10
> 1. Fix a divide-by-zero bug in article feeds
##### Version 1.10.9
> 1. Fix the archive state check
##### Version 1.10.8
> 1. Improve some prom monitoring
##### Version 1.10.7
> 1. Update the interface for fetching an uploader's latest submissions
##### Version 1.10.6
> 1. Restore the cache of uploaders' approved archives
##### Version 1.10.5
> 1. Remove the cache of uploaders' approved archives
##### Version 1.10.4
> 1. Add degradation monitoring
##### Version 1.10.3
> 1. Article feeds support hiding individual entries
##### Version 1.10.2
> 1. Fix article feed pagination
##### Version 1.10.1
> 1. Support article feeds
##### Version 1.9.0
> 1. Update go-common and go-business, and update the followings interface
##### Version 1.8.2
> 1. Re-add the uploader cache
##### Version 1.8.1
> 1. Remove the local uploader cache
##### Version 1.8.0
> 1. Remove the bangumi unread-count API && add an archive-transfer RPC
##### Version 1.7.4
> 1. Add one more log line
##### Version 1.7.3
> 1. Fix a fold bug


@ -0,0 +1,9 @@
# Owner
renwei
zhapuyu
# Author
wangxu01
# Reviewer
zhapuyu


@ -0,0 +1,15 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- renwei
- wangxu01
- zhapuyu
labels:
- main
- service
- service/main/feed
options:
no_parent_owners: true
reviewers:
- wangxu01
- zhapuyu


@ -0,0 +1,13 @@
## feed-service
Provides the RPC service for users' following feed (updates from followed uploaders).
Wiki: http://info.bilibili.co/pages/viewpage.action?pageId=1741757
### Test
#### Run:
Run the open_test_web_ui.sh script: ./open_test_web_ui.sh
#### Dependencies:
Local mc (127.0.0.1:11211) and redis (127.0.0.1:6379)


@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
go_binary(
name = "cmd",
embed = [":go_default_library"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
data = [
"convey-test.toml",
"feed-service-test.toml",
],
importpath = "go-common/app/service/main/feed/cmd",
tags = ["automanaged"],
deps = [
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/http:go_default_library",
"//app/service/main/feed/rpc/server:go_default_library",
"//app/service/main/feed/service:go_default_library",
"//library/log:go_default_library",
"//library/net/trace:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,83 @@
# For convey tests. Note: every run resets the data in redis/memcache.
[xlog]
dir = "/data/log/feed-service/"
[xlog.syslog]
proto = "udp"
addr = "172.18.19.22:9999"
project = "web-interface"
chanSize = 10240
[bm]
addr = "0.0.0.0:6362"
timeout = "1s"
[rpcServer]
proto = "tcp"
addr = "127.0.0.1:6361"
[accountRPC]
[archiveRPC]
[articleRPC]
[multiRedis]
maxArcsNum = 50 # max archives fetched per uploader on a cache miss; also the per-uploader cache size limit
TTLUpper = "480h"
expireUpper = "480h"
expireFeed = "16h"
[multiRedis.Cache]
name = "feed-service"
proto = "tcp"
addr = "127.0.0.1:6379"
idle = 10
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
[memcache]
name = "feed-service"
proto = "tcp"
addr = "127.0.0.1:11211"
idle = 10
active = 10
dialTimeout = "2s"
readTimeout = "2s"
writeTimeout = "2s"
idleTimeout = "7h"
expire = "15m"
bangumiExpire = "15m"
[httpClient]
key = "e7482d29be4a95b8"
secret = "9e803791cdef756e75faee68e12b7442"
dial = "500ms"
timeout = "2s"
keepAlive = "60s"
timer = 10
[httpClient.breaker]
window ="10s"
sleep ="10ms"
bucket = 10
ratio = 0.5
request = 100
[app]
key = "e7482d29be4a95b8"
secret = "9e803791cdef756e75faee68e12b7442"
[feed]
appLength = 200
webLength = 400
archiveFeedLength = 200
archiveFeedExpire = "5m"
bangumiFeedExpire = "5m"
appPullInterval = "5m"
webPullInterval = "15s"
bulkSize = 500
minUpCnt = 10
maxTotalCnt = 100


@ -0,0 +1,71 @@
# This is a TOML document. Boom.
[bm]
addr = "0.0.0.0:6362"
timeout = "1s"
[rpcServer]
proto = "tcp"
addr = "0.0.0.0:6361"
[accountRPC]
[archiveRPC]
[articleRPC]
[multiRedis]
maxArcsNum = 50 # max archives fetched per uploader on a cache miss; also the per-uploader cache size limit
TTLUpper = "480h"
expireUpper = "480h"
expireFeed = "16h"
[multiRedis.Cache]
name = "feed-service"
proto = "tcp"
addr = "172.18.33.60:6891"
idle = 10
active = 10
dialTimeout = "1s"
readTimeout = "1s"
writeTimeout = "1s"
idleTimeout = "10s"
[memcache]
name = "feed-service"
proto = "tcp"
addr = "172.18.33.61:11230"
idle = 10
active = 10
dialTimeout = "2s"
readTimeout = "2s"
writeTimeout = "2s"
idleTimeout = "7h"
expire = "15m"
bangumiExpire = "15m"
[httpClient]
key = "e7482d29be4a95b8"
secret = "9e803791cdef756e75faee68e12b7442"
dial = "500ms"
timeout = "2s"
keepAlive = "60s"
timer = 10
[httpClient.breaker]
window ="10s"
sleep ="10ms"
bucket = 10
ratio = 0.5
request = 100
[feed]
appLength = 200
webLength = 400
archiveFeedLength = 200
archiveFeedExpire = "5m"
bangumiFeedExpire = "5m"
appPullInterval = "5m"
webPullInterval = "15s"
bulkSize = 500
minUpCnt = 10
maxTotalCnt = 100


@ -0,0 +1,51 @@
package main
import (
"flag"
"os"
"os/signal"
"syscall"
"time"
"go-common/app/service/main/feed/conf"
"go-common/app/service/main/feed/http"
rpc "go-common/app/service/main/feed/rpc/server"
"go-common/app/service/main/feed/service"
"go-common/library/log"
"go-common/library/net/trace"
)
func main() {
flag.Parse()
// init conf, log, trace.
if err := conf.Init(); err != nil {
panic(err)
}
log.Init(conf.Conf.Xlog)
defer log.Close()
trace.Init(conf.Conf.Tracer)
defer trace.Close()
// service init
svr := service.New(conf.Conf)
http.Init(conf.Conf, svr)
rpcSvr := rpc.New(conf.Conf, svr)
// signal handler
log.Info("feed-service start")
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
for {
s := <-c
log.Info("feed-service get a signal %s", s.String())
switch s {
case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT:
log.Info("feed-service exit")
rpcSvr.Close()
time.Sleep(time.Second * 2)
return
case syscall.SIGHUP:
// TODO reload
default:
return
}
}
}


@ -0,0 +1,6 @@
#!/bin/bash
command -v goconvey >/dev/null 2>&1 || { echo >&2 "goconvey is required but not installed."; echo "Aborting."; echo "Please run: go get github.com/smartystreets/goconvey"; exit 1; }
cd ../
goconvey -excludedDirs "vendor,node_modules,rpc" -packages 1


@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["conf.go"],
importpath = "go-common/app/service/main/feed/conf",
tags = ["automanaged"],
deps = [
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/conf:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/net/rpc:go_default_library",
"//library/net/trace:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/BurntSushi/toml:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,132 @@
package conf
import (
"errors"
"flag"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/conf"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/net/rpc"
"go-common/library/net/trace"
"go-common/library/time"
"github.com/BurntSushi/toml"
)
// Conf global variable.
var (
Conf = &Config{}
client *conf.Client
confPath string
)
// Config struct of conf.
type Config struct {
// log
Xlog *log.Config
// BM
BM *bm.ServerConfig
// rpc server
RPCServer *rpc.ServerConfig
// redis
MultiRedis *MultiRedis
// memcache
Memcache *Memcache
// tracer
Tracer *trace.Config
// rpc client
ArchiveRPC *rpc.ClientConfig
AccountRPC *rpc.ClientConfig
ArticleRPC *rpc.ClientConfig
// httpClient
HTTPClient *bm.ClientConfig
// feed
Feed *Feed
}
// Feed .
type Feed struct {
AppLength int
WebLength int
ArchiveFeedLength int
ArticleFeedLength int
ArchiveFeedExpire time.Duration
BangumiFeedExpire time.Duration
AppPullInterval time.Duration
WebPullInterval time.Duration
ArtPullInterval time.Duration
BulkSize int
MinUpCnt int
MaxTotalCnt int
}
// MultiRedis .
type MultiRedis struct {
MaxArcsNum int
TTLUpper time.Duration
ExpireUpper time.Duration
ExpireFeed time.Duration
Local *redis.Config
Cache *redis.Config
}
// Memcache .
type Memcache struct {
*memcache.Config
Expire time.Duration
BangumiExpire time.Duration
}
func init() {
flag.StringVar(&confPath, "conf", "", "default config path")
}
// Init init conf
func Init() error {
if confPath != "" {
return local()
}
return remote()
}
func local() (err error) {
_, err = toml.DecodeFile(confPath, &Conf)
return
}
func remote() (err error) {
if client, err = conf.New(); err != nil {
return
}
if err = load(); err != nil {
return
}
go func() {
for range client.Event() {
log.Info("config reload")
if err := load(); err != nil {
log.Error("config reload error (%v)", err)
}
}
}()
return
}
func load() (err error) {
var (
s string
ok bool
tmpConf *Config
)
if s, ok = client.Toml2(); !ok {
return errors.New("load config center error")
}
if _, err = toml.Decode(s, &tmpConf); err != nil {
return errors.New("could not decode config")
}
*Conf = *tmpConf
return
}


@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"bangumi_test.go",
"dao_test.go",
"memcache_test.go",
"redis_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"bangumi.go",
"dao.go",
"memcache.go",
"redis.go",
],
importpath = "go-common/app/service/main/feed/dao",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/stat/prom:go_default_library",
"//library/sync/errgroup:go_default_library",
"//library/time:go_default_library",
"//library/xstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,75 @@
package dao
import (
"context"
"net/url"
"strconv"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/xstr"
)
const (
_bangumiURL = "http://bangumi.bilibili.co"
_pullURL = _bangumiURL + "/internal_api/follow_pull"
_pullSeasonsURL = _bangumiURL + "/internal_api/follow_seasons"
)
// BangumiPull pull bangumi feed.
func (d *Dao) BangumiPull(c context.Context, mid int64, ip string) (seasonIDS []int64, err error) {
params := url.Values{}
params.Set("mid", strconv.FormatInt(mid, 10))
var res struct {
Code int `json:"code"`
Result []*feedmdl.Pull `json:"result"`
}
if err = d.httpClient.Get(c, _pullURL, ip, params, &res); err != nil {
PromWarn("bangumi:Pull接口")
log.Error("d.client.Get(%s) error(%v)", _pullURL+"?"+params.Encode(), err)
return
}
if res.Code != 0 {
PromWarn("bangumi:Pull接口")
log.Error("url(%s) res code(%d) or res.result(%v)", _pullURL+"?"+params.Encode(), res.Code, res.Result)
err = ecode.Int(res.Code)
return
}
for _, r := range res.Result {
seasonIDS = append(seasonIDS, r.SeasonID)
}
return
}
// BangumiSeasons get bangumi info by seasonids.
func (d *Dao) BangumiSeasons(c context.Context, seasonIDs []int64, ip string) (psm map[int64]*feedmdl.Bangumi, err error) {
if len(seasonIDs) == 0 {
return
}
params := url.Values{}
params.Set("season_ids", xstr.JoinInts(seasonIDs))
var res struct {
Code int `json:"code"`
Result []*feedmdl.Bangumi `json:"result"`
}
if err = d.httpClient.Get(c, _pullSeasonsURL, ip, params, &res); err != nil {
PromWarn("bangumi:详情接口")
log.Error("d.client.Get(%s) error(%v)", _pullSeasonsURL+"?"+params.Encode(), err)
return
}
if res.Code != 0 {
PromWarn("bangumi:详情接口")
log.Error("url(%s) res code(%d) or res.result(%v)", _pullSeasonsURL+"?"+params.Encode(), res.Code, res.Result)
err = ecode.Int(res.Code)
return
}
psm = make(map[int64]*feedmdl.Bangumi, len(res.Result))
for _, p := range res.Result {
if p == nil {
continue
}
psm[p.SeasonID] = p
}
return
}


@ -0,0 +1,38 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
func TestDaoBangumiPull(t *testing.T) {
var (
c = context.Background()
mid = int64(2)
ip = ""
)
convey.Convey("BangumiPull", t, func(ctx convey.C) {
seasonIDS, err := d.BangumiPull(c, mid, ip)
ctx.Convey("Then err should be nil.seasonIDS should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(seasonIDS, convey.ShouldBeNil)
})
})
}
func TestDaoBangumiSeasons(t *testing.T) {
var (
c = context.Background()
seasonIDs = []int64{5735, 5714, 5702, 5725}
ip = ""
)
convey.Convey("BangumiSeasons", t, func(ctx convey.C) {
psm, err := d.BangumiSeasons(c, seasonIDs, ip)
ctx.Convey("Then err should be nil.psm should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(psm, convey.ShouldNotBeNil)
})
})
}


@ -0,0 +1,108 @@
package dao
import (
"context"
"time"
"go-common/app/service/main/feed/conf"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/stat/prom"
)
var (
// CachedCount .
CachedCount = prom.CacheHit
// MissedCount .
MissedCount = prom.CacheMiss
infosCount = prom.BusinessInfoCount
warnsCount = prom.BusinessErrCount
)
// PromError stat and log.
func PromError(name string, format string, args ...interface{}) {
prom.BusinessErrCount.Incr(name)
log.Error(format, args...)
}
// PromInfo add prom info
func PromInfo(name string) {
infosCount.Incr(name)
}
// PromWarn add prom warn
func PromWarn(name string) {
warnsCount.Incr(name)
}
// Dao struct info of Dao.
type Dao struct {
// redis
redis *redis.Pool
redisTTLUpper int32
redisExpireUpper int32
redisExpireFeed int32
redisExpireArchiveFeed int32
redisExpireBangumiFeed int32
// memcache
mc *memcache.Pool
mcExpire int32
bangumiExpire int32
// feed Config
appFeedLength int
webFeedLength int
// conf
c *conf.Config
// bangumi http client
httpClient *bm.Client
}
// New new a Dao and return.
func New(c *conf.Config) (d *Dao) {
d = &Dao{
// conf
c: c,
// redis
redis: redis.NewPool(c.MultiRedis.Cache),
redisTTLUpper: int32(time.Duration(c.MultiRedis.TTLUpper) / time.Second),
redisExpireUpper: int32(time.Duration(c.MultiRedis.ExpireUpper) / time.Second),
redisExpireFeed: int32(time.Duration(c.MultiRedis.ExpireFeed) / time.Second),
redisExpireArchiveFeed: int32(time.Duration(c.Feed.ArchiveFeedExpire) / time.Second),
redisExpireBangumiFeed: int32(time.Duration(c.Feed.BangumiFeedExpire) / time.Second),
// mc
mc: memcache.NewPool(c.Memcache.Config),
mcExpire: int32(time.Duration(c.Memcache.Expire) / time.Second),
bangumiExpire: int32(time.Duration(c.Memcache.BangumiExpire) / time.Second),
// feed Config
appFeedLength: c.Feed.AppLength,
webFeedLength: c.Feed.WebLength,
httpClient: bm.NewClient(c.HTTPClient),
}
if d.appFeedLength == 0 {
d.appFeedLength = 200
}
if d.webFeedLength == 0 {
d.webFeedLength = 400
}
return
}
// Ping ping health of redis and mc.
func (d *Dao) Ping(c context.Context) (err error) {
if err = d.pingRedis(c); err != nil {
return
}
return d.pingMC(c)
}
// Close close connections of redis and mc.
func (d *Dao) Close() {
if d.redis != nil {
d.redis.Close()
}
if d.mc != nil {
d.mc.Close()
}
}


@ -0,0 +1,35 @@
package dao
import (
"flag"
"go-common/app/service/main/feed/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.web-svr.feed-service")
flag.Set("conf_token", "a7fb455a62b8ba2cd751211b6781ef90")
flag.Set("tree_id", "23908")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/feed-service-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
m.Run()
os.Exit(0)
}


@ -0,0 +1,200 @@
package dao
import (
"context"
"strconv"
"sync"
"go-common/app/service/main/archive/api"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/cache/memcache"
"go-common/library/sync/errgroup"
)
const (
_prefixArc = "ap_"
_prefixBangumi = "bp_"
_bulkSize = 100
)
func arcKey(aid int64) string {
return _prefixArc + strconv.FormatInt(aid, 10)
}
func bangumiKey(bid int64) string {
return _prefixBangumi + strconv.FormatInt(bid, 10)
}
// pingMC pings memcache.
func (d *Dao) pingMC(c context.Context) (err error) {
conn := d.mc.Get(c)
item := memcache.Item{Key: "ping", Value: []byte{1}, Expiration: d.mcExpire}
err = conn.Set(&item)
conn.Close()
return
}
// AddArchivesCache batch set archives cache.
func (d *Dao) AddArchivesCache(c context.Context, vs ...*api.Arc) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
for _, v := range vs {
if v == nil {
continue
}
item := memcache.Item{Key: arcKey(v.Aid), Object: v, Flags: memcache.FlagProtobuf, Expiration: d.mcExpire}
if err = conn.Set(&item); err != nil {
PromError("mc:增加稿件缓存", "conn.Store(%s) error(%v)", arcKey(v.Aid), err)
return
}
}
return
}
// AddArchivesCacheMap batch set archives cache.
func (d *Dao) AddArchivesCacheMap(c context.Context, arcm map[int64]*api.Arc) (err error) {
var arcs []*api.Arc
for _, arc := range arcm {
arcs = append(arcs, arc)
}
return d.AddArchivesCache(c, arcs...)
}
// ArchivesCache batch get archive from cache.
func (d *Dao) ArchivesCache(c context.Context, aids []int64) (cached map[int64]*api.Arc, missed []int64, err error) {
if len(aids) == 0 {
return
}
cached = make(map[int64]*api.Arc, len(aids))
allKeys := make([]string, 0, len(aids))
aidmap := make(map[string]int64, len(aids))
for _, aid := range aids {
k := arcKey(aid)
allKeys = append(allKeys, k)
aidmap[k] = aid
}
group, errCtx := errgroup.WithContext(c)
mutex := sync.Mutex{}
keysLen := len(allKeys)
for i := 0; i < keysLen; i += _bulkSize {
var keys []string
if (i + _bulkSize) > keysLen {
keys = allKeys[i:]
} else {
keys = allKeys[i : i+_bulkSize]
}
group.Go(func() (err error) {
conn := d.mc.Get(errCtx)
defer conn.Close()
replys, err := conn.GetMulti(keys)
if err != nil {
PromError("mc:获取稿件缓存", "conn.Gets(%v) error(%v)", keys, err)
err = nil
return
}
for _, reply := range replys {
arc := &api.Arc{}
if err = conn.Scan(reply, arc); err != nil {
PromError("获取稿件缓存json解析", "json.Unmarshal(%v) error(%v)", reply.Value, err)
err = nil
continue
}
mutex.Lock()
cached[aidmap[reply.Key]] = arc
delete(aidmap, reply.Key)
mutex.Unlock()
}
return
})
}
group.Wait()
missed = make([]int64, 0, len(aidmap))
for _, aid := range aidmap {
missed = append(missed, aid)
}
MissedCount.Add("archive", int64(len(missed)))
CachedCount.Add("archive", int64(len(cached)))
return
}
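// Illustrative note (not part of the original commit): ArchivesCache above
// is a fan-out read — allKeys is cut into _bulkSize batches, each batch is
// fetched by its own errgroup goroutine, and hits are merged into `cached`
// under the mutex; whatever remains in aidmap afterwards is the miss set.
// The batching arithmetic alone, as a minimal sketch:
//
//	for i := 0; i < len(allKeys); i += _bulkSize {
//		j := i + _bulkSize
//		if j > len(allKeys) {
//			j = len(allKeys)
//		}
//		batch := allKeys[i:j] // at most _bulkSize keys per goroutine
//		_ = batch
//	}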
// DelArchiveCache delete archive cache.
func (d *Dao) DelArchiveCache(c context.Context, aid int64) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
if err = conn.Delete(arcKey(aid)); err != nil {
if err == memcache.ErrNotFound {
err = nil
} else {
PromError("mc:删除稿件缓存", "conn.Delete(%s) error(%v)", arcKey(aid), err)
return
}
}
return
}
// AddBangumisCacheMap batch set bangumis cache.
func (d *Dao) AddBangumisCacheMap(c context.Context, bm map[int64]*feedmdl.Bangumi) (err error) {
var bs []*feedmdl.Bangumi
for _, b := range bm {
bs = append(bs, b)
}
return d.AddBangumisCache(c, bs...)
}
// AddBangumisCache add batch set bangumi cache.
func (d *Dao) AddBangumisCache(c context.Context, bs ...*feedmdl.Bangumi) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
for _, b := range bs {
if b == nil {
continue
}
item := memcache.Item{Key: bangumiKey(b.SeasonID), Object: b, Flags: memcache.FlagProtobuf, Expiration: d.bangumiExpire}
if err = conn.Set(&item); err != nil {
PromError("mc:增加番剧缓存", "conn.Store(%s) error(%v)", bangumiKey(b.SeasonID), err)
return
}
}
return
}
// BangumisCache batch get bangumi from cache.
func (d *Dao) BangumisCache(c context.Context, bids []int64) (cached map[int64]*feedmdl.Bangumi, missed []int64, err error) {
cached = make(map[int64]*feedmdl.Bangumi, len(bids))
if len(bids) == 0 {
return
}
keys := make([]string, 0, len(bids))
bidmap := make(map[string]int64, len(bids))
for _, bid := range bids {
k := bangumiKey(bid)
keys = append(keys, k)
bidmap[k] = bid
}
conn := d.mc.Get(c)
defer conn.Close()
replys, err := conn.GetMulti(keys)
if err != nil {
PromError("mc:获取番剧", "conn.Gets(%v) error(%v)", keys, err)
return
}
for _, reply := range replys {
b := &feedmdl.Bangumi{}
if err = conn.Scan(reply, b); err != nil {
PromError("获取番剧json解析", "json.Unmarshal(%v) error(%v)", reply.Value, err)
return
}
cached[bidmap[reply.Key]] = b
delete(bidmap, reply.Key)
}
missed = make([]int64, 0, len(bidmap))
for _, bid := range bidmap {
missed = append(missed, bid)
}
MissedCount.Add("bangumi", int64(len(missed)))
CachedCount.Add("bangumi", int64(len(cached)))
return
}


@ -0,0 +1,59 @@
package dao
import (
"context"
"testing"
"go-common/app/service/main/archive/api"
feed "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_ArchivesCache(t *testing.T) {
arc := api.Arc{Aid: 1, PubDate: xtime.Time(100), Title: "title"}
c := context.TODO()
Convey("add cache", t, func() {
err := d.AddArchivesCacheMap(c, map[int64]*api.Arc{1: &arc})
So(err, ShouldBeNil)
Convey("get cache return cached data", func() {
cached, missed, err := d.ArchivesCache(c, []int64{1})
So(err, ShouldBeNil)
So(missed, ShouldBeEmpty)
So(cached, ShouldResemble, map[int64]*api.Arc{1: &arc})
})
Convey("del cache return null", func() {
err := d.DelArchiveCache(c, 1)
So(err, ShouldBeNil)
cached, missed, err := d.ArchivesCache(c, []int64{1})
So(err, ShouldBeNil)
So(cached, ShouldBeEmpty)
So(missed, ShouldResemble, []int64{1})
})
})
}
func Test_BangumiCache(t *testing.T) {
bangumi := feed.Bangumi{SeasonID: 1, Title: "t"}
c := context.TODO()
Convey("add cache", t, func() {
err := d.AddBangumisCacheMap(c, map[int64]*feed.Bangumi{1: &bangumi})
So(err, ShouldBeNil)
Convey("get cache return cached data", func() {
cached, missed, err := d.BangumisCache(c, []int64{1})
So(err, ShouldBeNil)
So(missed, ShouldBeEmpty)
So(cached, ShouldResemble, map[int64]*feed.Bangumi{1: &bangumi})
})
Convey("return missed", func() {
miss := int64(2000)
cached, missed, err := d.BangumisCache(c, []int64{miss})
So(err, ShouldBeNil)
So(cached, ShouldBeEmpty)
So(missed, ShouldResemble, []int64{miss})
})
})
}


@ -0,0 +1,647 @@
package dao
import (
"context"
"fmt"
"strconv"
artmdl "go-common/app/interface/openplatform/article/model"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/model"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/cache/redis"
"go-common/library/log"
"go-common/library/time"
"go-common/library/xstr"
)
const (
_prefixUpper = "ua_" // upper's archive list
_prefixAppFeed = "af_" // user's app feed list
_prefixWebFeed = "wf_" // user's web feed list
_prefixAppLast = "al_" // user's last access
_prefixWebLast = "wl_" // user's last access
_prefixArtLast = "tl_" // user's last access of article
_prefixBangumiFeed = "banf_" // user's bangumi feed list
_prefixArchiveFeed = "arcf_" // user's archive feed list
_prefixArticleFeed = "artf_" // user's article feed list
_prefixAppUnreadCount = "ac_" // user's app unread count
_prefixWebUnreadCount = "wc_" // user's web unread count
_prefixArtUnreadCount = "tc_" // user's article unread count
)
func upperKey(mid int64) string {
return _prefixUpper + strconv.FormatInt(mid, 10)
}
func bangumiFeedKey(mid int64) string {
return _prefixBangumiFeed + strconv.FormatInt(mid, 10)
}
func archiveFeedKey(mid int64) string {
return _prefixArchiveFeed + strconv.FormatInt(mid, 10)
}
// from unpacks a sorted-set score into (pubdate, copyright).
func from(i int64) (time.Time, int8) {
return time.Time(i >> 8), int8(i & 0xff)
}
// combine packs pubdate into the high bits of the score and the copyright
// flag into the low 8 bits.
func combine(t time.Time, copyright int8) int64 {
return int64(t)<<8 | int64(copyright)
}
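// combineFromExample is an illustrative sketch (not part of the original
// commit): the sorted-set score carries the publish time in its high 56
// bits and the copyright flag in its low 8 bits, so one member encodes both.
func combineFromExample() {
score := combine(time.Time(1555920556), 1) // == 1555920556<<8 | 1
ts, copyright := from(score)
_ = ts // time.Time(1555920556)
_ = copyright // int8(1)
}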
func feedKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid, 10)
if ft == model.TypeApp {
return _prefixAppFeed + midStr
} else if ft == model.TypeWeb {
return _prefixWebFeed + midStr
} else {
return _prefixArticleFeed + midStr
}
}
// unreadCountKey buckets users into hash keys by mid%100000; the full mid
// is used as the hash field.
func unreadCountKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid%100000, 10)
if ft == model.TypeApp {
return _prefixAppUnreadCount + midStr
} else if ft == model.TypeWeb {
return _prefixWebUnreadCount + midStr
} else {
return _prefixArtUnreadCount + midStr
}
}
// lastKey buckets users the same way for the last-access-time hashes.
func lastKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid%100000, 10)
if ft == model.TypeApp {
return _prefixAppLast + midStr
} else if ft == model.TypeWeb {
return _prefixWebLast + midStr
} else {
return _prefixArtLast + midStr
}
}
// appFeedValue convert Feed to string, format: "type,id,fold,fold,fold..."
func appFeedValue(f *feedmdl.Feed) string {
ids := []int64{f.Type, f.ID}
for _, arc := range f.Fold {
ids = append(ids, arc.Aid)
}
return xstr.JoinInts(ids)
}
func recoverFeed(idsStr string) (fe *feedmdl.Feed, err error) {
var (
aid int64
ids []int64
)
if ids, err = xstr.SplitInts(idsStr); err != nil {
return
}
if len(ids) < 2 {
err = fmt.Errorf("recoverFeed failed idsStr(%v)", idsStr)
return
}
fe = &feedmdl.Feed{Type: ids[0], ID: ids[1]}
for _, aid = range ids[2:] {
fe.Fold = append(fe.Fold, &api.Arc{Aid: aid})
}
return
}
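// feedValueExample is an illustrative sketch (not part of the original
// commit) of the flat "type,id,fold,fold,..." member format: an archive
// feed (type 0) with ID 100 and two folded archives round-trips through
// appFeedValue/recoverFeed as "0,100,1,2"; only each folded Aid survives.
func feedValueExample() {
f := &feedmdl.Feed{Type: 0, ID: 100, Fold: []*api.Arc{{Aid: 1}, {Aid: 2}}}
s := appFeedValue(f) // "0,100,1,2"
fe, err := recoverFeed(s) // fe.ID == 100, len(fe.Fold) == 2
_, _ = fe, err
}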
// pingRedis ping redis.
func (d *Dao) pingRedis(c context.Context) (err error) {
conn := d.redis.Get(c)
if _, err = conn.Do("SET", "PING", "PONG"); err != nil {
PromError("redis: ping remote", "remote redis: conn.Do(SET,PING,PONG) error(%v)", err)
}
conn.Close()
return
}
// LastAccessCache get last access time of user.
func (d *Dao) LastAccessCache(c context.Context, ft int, mid int64) (t int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := lastKey(ft, mid)
if t, err = redis.Int64(conn.Do("HGET", key, mid)); err != nil {
if err == redis.ErrNil {
err = nil
} else {
PromError("redis:获取上次访问时间", "conn.Do(HGET, %s, %s) error(%v)", key, mid, err)
}
}
return
}
// AddLastAccessCache add user's last access time.
func (d *Dao) AddLastAccessCache(c context.Context, ft int, mid int64, t int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := lastKey(ft, mid)
if _, err = conn.Do("HSET", key, mid, t); err != nil {
PromError("redis:增加上次访问时间", "conn.DO(HSET, %s, %d, %d) error(%v)", key, mid, t, err)
}
return
}
// ExpireFeedCache expire the user feed key.
func (d *Dao) ExpireFeedCache(c context.Context, ft int, mid int64) (ok bool, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if ok, err = redis.Bool(conn.Do("EXPIRE", key, d.redisExpireFeed)); err != nil {
PromError("redis:Feed缓存设定过期", "conn.Do(EXPIRE, %s, %d) error(%v)", key, d.redisExpireFeed, err)
}
return
}
// PurgeFeedCache purge the user feed key.
func (d *Dao) PurgeFeedCache(c context.Context, ft int, mid int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if _, err = redis.Bool(conn.Do("DEL", key)); err != nil {
PromError("redis:删除feed", "conn.Do(DEL, %s, %d) error(%v)", key, err)
}
return
}
// FeedCache get upper feed by cache.
func (d *Dao) FeedCache(c context.Context, ft int, mid int64, start, end int) (as []*feedmdl.Feed, bids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
as = make([]*feedmdl.Feed, 0, len(vs))
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
if fe, err = recoverFeed(idsStr); err != nil {
PromError("恢复feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
as = append(as, fe)
switch fe.Type {
case feedmdl.BangumiType:
bids = append(bids, fe.ID)
}
}
}
return
}
// AddFeedCache add upper feed cache.
func (d *Dao) AddFeedCache(c context.Context, ft int, mid int64, as []*feedmdl.Feed) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if err = conn.Send("DEL", key); err != nil {
PromError("redis:删除feed缓存", "conn.Send(DEL, %s) error(%v)", key, err)
return
}
commondLen := 1
if len(as) > 0 {
var feedLen int
if ft == model.TypeApp {
feedLen = d.appFeedLength
} else {
feedLen = d.webFeedLength
}
if len(as) > feedLen {
as = as[:feedLen]
}
commonds := []interface{}{key}
for _, appFeed := range as {
ts := appFeed.PubDate.Time().Unix()
feedValue := appFeedValue(appFeed)
commonds = append(commonds, ts, feedValue)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
commondLen++
if err = conn.Send("EXPIRE", key, d.redisExpireFeed); err != nil {
PromError("redis:expire-feed缓存", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireFeed, err)
return
}
commondLen++
}
if err = conn.Flush(); err != nil {
PromError("redis:feed缓存flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < commondLen; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:feed缓存receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// ExpireUppersCache expire the upper key.
func (d *Dao) ExpireUppersCache(c context.Context, mids []int64) (res map[int64]bool, err error) {
conn := d.redis.Get(c)
defer conn.Close()
res = make(map[int64]bool, len(mids))
for _, mid := range mids {
if err = conn.Send("TTL", upperKey(mid)); err != nil {
PromError("redis:up主ttl", "conn.Send(TTL, %s) error(%v)", upperKey(mid), err)
return
}
}
if err = conn.Flush(); err != nil {
PromError("redis:up主flush", "conn.Flush error(%v)", err)
return
}
var state int64
for _, mid := range mids {
if state, err = redis.Int64(conn.Receive()); err != nil {
PromError("redis:up主receive", "conn.Receive() error(%v)", err)
return
}
// the cached list counts as valid only while its remaining TTL exceeds
// the TTLUpper-ExpireUpper safety margin.
if int32(state) > (d.redisTTLUpper - d.redisExpireUpper) {
res[mid] = true
} else {
res[mid] = false
}
}
return
}
// UppersCaches batch get new archives of uppers by cache.
func (d *Dao) UppersCaches(c context.Context, mids []int64, start, end int) (res map[int64][]*archive.AidPubTime, err error) {
conn := d.redis.Get(c)
defer conn.Close()
res = make(map[int64][]*archive.AidPubTime, len(mids))
for _, mid := range mids {
if err = conn.Send("ZREVRANGE", upperKey(mid), start, end, "withscores"); err != nil {
PromError("redis:获取up主", "conn.Send(%s) error(%v)", upperKey(mid), err)
return
}
}
if err = conn.Flush(); err != nil {
PromError("redis:获取up主flush", "conn.Flush error(%v)", err)
return
}
for _, mid := range mids {
values, err := redis.Values(conn.Receive())
if err != nil {
PromError("redis:获取up主receive", "conn.Send(ZREVRANGE, %d) error(%v)", mid, err)
err = nil
continue
}
for len(values) > 0 {
arc := archive.AidPubTime{}
var score int64
if values, err = redis.Scan(values, &arc.Aid, &score); err != nil {
PromError("redis:scan UP主", "redis.Scan() error(%v)", err)
err = nil
continue
}
arc.PubDate, arc.Copyright = from(score)
res[mid] = append(res[mid], &arc)
}
}
CachedCount.Add("up", int64(len(res)))
return
}
// AddUpperCaches batch add passed archive of upper.
// set max num of upper's passed list.
func (d *Dao) AddUpperCaches(c context.Context, mArcs map[int64][]*archive.AidPubTime) (err error) {
var (
mid int64
arcs []*archive.AidPubTime
conn = d.redis.Get(c)
count int
)
defer conn.Close()
if len(mArcs) == 0 {
return
}
for mid, arcs = range mArcs {
if len(arcs) == 0 {
continue
}
key := upperKey(mid)
if err = conn.Send("DEL", key); err != nil {
PromError("redis:删除up主缓存", "conn.Send(DEL, %s) error(%v)", key, err)
return
}
count++
for _, arc := range arcs {
score := combine(arc.PubDate, arc.Copyright)
if err = conn.Send("ZADD", key, "CH", score, arc.Aid); err != nil {
PromError("redis:增加up主缓存", "conn.Send(ZADD, %s, %d, %d) error(%v)", key, arc.Aid, err)
return
}
count++
}
if err = conn.Send("ZREMRANGEBYRANK", key, 0, -(d.c.MultiRedis.MaxArcsNum + 1)); err != nil {
PromError("redis:清理up主缓存", "conn.Send(ZREMRANGEBYRANK, %s) error(%v)", key, err)
return
}
count++
if err = conn.Send("EXPIRE", key, d.redisTTLUpper); err != nil {
PromError("redis:expireup主缓存", "conn.Send(EXPIRE, %s, %v) error(%v)", key, d.redisTTLUpper, err)
return
}
count++
}
if err = conn.Flush(); err != nil {
PromError("redis:增加up主flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < count; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:增加up主receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// AddUpperCache .
func (d *Dao) AddUpperCache(c context.Context, mid int64, arc *archive.AidPubTime) (err error) {
var conn = d.redis.Get(c)
defer conn.Close()
key := upperKey(mid)
score := combine(arc.PubDate, arc.Copyright)
if err = conn.Send("ZADD", key, "CH", score, arc.Aid); err != nil {
PromError("redis:增加up主缓存", "conn.Send(ZADD, %s, %d, %d) error(%v)", key, arc.Aid, err)
return
}
if err = conn.Send("ZREMRANGEBYRANK", key, 0, -(d.c.MultiRedis.MaxArcsNum + 1)); err != nil {
PromError("redis:清理up主缓存", "conn.Send(ZREMRANGEBYRANK, %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:增加up主flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:增加up主receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// DelUpperCache delete archive of upper cache.
func (d *Dao) DelUpperCache(c context.Context, mid int64, aid int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if _, err = conn.Do("ZREM", upperKey(mid), aid); err != nil {
PromError("redis:删除up主", "conn.Do(ZERM, %s, %d) error(%v)", upperKey(mid), aid, err)
}
return
}
// AddArchiveFeedCache add archive feed cache.
func (d *Dao) AddArchiveFeedCache(c context.Context, mid int64, as []*feedmdl.Feed) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if len(as) == 0 {
return
}
if len(as) > d.c.Feed.ArchiveFeedLength {
as = as[:d.c.Feed.ArchiveFeedLength]
}
key := archiveFeedKey(mid)
commonds := []interface{}{key}
for _, f := range as {
ts := f.PubDate.Time().Unix()
value := appFeedValue(f)
commonds = append(commonds, ts, value)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加archive-feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireArchiveFeed); err != nil {
PromError("redis:expire-archive-feed缓存", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireArchiveFeed, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:archive-feed-flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:archive-feed-receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// AddBangumiFeedCache add bangumi feed cache.
func (d *Dao) AddBangumiFeedCache(c context.Context, mid int64, as []*feedmdl.Feed) (err error) {
if len(as) == 0 {
return
}
conn := d.redis.Get(c)
defer conn.Close()
key := bangumiFeedKey(mid)
commonds := []interface{}{key}
for _, f := range as {
ts := f.PubDate.Time().Unix()
value := appFeedValue(f)
commonds = append(commonds, ts, value)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加bangumi-feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireBangumiFeed); err != nil {
PromError("redis:expire-bangumi-feed", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireBangumiFeed, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:bangumi-feed-flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:bangumi-feed-receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// ArchiveFeedCache get archive feed by cache.
func (d *Dao) ArchiveFeedCache(c context.Context, mid int64, start, end int) (as []*feedmdl.Feed, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := archiveFeedKey(mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取archive-feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取archive-feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
if fe, err = recoverFeed(idsStr); err != nil {
PromError("恢复archive-feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
as = append(as, fe)
}
}
return
}
// BangumiFeedCache get bangumi feed by cache.
func (d *Dao) BangumiFeedCache(c context.Context, mid int64, start, end int) (bids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := bangumiFeedKey(mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
bids = make([]int64, 0, len(vs))
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取bangumi-feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
fe, err = recoverFeed(idsStr)
if err != nil {
PromError("恢复bangumi-feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
bids = append(bids, fe.ID)
}
}
return
}
// ArticleFeedCache get article feed by cache.
func (d *Dao) ArticleFeedCache(c context.Context, mid int64, start, end int) (aids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(model.TypeArt, mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end))
if err != nil {
log.Error("ArticleFeedCache conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
for len(vs) > 0 {
var aid int64
if vs, err = redis.Scan(vs, &aid); err != nil {
log.Error("ArticleFeedCache redis.Scan(%v) error(%v)", vs, err)
return
}
aids = append(aids, aid)
}
return
}
// AddArticleFeedCache add article feed cache.
func (d *Dao) AddArticleFeedCache(c context.Context, mid int64, as []*artmdl.Meta) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if len(as) == 0 {
return
}
if len(as) > d.c.Feed.ArticleFeedLength {
as = as[:d.c.Feed.ArticleFeedLength]
}
key := feedKey(model.TypeArt, mid)
commonds := []interface{}{key}
for _, a := range as {
ts := a.PublishTime.Time().Unix()
commonds = append(commonds, ts, a.ID)
}
if err = conn.Send("ZADD", commonds...); err != nil {
log.Error("AddArticleFeedCache conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireArchiveFeed); err != nil {
log.Error("AddArticleFeedCache conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireArchiveFeed, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("AddArticleFeedCache conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("AddArticleFeedCache conn.Receive error(%v)", err)
return
}
}
return
}
// UnreadCountCache get unread count cache of user.
func (d *Dao) UnreadCountCache(c context.Context, ft int, mid int64) (count int, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := unreadCountKey(ft, mid)
if count, err = redis.Int(conn.Do("HGET", key, mid)); err != nil {
if err == redis.ErrNil {
err = nil
} else {
PromError("redis:获取未读数", "conn.Do(HGET, %s, %v) error(%v)", key, mid, err)
}
}
return
}
// AddUnreadCountCache add user's unread count cache.
func (d *Dao) AddUnreadCountCache(c context.Context, ft int, mid int64, count int) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := unreadCountKey(ft, mid)
if _, err = conn.Do("HSET", key, mid, count); err != nil {
PromError("redis:增加未读数", "conn.DO(HSET, %s, %d, %d) error(%v)", key, mid, count, err)
}
return
}


@ -0,0 +1,249 @@
package dao
import (
"context"
"testing"
"time"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/model"
feed "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_FeedValue(t *testing.T) {
var (
arc = api.Arc{Aid: 1}
arc2 = api.Arc{Aid: 2}
f feed.Feed
)
Convey("with fold avs", t, func() {
f = feed.Feed{ID: 100, Fold: []*api.Arc{&arc, &arc2}}
So(appFeedValue(&f), ShouldEqual, "0,100,1,2")
})
Convey("without fold avs", t, func() {
f = feed.Feed{ID: 1}
So(appFeedValue(&f), ShouldEqual, "0,1")
})
Convey("bangumi", t, func() {
f = feed.Feed{ID: 100, Type: feed.BangumiType}
So(appFeedValue(&f), ShouldEqual, "1,100")
})
}
func Test_RecoverFeed(t *testing.T) {
var (
arc = api.Arc{Aid: 1}
arc2 = api.Arc{Aid: 2}
b = feed.Feed{ID: 100, Type: feed.BangumiType}
f feed.Feed
)
Convey("bangumi", t, func() {
r, err := recoverFeed("1,100")
So(r, ShouldResemble, &b)
So(err, ShouldBeNil)
})
Convey("with fold avs", t, func() {
f = feed.Feed{ID: 100, Fold: []*api.Arc{&arc, &arc2}}
r, err := recoverFeed("0,100,1,2")
So(r, ShouldResemble, &f)
So(err, ShouldBeNil)
})
Convey("without fold avs", t, func() {
f = feed.Feed{ID: 100}
r, err := recoverFeed("0,100")
So(r, ShouldResemble, &f)
So(err, ShouldBeNil)
})
}
func Test_pingRedis(t *testing.T) {
Convey("ping redis", t, func() {
So(d.pingRedis(context.TODO()), ShouldBeNil)
})
}
func Test_LastAccessCache(t *testing.T) {
var (
mid = int64(1)
ts = int64(100)
err error
)
Convey("add cache", t, func() {
err = d.AddLastAccessCache(context.TODO(), model.TypeApp, mid, ts)
So(err, ShouldBeNil)
Convey("get cache", func() {
t1, err := d.LastAccessCache(context.TODO(), model.TypeApp, mid)
So(t1, ShouldEqual, ts)
So(err, ShouldBeNil)
})
})
}
func Test_FeedCache(t *testing.T) {
var (
mid = int64(1)
now = time.Now().Unix()
err error
a1 = api.Arc{Aid: 1, PubDate: xtime.Time(now)}
a2 = api.Arc{Aid: 2, PubDate: xtime.Time(now - 1000)}
a3 = api.Arc{Aid: 3}
bangumi = feed.Bangumi{SeasonID: 100}
f = feed.Feed{ID: 1, Archive: &a1, PubDate: a1.PubDate, Fold: []*api.Arc{&a3}}
f1 = feed.Feed{ID: 2, Archive: &a2, PubDate: a2.PubDate}
b = feed.Feed{ID: 100, Type: feed.BangumiType, Bangumi: &bangumi}
feeds = []*feed.Feed{&f, &f1, &b}
)
Convey("add cache", t, func() {
for name, client := range map[string]int{"app": model.TypeApp, "web": model.TypeWeb} {
err = d.AddFeedCache(context.TODO(), client, mid, feeds)
So(err, ShouldBeNil)
Convey(name+"get cache", func() {
res, bids, err := d.FeedCache(context.TODO(), client, mid, 0, 0)
So(res, ShouldResemble, []*feed.Feed{{ID: f.ID, Fold: []*api.Arc{&a3}, PubDate: f.PubDate}})
So(bids, ShouldBeEmpty)
So(err, ShouldBeNil)
})
Convey(name+"get cache when end > length", func() {
res, bids, err := d.FeedCache(context.TODO(), client, mid, 0, 10)
So(res, ShouldResemble, []*feed.Feed{
{ID: a1.Aid, Fold: []*api.Arc{&a3}, PubDate: a1.PubDate},
{ID: a2.Aid, PubDate: a2.PubDate},
{ID: 100, Type: feed.BangumiType},
})
So(bids, ShouldResemble, []int64{100})
So(err, ShouldBeNil)
})
Convey(name+"expire cache", func() {
ok, err := d.ExpireFeedCache(context.TODO(), client, mid)
So(ok, ShouldEqual, true)
So(err, ShouldBeNil)
})
Convey(name+"purge cache", func() {
err := d.PurgeFeedCache(context.TODO(), client, mid)
So(err, ShouldBeNil)
})
}
})
}
func Test_UppersCache(t *testing.T) {
var (
mid = int64(1)
mid2 = int64(2)
now = time.Now().Unix()
err error
a1 = archive.AidPubTime{Aid: 1, PubDate: xtime.Time(now), Copyright: 1}
a2 = archive.AidPubTime{Aid: 2, PubDate: xtime.Time(now - 1), Copyright: 0}
a3 = archive.AidPubTime{Aid: 3, PubDate: xtime.Time(now - 2), Copyright: 0}
)
Convey("add cache", t, func() {
err = d.AddUpperCaches(context.TODO(), map[int64][]*archive.AidPubTime{mid: {&a1, &a2}, mid2: {&a3}})
So(err, ShouldBeNil)
Convey("get cache", func() {
_, err := d.UppersCaches(context.TODO(), []int64{mid, mid2}, 0, 2)
So(err, ShouldBeNil)
// So(res, ShouldResemble, map[int64][]*archive.AidPubTime{mid: {&a1, &a2}, mid2: {&a3}})
})
Convey("expire cache", func() {
res, err := d.ExpireUppersCache(context.TODO(), []int64{mid})
So(err, ShouldBeNil)
So(res, ShouldResemble, map[int64]bool{mid: true})
})
Convey("get expired cache", func() {
d.redisExpireUpper = 0
res, err := d.ExpireUppersCache(context.TODO(), []int64{mid})
So(err, ShouldBeNil)
So(res, ShouldResemble, map[int64]bool{mid: false})
_, err = d.UppersCaches(context.TODO(), []int64{mid}, 0, 2)
So(err, ShouldBeNil)
// So(nres, ShouldResemble, map[int64][]*archive.AidPubTime{mid: {&a1, &a2}})
})
Convey("purge cache", func() {
err := d.DelUpperCache(context.TODO(), mid, a1.Aid)
So(err, ShouldBeNil)
})
})
}
func Test_ArchiveFeedCache(t *testing.T) {
var (
mid = int64(1)
now = time.Now().Unix()
err error
a1 = api.Arc{Aid: 1, PubDate: xtime.Time(now), Author: api.Author{Mid: mid}}
a2 = api.Arc{Aid: 2, PubDate: xtime.Time(now - 1), Author: api.Author{Mid: mid}}
a3 = api.Arc{Aid: 3, PubDate: xtime.Time(now - 2), Author: api.Author{Mid: mid}}
f1 = feed.Feed{ID: a1.Aid, Archive: &a1, PubDate: a1.PubDate, Fold: []*api.Arc{&a3}}
f2 = feed.Feed{ID: a2.Aid, Archive: &a2, PubDate: a2.PubDate}
fs = []*feed.Feed{&f1, &f2}
)
Convey("add cache", t, func() {
err = d.AddArchiveFeedCache(context.TODO(), mid, fs)
So(err, ShouldBeNil)
Convey("get cache", func() {
as, err := d.ArchiveFeedCache(context.TODO(), mid, 0, 2)
So(as, ShouldResemble, []*feed.Feed{
{ID: a1.Aid, PubDate: a1.PubDate, Fold: []*api.Arc{{Aid: 3}}},
{ID: a2.Aid, PubDate: a2.PubDate},
})
So(err, ShouldBeNil)
})
})
}
func Test_BangumiFeedCache(t *testing.T) {
var (
mid = int64(1)
err error
b1 = feed.Bangumi{SeasonID: 100}
b2 = feed.Bangumi{SeasonID: 200}
f1 = feed.Feed{ID: b1.SeasonID, Type: feed.BangumiType, Bangumi: &b1}
f2 = feed.Feed{ID: b2.SeasonID, Type: feed.BangumiType, Bangumi: &b2}
fs = []*feed.Feed{&f1, &f2}
)
Convey("add cache", t, func() {
err = d.AddBangumiFeedCache(context.TODO(), mid, fs)
So(err, ShouldBeNil)
Convey("get cache", func() {
res, err := d.BangumiFeedCache(context.TODO(), mid, 0, 2)
So(res, ShouldResemble, []int64{b2.SeasonID, b1.SeasonID})
So(err, ShouldBeNil)
})
})
}
func Test_UnreadCountCache(t *testing.T) {
var (
mid = int64(1)
count = 100
err error
)
Convey("add cache", t, func() {
err = d.AddUnreadCountCache(context.TODO(), model.TypeApp, mid, count)
So(err, ShouldBeNil)
Convey("get cache", func() {
c, err := d.UnreadCountCache(context.TODO(), model.TypeApp, mid)
So(c, ShouldEqual, count)
So(err, ShouldBeNil)
})
Convey("get wrong cache", func() {
c, err := d.UnreadCountCache(context.TODO(), model.TypeWeb, mid)
So(c, ShouldEqual, 0)
So(err, ShouldBeNil)
})
})
}


@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["http.go"],
importpath = "go-common/app/service/main/feed/http",
tags = ["automanaged"],
deps = [
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/service:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,41 @@
package http
import (
"net/http"
"go-common/app/service/main/feed/conf"
"go-common/app/service/main/feed/service"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
)
var fSrv *service.Service
// Init init http service
func Init(c *conf.Config, srv *service.Service) {
fSrv = srv
// init outer router
engineOuter := bm.DefaultServer(c.BM)
outerRouter(engineOuter)
if err := engineOuter.Start(); err != nil {
log.Error("bm.DefaultServer error(%v)", err)
panic(err)
}
}
// outerRouter init outer router
func outerRouter(r *bm.Engine) {
r.Ping(ping)
r.Register(register)
}
func ping(c *bm.Context) {
if err := fSrv.Ping(c); err != nil {
log.Error("ping error(%v)", err)
c.AbortWithStatus(http.StatusServiceUnavailable)
}
}
func register(c *bm.Context) {
c.JSON(map[string]interface{}{}, nil)
}


@ -0,0 +1,62 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//proto:def.bzl",
"go_proto_library",
)
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"bangumi.go",
"feed.go",
"rpc.go",
],
embed = [":model_go_proto"],
importpath = "go-common/app/service/main/feed/model",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//library/time:go_default_library",
"@com_github_gogo_protobuf//gogoproto:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
],
)
proto_library(
name = "model_proto",
srcs = ["feed.proto"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["@gogo_special_proto//github.com/gogo/protobuf/gogoproto"],
)
go_proto_library(
name = "model_go_proto",
compilers = ["@io_bazel_rules_go//proto:gogofast_proto"],
importpath = "go-common/app/service/main/feed/model",
proto = ":model_proto",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["@com_github_gogo_protobuf//gogoproto:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -0,0 +1,6 @@
package model
type Pull struct {
SeasonID int64 `json:"season_id"`
Ts int64 `json:"ts"`
}


@ -0,0 +1,72 @@
package model
import (
artmdl "go-common/app/interface/openplatform/article/model"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/library/time"
)
// feed types and client types. Note: iota continues from the top of the
// const block, so TypeApp = iota evaluates to 2 here (TypeWeb = 3,
// TypeArt = 4); the client-type values therefore do not overlap the
// feed-type values.
const (
ArchiveType = 0
BangumiType = 1
TypeApp = iota
TypeWeb
TypeArt
)
// FeedType return feed type
func FeedType(app bool) int {
if app {
return TypeApp
}
return TypeWeb
}
// Feed struct of Feed
type Feed struct {
Type int64 `json:"type"`
// One of Archive (*api.Arc) or Bangumi (*Bangumi) is set, depending on Type.
Archive *api.Arc `json:"archive"`
Bangumi *Bangumi `json:"bangumi"`
// ID is aid or SeasonID
ID int64 `json:"id"`
PubDate time.Time `json:"pubdate"`
Fold []*api.Arc `json:"fold"`
}
// Feeds is a sortable slice of feeds, ordered newest first.
type Feeds []*Feed
func (as Feeds) Len() int { return len(as) }
func (as Feeds) Less(i, j int) bool {
if as[i].PubDate != as[j].PubDate {
return as[i].PubDate > as[j].PubDate
}
return as[i].ID > as[j].ID
}
func (as Feeds) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
// ArticleFeeds is a sortable slice of article metas, ordered newest first.
type ArticleFeeds []*artmdl.Meta
func (as ArticleFeeds) Len() int { return len(as) }
func (as ArticleFeeds) Less(i, j int) bool {
if as[i].PublishTime != as[j].PublishTime {
return as[i].PublishTime > as[j].PublishTime
}
return as[i].ID > as[j].ID
}
func (as ArticleFeeds) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
// Arcs AidPubTime slice
type Arcs []*archive.AidPubTime
func (as Arcs) Len() int { return len(as) }
func (as Arcs) Less(i, j int) bool {
if as[i].PubDate != as[j].PubDate {
return as[i].PubDate > as[j].PubDate
}
return as[i].Aid > as[j].Aid
}
func (as Arcs) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
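// Illustrative note (not part of the original commit): Feeds, ArticleFeeds
// and Arcs all implement sort.Interface with newest-first ordering (ties
// broken by the larger ID/Aid), so a merged feed is ordered with a plain
// standard-library sort:
//
//	sort.Sort(Feeds(fs)) // fs []*Feed, newest PubDate first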


@ -0,0 +1,825 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: feed.proto
/*
Package model is a generated protocol buffer package.
It is generated from these files:
feed.proto
It has these top-level messages:
NewEp
Bangumi
*/
package model
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type NewEp struct {
Cover string `protobuf:"bytes,1,opt,name=Cover,proto3" json:"cover"`
Dm int64 `protobuf:"varint,2,opt,name=Dm,proto3" json:"dm"`
EpisodeID int64 `protobuf:"varint,3,opt,name=EpisodeID,proto3" json:"episode_id"`
Index string `protobuf:"bytes,4,opt,name=Index,proto3" json:"index"`
IndexTitle string `protobuf:"bytes,5,opt,name=IndexTitle,proto3" json:"index_title"`
Play int64 `protobuf:"varint,6,opt,name=Play,proto3" json:"play"`
}
func (m *NewEp) Reset() { *m = NewEp{} }
func (m *NewEp) String() string { return proto.CompactTextString(m) }
func (*NewEp) ProtoMessage() {}
func (*NewEp) Descriptor() ([]byte, []int) { return fileDescriptorFeed, []int{0} }
type Bangumi struct {
BgmType int32 `protobuf:"varint,1,opt,name=BgmType,proto3" json:"bgm_type"`
Cover string `protobuf:"bytes,2,opt,name=Cover,proto3" json:"cover"`
IsFinish int32 `protobuf:"varint,3,opt,name=IsFinish,proto3" json:"is_finish"`
NewEp NewEp `protobuf:"bytes,4,opt,name=NewEp" json:"new_ep"`
SeasonID int64 `protobuf:"varint,5,opt,name=SeasonID,proto3" json:"season_id"`
Title string `protobuf:"bytes,6,opt,name=Title,proto3" json:"title"`
TotalCount int64 `protobuf:"varint,7,opt,name=TotalCount,proto3" json:"total_count"`
Ts int64 `protobuf:"varint,8,opt,name=Ts,proto3" json:"ts"`
}
func (m *Bangumi) Reset() { *m = Bangumi{} }
func (m *Bangumi) String() string { return proto.CompactTextString(m) }
func (*Bangumi) ProtoMessage() {}
func (*Bangumi) Descriptor() ([]byte, []int) { return fileDescriptorFeed, []int{1} }
func init() {
proto.RegisterType((*NewEp)(nil), "model.NewEp")
proto.RegisterType((*Bangumi)(nil), "model.Bangumi")
}
func (m *NewEp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NewEp) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Cover) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintFeed(dAtA, i, uint64(len(m.Cover)))
i += copy(dAtA[i:], m.Cover)
}
if m.Dm != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintFeed(dAtA, i, uint64(m.Dm))
}
if m.EpisodeID != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintFeed(dAtA, i, uint64(m.EpisodeID))
}
if len(m.Index) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintFeed(dAtA, i, uint64(len(m.Index)))
i += copy(dAtA[i:], m.Index)
}
if len(m.IndexTitle) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintFeed(dAtA, i, uint64(len(m.IndexTitle)))
i += copy(dAtA[i:], m.IndexTitle)
}
if m.Play != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintFeed(dAtA, i, uint64(m.Play))
}
return i, nil
}
func (m *Bangumi) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Bangumi) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.BgmType != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintFeed(dAtA, i, uint64(m.BgmType))
}
if len(m.Cover) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintFeed(dAtA, i, uint64(len(m.Cover)))
i += copy(dAtA[i:], m.Cover)
}
if m.IsFinish != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintFeed(dAtA, i, uint64(m.IsFinish))
}
dAtA[i] = 0x22
i++
i = encodeVarintFeed(dAtA, i, uint64(m.NewEp.Size()))
n1, err := m.NewEp.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
if m.SeasonID != 0 {
dAtA[i] = 0x28
i++
i = encodeVarintFeed(dAtA, i, uint64(m.SeasonID))
}
if len(m.Title) > 0 {
dAtA[i] = 0x32
i++
i = encodeVarintFeed(dAtA, i, uint64(len(m.Title)))
i += copy(dAtA[i:], m.Title)
}
if m.TotalCount != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintFeed(dAtA, i, uint64(m.TotalCount))
}
if m.Ts != 0 {
dAtA[i] = 0x40
i++
i = encodeVarintFeed(dAtA, i, uint64(m.Ts))
}
return i, nil
}
func encodeVarintFeed(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *NewEp) Size() (n int) {
var l int
_ = l
l = len(m.Cover)
if l > 0 {
n += 1 + l + sovFeed(uint64(l))
}
if m.Dm != 0 {
n += 1 + sovFeed(uint64(m.Dm))
}
if m.EpisodeID != 0 {
n += 1 + sovFeed(uint64(m.EpisodeID))
}
l = len(m.Index)
if l > 0 {
n += 1 + l + sovFeed(uint64(l))
}
l = len(m.IndexTitle)
if l > 0 {
n += 1 + l + sovFeed(uint64(l))
}
if m.Play != 0 {
n += 1 + sovFeed(uint64(m.Play))
}
return n
}
func (m *Bangumi) Size() (n int) {
var l int
_ = l
if m.BgmType != 0 {
n += 1 + sovFeed(uint64(m.BgmType))
}
l = len(m.Cover)
if l > 0 {
n += 1 + l + sovFeed(uint64(l))
}
if m.IsFinish != 0 {
n += 1 + sovFeed(uint64(m.IsFinish))
}
l = m.NewEp.Size()
n += 1 + l + sovFeed(uint64(l))
if m.SeasonID != 0 {
n += 1 + sovFeed(uint64(m.SeasonID))
}
l = len(m.Title)
if l > 0 {
n += 1 + l + sovFeed(uint64(l))
}
if m.TotalCount != 0 {
n += 1 + sovFeed(uint64(m.TotalCount))
}
if m.Ts != 0 {
n += 1 + sovFeed(uint64(m.Ts))
}
return n
}
func sovFeed(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozFeed(x uint64) (n int) {
return sovFeed(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *NewEp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NewEp: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NewEp: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Cover", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Cover = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Dm", wireType)
}
m.Dm = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Dm |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field EpisodeID", wireType)
}
m.EpisodeID = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.EpisodeID |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Index = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IndexTitle", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IndexTitle = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Play", wireType)
}
m.Play = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Play |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipFeed(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthFeed
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Bangumi) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Bangumi: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Bangumi: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field BgmType", wireType)
}
m.BgmType = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.BgmType |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Cover", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Cover = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field IsFinish", wireType)
}
m.IsFinish = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.IsFinish |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field NewEp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := m.NewEp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SeasonID", wireType)
}
m.SeasonID = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.SeasonID |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Title", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthFeed
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Title = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field TotalCount", wireType)
}
m.TotalCount = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.TotalCount |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Ts", wireType)
}
m.Ts = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFeed
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Ts |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipFeed(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthFeed
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipFeed(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFeed
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFeed
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFeed
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthFeed
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFeed
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipFeed(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthFeed = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowFeed = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("feed.proto", fileDescriptorFeed) }
var fileDescriptorFeed = []byte{
// 431 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xd1, 0x6a, 0xdb, 0x30,
0x14, 0x86, 0x63, 0x27, 0x76, 0x1c, 0xb5, 0xeb, 0xc0, 0x17, 0x43, 0x8c, 0x61, 0x95, 0x5e, 0x8c,
0x16, 0xb6, 0x04, 0xba, 0x37, 0x70, 0xd3, 0x41, 0x6e, 0xc6, 0xd0, 0x7c, 0x6f, 0x9c, 0x58, 0x71,
0x05, 0xb6, 0x65, 0x2a, 0x65, 0x9d, 0xdf, 0x64, 0x4f, 0x34, 0x72, 0xd9, 0x27, 0x10, 0x6b, 0x76,
0xa7, 0xa7, 0x18, 0x3a, 0xea, 0xea, 0xdc, 0xec, 0x4e, 0xfa, 0xfe, 0xdf, 0x3a, 0xe7, 0xfc, 0xc7,
0x08, 0x6d, 0x19, 0x2b, 0xe7, 0xdd, 0xbd, 0x50, 0x22, 0x0e, 0x1a, 0x51, 0xb2, 0xfa, 0xed, 0xc7,
0x8a, 0xab, 0xbb, 0xdd, 0x7a, 0xbe, 0x11, 0xcd, 0xa2, 0x12, 0x95, 0x58, 0x80, 0xba, 0xde, 0x6d,
0xe1, 0x06, 0x17, 0x38, 0xb9, 0xaf, 0x2e, 0xb4, 0x87, 0x82, 0x2f, 0xec, 0xe1, 0xb6, 0x8b, 0x09,
0x0a, 0x6e, 0xc4, 0x77, 0x76, 0x8f, 0xbd, 0x73, 0xef, 0x72, 0x96, 0xce, 0x8c, 0x26, 0xc1, 0xc6,
0x02, 0xea, 0x78, 0xfc, 0x06, 0xf9, 0xcb, 0x06, 0xfb, 0xe7, 0xde, 0xe5, 0x38, 0x0d, 0x8d, 0x26,
0x7e, 0xd9, 0x50, 0x7f, 0xd9, 0xc4, 0x1f, 0xd0, 0xec, 0xb6, 0xe3, 0x52, 0x94, 0x6c, 0xb5, 0xc4,
0x63, 0x90, 0xcf, 0x8c, 0x26, 0x88, 0x39, 0x98, 0xf3, 0x92, 0x0e, 0x06, 0x5b, 0x66, 0xd5, 0x96,
0xec, 0x07, 0x9e, 0x0c, 0x65, 0xb8, 0x05, 0xd4, 0xf1, 0x78, 0x81, 0x10, 0x1c, 0x32, 0xae, 0x6a,
0x86, 0x03, 0x70, 0xbd, 0x36, 0x9a, 0x9c, 0x80, 0x2b, 0x57, 0x16, 0xd3, 0x23, 0x4b, 0xfc, 0x0e,
0x4d, 0xbe, 0xd6, 0x45, 0x8f, 0x43, 0x28, 0x1d, 0x19, 0x4d, 0x26, 0x5d, 0x5d, 0xf4, 0x14, 0xe8,
0xc5, 0x2f, 0x1f, 0x4d, 0xd3, 0xa2, 0xad, 0x76, 0x0d, 0x8f, 0xdf, 0xa3, 0x69, 0x5a, 0x35, 0x59,
0xdf, 0x31, 0x18, 0x32, 0x48, 0x4f, 0x8d, 0x26, 0xd1, 0xba, 0x6a, 0x72, 0xd5, 0x77, 0x8c, 0xfe,
0x13, 0x87, 0x28, 0xfc, 0xff, 0x44, 0x71, 0x85, 0xa2, 0x95, 0xfc, 0xcc, 0x5b, 0x2e, 0xef, 0x60,
0xe2, 0x20, 0x7d, 0x65, 0x34, 0x99, 0x71, 0x99, 0x6f, 0x01, 0xd2, 0x17, 0x39, 0xbe, 0x7e, 0xce,
0x17, 0xe6, 0x3d, 0xb9, 0x3e, 0x9d, 0xc3, 0x9a, 0xe6, 0xc0, 0xd2, 0xb3, 0xbd, 0x26, 0x23, 0xa3,
0x49, 0xd8, 0xb2, 0x87, 0x9c, 0x75, 0xf4, 0x79, 0x15, 0x57, 0x28, 0xfa, 0xc6, 0x0a, 0x29, 0xda,
0xd5, 0x12, 0x02, 0x18, 0xbb, 0xe7, 0x25, 0x30, 0x9b, 0xe7, 0x8b, 0x6c, 0x5b, 0x75, 0x41, 0x85,
0x43, 0xab, 0x2e, 0x22, 0xc7, 0x6d, 0x9c, 0x99, 0x50, 0x45, 0x7d, 0x23, 0x76, 0xad, 0xc2, 0x53,
0x78, 0x0d, 0xe2, 0x54, 0x96, 0xe6, 0x1b, 0x8b, 0xe9, 0x91, 0xc5, 0xae, 0x39, 0x93, 0x38, 0x1a,
0xd6, 0xac, 0x24, 0xf5, 0x33, 0x99, 0xe2, 0xfd, 0x53, 0x32, 0x7a, 0x7c, 0x4a, 0x46, 0xfb, 0x43,
0xe2, 0x3d, 0x1e, 0x12, 0xef, 0xf7, 0x21, 0xf1, 0x7e, 0xfe, 0x49, 0x46, 0xeb, 0x10, 0x7e, 0xa5,
0x4f, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x54, 0x10, 0x16, 0x1a, 0x8e, 0x02, 0x00, 0x00,
}

View File

@ -0,0 +1,29 @@
syntax = "proto3";
package model;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.goproto_enum_prefix_all) = false;
option (gogoproto.goproto_getters_all) = false;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.sizer_all) = true;
message NewEp {
string Cover = 1 [(gogoproto.jsontag) = "cover"];
int64 Dm = 2 [(gogoproto.jsontag) = "dm"];
int64 EpisodeID = 3 [(gogoproto.jsontag) = "episode_id"];
string Index = 4 [(gogoproto.jsontag) = "index"];
string IndexTitle = 5 [(gogoproto.jsontag) = "index_title"];
int64 Play = 6 [(gogoproto.jsontag) = "play"];
}
message Bangumi {
int32 BgmType = 1 [(gogoproto.jsontag) = "bgm_type"];
string Cover = 2 [(gogoproto.jsontag) = "cover"];
int32 IsFinish = 3 [(gogoproto.jsontag) = "is_finish"];
NewEp NewEp = 4 [(gogoproto.jsontag) = "new_ep", (gogoproto.nullable) = false];
int64 SeasonID = 5 [(gogoproto.jsontag) = "season_id"];
string Title = 6 [(gogoproto.jsontag) = "title"];
int64 TotalCount = 7 [(gogoproto.jsontag) = "total_count"];
int64 Ts = 8 [(gogoproto.jsontag) = "ts"];
}
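The jsontag options above set the JSON field names on the generated structs. A minimal sketch (not part of the repo; values are illustrative) of the effect with encoding/json:

package main

import (
	"encoding/json"
	"fmt"

	"go-common/app/service/main/feed/model"
)

func main() {
	b := &model.Bangumi{SeasonID: 425, Title: "demo", NewEp: model.NewEp{EpisodeID: 12}}
	buf, _ := json.Marshal(b)
	// keys follow the jsontag names, e.g. "bgm_type", "new_ep", "season_id"
	fmt.Println(string(buf))
}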

View File

@ -0,0 +1,45 @@
package model
type ArgFeed struct {
Mid int64
Pn int
Ps int
RealIP string
}
type ArgArc struct {
Aid int64
Mid int64
PubDate int64
RealIP string
}
type ArgAidMid struct {
Aid int64
Mid int64
RealIP string
}
type ArgMid struct {
Mid int64
RealIP string
}
type ArgUnreadCount struct {
Mid int64
WithoutBangumi bool
RealIP string
}
type ArgFold struct {
Aid int64
Mid int64
RealIP string
}
type ArgChangeUpper struct {
Aid int64
OldMid int64
NewMid int64
RealIP string
}
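These argument structs follow the service's 1-based pn/ps paging convention. An illustrative sketch (not part of the repo) of the offset math the service applies:

package main

import (
	"fmt"

	"go-common/app/service/main/feed/model"
)

func main() {
	arg := &model.ArgFeed{Mid: 27515256, Pn: 2, Ps: 20, RealIP: "127.0.0.1"}
	start := (arg.Pn - 1) * arg.Ps // 20: the slice offset of page 2
	fmt.Println(start)
}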

View File

@ -0,0 +1,42 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["feed_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = ["//app/service/main/feed/model:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["feed.go"],
importpath = "go-common/app/service/main/feed/rpc/client",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/net/rpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,123 @@
package client
import (
"context"
artmdl "go-common/app/interface/openplatform/article/model"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/net/rpc"
)
const (
_appFeed = "RPC.AppFeed"
_webFeed = "RPC.WebFeed"
_archiveFeed = "RPC.ArchiveFeed"
_bangumiFeed = "RPC.BangumiFeed"
_addArc = "RPC.AddArc"
_delArc = "RPC.DelArc"
_purgeFeedCache = "RPC.PurgeFeedCache"
_fold = "RPC.Fold"
_appUnreadCount = "RPC.AppUnreadCount"
_webUnreadCount = "RPC.WebUnreadCount"
_changeArcUpper = "RPC.ChangeArcUpper"
_articleFeed = "RPC.ArticleFeed"
_articleUnreadCount = "RPC.ArticleUnreadCount"
)
const (
_appid = "community.service.feed"
)
var (
_noArg = &struct{}{}
)
// Service struct info.
type Service struct {
client *rpc.Client2
}
// New new service instance and return.
func New(c *rpc.ClientConfig) (s *Service) {
s = &Service{}
s.client = rpc.NewDiscoveryCli(_appid, c)
return
}
// AppFeed receives ArgFeed containing mid and real IP, then returns the app feed.
func (s *Service) AppFeed(c context.Context, arg *feedmdl.ArgFeed) (res []*feedmdl.Feed, err error) {
err = s.client.Call(c, _appFeed, arg, &res)
return
}
// WebFeed receives ArgFeed containing mid and real IP, then returns the web feed without folding.
func (s *Service) WebFeed(c context.Context, arg *feedmdl.ArgFeed) (res []*feedmdl.Feed, err error) {
err = s.client.Call(c, _webFeed, arg, &res)
return
}
// ArchiveFeed receives ArgFeed containing mid and real IP, then returns the archive feed.
func (s *Service) ArchiveFeed(c context.Context, arg *feedmdl.ArgFeed) (res []*feedmdl.Feed, err error) {
err = s.client.Call(c, _archiveFeed, arg, &res)
return
}
// BangumiFeed receives ArgFeed containing mid and real IP, then returns the bangumi feed.
func (s *Service) BangumiFeed(c context.Context, arg *feedmdl.ArgFeed) (res []*feedmdl.Feed, err error) {
err = s.client.Call(c, _bangumiFeed, arg, &res)
return
}
// ArticleFeed receives ArgFeed and returns the article feed.
func (s *Service) ArticleFeed(c context.Context, arg *feedmdl.ArgFeed) (res []*artmdl.Meta, err error) {
err = s.client.Call(c, _articleFeed, arg, &res)
return
}
// ArticleUnreadCount returns the unread count of the article feed.
func (s *Service) ArticleUnreadCount(c context.Context, arg *feedmdl.ArgMid) (res int, err error) {
err = s.client.Call(c, _articleUnreadCount, arg, &res)
return
}
// AddArc adds the archive to the upper's feed cache once it passes review.
func (s *Service) AddArc(c context.Context, arg *feedmdl.ArgArc) (err error) {
err = s.client.Call(c, _addArc, arg, &struct{}{})
return
}
// DelArc removes the archive from the feed caches when it fails review.
func (s *Service) DelArc(c context.Context, arg *feedmdl.ArgAidMid) (err error) {
err = s.client.Call(c, _delArc, arg, &struct{}{})
return
}
// PurgeFeedCache purges the feed caches when a user follows or unfollows an upper.
func (s *Service) PurgeFeedCache(c context.Context, arg *feedmdl.ArgMid) (err error) {
err = s.client.Call(c, _purgeFeedCache, arg, &struct{}{})
return
}
// Fold receives ArgFold containing mid and aid, then returns the upper's folded archives.
func (s *Service) Fold(c context.Context, arg *feedmdl.ArgFold) (res []*feedmdl.Feed, err error) {
err = s.client.Call(c, _fold, arg, &res)
return
}
// AppUnreadCount receives ArgUnreadCount containing mid and WithoutBangumi, then returns the unread count.
func (s *Service) AppUnreadCount(c context.Context, arg *feedmdl.ArgUnreadCount) (res int, err error) {
err = s.client.Call(c, _appUnreadCount, arg, &res)
return
}
// WebUnreadCount receives ArgMid containing mid, then returns the unread count.
func (s *Service) WebUnreadCount(c context.Context, arg *feedmdl.ArgMid) (res int, err error) {
err = s.client.Call(c, _webUnreadCount, arg, &res)
return
}
// ChangeArcUpper refreshes the feed caches when an archive's author changes.
func (s *Service) ChangeArcUpper(c context.Context, arg *feedmdl.ArgChangeUpper) (err error) {
err = s.client.Call(c, _changeArcUpper, arg, &struct{}{})
return
}
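A minimal client-usage sketch (not part of the repo), mirroring the test file below; passing a nil config falls back to discovery defaults, as the test does:

package main

import (
	"context"
	"fmt"

	feedmdl "go-common/app/service/main/feed/model"
	client "go-common/app/service/main/feed/rpc/client"
)

func main() {
	s := client.New(nil)
	res, err := s.WebFeed(context.Background(), &feedmdl.ArgFeed{Mid: 27515256, Pn: 1, Ps: 20})
	if err != nil {
		fmt.Println("WebFeed err:", err)
		return
	}
	fmt.Println("feeds:", len(res))
}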

View File

@ -0,0 +1,102 @@
package client
import (
"context"
"testing"
"time"
model "go-common/app/service/main/feed/model"
)
func TestFeed(t *testing.T) {
s := New(nil)
time.Sleep(1 * time.Second)
testAppFeed(t, s)
testWebFeed(t, s)
testAddArc(t, s)
testDelArc(t, s)
testPurgeFeedCache(t, s)
testFold(t, s)
}
func testAppFeed(t *testing.T, s *Service) {
if res, err := s.AppFeed(context.TODO(), &model.ArgFeed{Mid: 27515256, Pn: 1, Ps: 20}); err != nil {
t.Errorf("Service: AppFeed err: %v", err)
} else {
t.Logf("Service: AppFeed: %v", res)
}
}
func testWebFeed(t *testing.T, s *Service) {
if res, err := s.WebFeed(context.TODO(), &model.ArgFeed{Mid: 27515256, Pn: 1, Ps: 20}); err != nil {
t.Errorf("Service: WebFeed err: %v", err)
} else {
t.Logf("Service: WebFeed: %v", res)
}
}
func testArchiveFeed(t *testing.T, s *Service) {
if res, err := s.ArchiveFeed(context.TODO(), &model.ArgFeed{Mid: 27515256, Pn: 1, Ps: 20}); err != nil {
t.Errorf("Service: ArchiveFeed err: %v", err)
} else {
t.Logf("Service: ArchiveFeed: %v", res)
}
}
func testBangumiFeed(t *testing.T, s *Service) {
if res, err := s.BangumiFeed(context.TODO(), &model.ArgFeed{Mid: 27515256, Pn: 1, Ps: 20}); err != nil {
t.Errorf("Service: BangumiFeed err: %v", err)
} else {
t.Logf("Service: BangumiFeed: %v", res)
}
}
func testAddArc(t *testing.T, s *Service) {
if err := s.AddArc(context.TODO(), &model.ArgArc{Aid: 1}); err != nil {
t.Errorf("Service: AddArc err: %v", err)
}
}
func testDelArc(t *testing.T, s *Service) {
if err := s.DelArc(context.TODO(), &model.ArgAidMid{Aid: 1}); err != nil {
t.Errorf("Service: DelArc err: %v", err)
}
}
func testPurgeFeedCache(t *testing.T, s *Service) {
if err := s.PurgeFeedCache(context.TODO(), &model.ArgMid{Mid: 27515256}); err != nil {
t.Errorf("Service: PurgeFeedCache err: %v", err)
}
}
func testFold(t *testing.T, s *Service) {
if res, err := s.Fold(context.TODO(), &model.ArgFold{Aid: 1, Mid: 27515256}); err != nil {
t.Errorf("Service: Fold err: %v", err)
} else {
t.Logf("Service: Fold: %v", res)
}
}
func testAppUnreadCount(t *testing.T, s *Service) {
if res, err := s.AppUnreadCount(context.TODO(), &model.ArgUnreadCount{Mid: 27515256, WithoutBangumi: false}); err != nil {
t.Errorf("Service: UnreadCount err: %v", err)
} else {
t.Logf("Service: UnreadCount: %v", res)
}
}
func testWebUnreadCount(t *testing.T, s *Service) {
if res, err := s.WebUnreadCount(context.TODO(), &model.ArgMid{Mid: 27515256}); err != nil {
t.Errorf("Service: UnreadCount err: %v", err)
} else {
t.Logf("Service: UnreadCount: %v", res)
}
}
func testChangeArcUpper(t *testing.T, s *Service) {
if err := s.ChangeArcUpper(context.TODO(), &model.ArgChangeUpper{Aid: 1, OldMid: 1, NewMid: 2, RealIP: "127.0.0.1"}); err != nil {
t.Errorf("Service: ChangeArcUpper err: %v", err)
} else {
t.Logf("Service: ChangeArcUpper ok")
}
}

View File

@ -0,0 +1,48 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["rpc_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/feed/model:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["rpc.go"],
importpath = "go-common/app/service/main/feed/rpc/server",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//app/service/main/feed/service:go_default_library",
"//library/net/rpc:go_default_library",
"//library/net/rpc/context:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,109 @@
package server
import (
artmdl "go-common/app/interface/openplatform/article/model"
"go-common/app/service/main/feed/conf"
feedmdl "go-common/app/service/main/feed/model"
"go-common/app/service/main/feed/service"
"go-common/library/net/rpc"
"go-common/library/net/rpc/context"
)
// RPC struct info
type RPC struct {
s *service.Service
}
// New new rpc server.
func New(c *conf.Config, s *service.Service) (svr *rpc.Server) {
r := &RPC{s: s}
svr = rpc.NewServer(c.RPCServer)
if err := svr.Register(r); err != nil {
panic(err)
}
return
}
// Ping checks that the connection is alive.
func (r *RPC) Ping(c context.Context, arg *struct{}, res *struct{}) (err error) {
return
}
// AddArc adds the archive to the upper's feed cache once it passes review.
func (r *RPC) AddArc(c context.Context, arg *feedmdl.ArgArc, res *struct{}) (err error) {
err = r.s.AddArc(c, arg.Mid, arg.Aid, arg.PubDate, arg.RealIP)
return
}
// DelArc removes the archive from the feed caches when it fails review.
func (r *RPC) DelArc(c context.Context, arg *feedmdl.ArgAidMid, res *struct{}) (err error) {
err = r.s.DelArc(c, arg.Mid, arg.Aid, arg.RealIP)
return
}
// PurgeFeedCache purges the feed caches when a user follows or unfollows an upper.
func (r *RPC) PurgeFeedCache(c context.Context, arg *feedmdl.ArgMid, res *struct{}) (err error) {
err = r.s.PurgeFeedCache(c, arg.Mid, arg.RealIP)
return
}
// AppFeed receives ArgFeed containing mid and real IP, then returns the app feed.
func (r *RPC) AppFeed(c context.Context, arg *feedmdl.ArgFeed, res *[]*feedmdl.Feed) (err error) {
*res, err = r.s.Feed(c, true, arg.Mid, arg.Pn, arg.Ps, arg.RealIP)
return
}
// WebFeed receives ArgFeed containing mid and real IP, then returns the web feed.
func (r *RPC) WebFeed(c context.Context, arg *feedmdl.ArgFeed, res *[]*feedmdl.Feed) (err error) {
*res, err = r.s.Feed(c, false, arg.Mid, arg.Pn, arg.Ps, arg.RealIP)
return
}
// Fold receives ArgFold containing mid and aid, then returns the upper's folded archives.
func (r *RPC) Fold(c context.Context, arg *feedmdl.ArgFold, res *[]*feedmdl.Feed) (err error) {
*res, err = r.s.Fold(c, arg.Mid, arg.Aid, arg.RealIP)
return
}
// AppUnreadCount receives ArgUnreadCount containing mid and WithoutBangumi, then returns the unread count.
func (r *RPC) AppUnreadCount(c context.Context, arg *feedmdl.ArgUnreadCount, res *int) (err error) {
*res, err = r.s.UnreadCount(c, true, arg.WithoutBangumi, arg.Mid, arg.RealIP)
return
}
// WebUnreadCount receives ArgMid containing mid, then returns the unread count.
func (r *RPC) WebUnreadCount(c context.Context, arg *feedmdl.ArgMid, res *int) (err error) {
withoutBangumi := false
*res, err = r.s.UnreadCount(c, false, withoutBangumi, arg.Mid, arg.RealIP)
return
}
// ArchiveFeed receives ArgFeed containing mid and real IP, then returns the archive feed.
func (r *RPC) ArchiveFeed(c context.Context, arg *feedmdl.ArgFeed, res *[]*feedmdl.Feed) (err error) {
*res, err = r.s.ArchiveFeed(c, arg.Mid, arg.Pn, arg.Ps, arg.RealIP)
return
}
// BangumiFeed receives ArgFeed containing mid and real IP, then returns the bangumi feed.
func (r *RPC) BangumiFeed(c context.Context, arg *feedmdl.ArgFeed, res *[]*feedmdl.Feed) (err error) {
*res, err = r.s.BangumiFeed(c, arg.Mid, arg.Pn, arg.Ps, arg.RealIP)
return
}
// ChangeArcUpper refreshes the feed caches when an archive's author changes.
func (r *RPC) ChangeArcUpper(c context.Context, arg *feedmdl.ArgChangeUpper, res *struct{}) (err error) {
err = r.s.ChangeAuthor(c, arg.Aid, arg.OldMid, arg.NewMid, arg.RealIP)
return
}
// ArticleFeed receives ArgFeed containing mid and real IP, then returns the article feed.
func (r *RPC) ArticleFeed(c context.Context, arg *feedmdl.ArgFeed, res *[]*artmdl.Meta) (err error) {
*res, err = r.s.ArticleFeed(c, arg.Mid, arg.Pn, arg.Ps, arg.RealIP)
return
}
// ArticleUnreadCount receives ArgMid containing mid, then returns the unread count.
func (r *RPC) ArticleUnreadCount(c context.Context, arg *feedmdl.ArgMid, res *int) (err error) {
*res, err = r.s.ArticleUnreadCount(c, arg.Mid, arg.RealIP)
return
}
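A wiring sketch (not part of the repo) for the constructor above. The conf.Conf global and the service.New(conf) call are assumptions borrowed from this repo's usual cmd/main.go pattern, not shown in this file:

package main

import (
	"go-common/app/service/main/feed/conf"
	"go-common/app/service/main/feed/rpc/server"
	"go-common/app/service/main/feed/service"
)

func main() {
	// assumption: conf.Init() has populated conf.Conf, as the cmd package does
	svc := service.New(conf.Conf)
	// New registers the RPC methods and serves per c.RPCServer
	_ = server.New(conf.Conf, svc)
}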

View File

@ -0,0 +1,42 @@
package server
import (
artmdl "go-common/app/interface/openplatform/article/model"
feed "go-common/app/service/main/feed/model"
"net/rpc"
"testing"
)
const (
addr = "172.16.33.57:6361"
_testArticleFeed = "RPC.ArticleFeed"
)
func TestFeedRpc(t *testing.T) {
	client, err := rpc.Dial("tcp", addr)
	if err != nil {
		t.Errorf("rpc.Dial(tcp, \"%s\") error(%v)", addr, err)
		t.FailNow()
	}
	defer client.Close()
feedRPC(client, t)
}
func feedRPC(client *rpc.Client, t *testing.T) {
arg := &feed.ArgFeed{}
arg.Mid = 88888929
	res := &[]*artmdl.Meta{}
	if err := client.Call(_testArticleFeed, arg, res); err != nil {
t.Errorf("client.Call(%s) error(%v)", _testArticleFeed, err)
t.FailNow()
} else {
result("article", t, res)
}
}
func result(name string, t *testing.T, res interface{}) {
t.Log("[==========" + name + "单元测试结果==========]")
t.Log(res)
t.Log("[↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑]\r\n")
}

View File

@ -0,0 +1,82 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"archive_feed_test.go",
"archive_test.go",
"bangumi_feed_test.go",
"feed_test.go",
"mock_test.go",
"service_test.go",
"unread_count_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/account/model:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/dao:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/cache/redis:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"archive.go",
"archive_feed.go",
"article.go",
"article_feed.go",
"bangumi_feed.go",
"feed.go",
"new_feed.go",
"service.go",
"unread_count.go",
],
importpath = "go-common/app/service/main/feed/service",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/interface/openplatform/article/rpc/client:go_default_library",
"//app/service/main/account/model:go_default_library",
"//app/service/main/account/rpc/client:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/api/gorpc:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/dao:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/log:go_default_library",
"//library/sync/errgroup:go_default_library",
"//library/time:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -0,0 +1,243 @@
package service
import (
"context"
"sync"
accmdl "go-common/app/service/main/account/model"
"go-common/app/service/main/archive/api"
arcmdl "go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/dao"
"go-common/library/log"
"go-common/library/time"
"go-common/library/sync/errgroup"
)
const _upsArcBulkSize = 50
// AddArc adds the archive to the upper's cache when it passes review.
func (s *Service) AddArc(c context.Context, mid, aid int64, pubDate int64, ip string) (err error) {
res, err := s.dao.ExpireUppersCache(c, []int64{mid})
if (err == nil) && res[mid] {
err = s.dao.AddUpperCache(c, mid, &arcmdl.AidPubTime{Aid: aid, PubDate: time.Time(pubDate)})
}
log.Info("service.AddArc(aid: %v, mid: %v, pubDate:%v, ip: %v) err(%v)", aid, mid, pubDate, ip, err)
dao.PromInfo("archive:稿件过审增加缓存")
return
}
// DelArc removes the archive from the caches when it fails review.
func (s *Service) DelArc(c context.Context, mid, aid int64, ip string) (err error) {
s.dao.DelArchiveCache(c, aid)
err = s.dao.DelUpperCache(c, mid, aid)
log.Info("service.DelArc(aid: %v, mid: %v, ip: %v) err(%v)", aid, mid, ip, err)
dao.PromInfo("archive:稿件删除缓存")
return
}
// upArcs gets the latest archives of the given uppers.
func (s *Service) upArcs(c context.Context, minTotalCount int, ip string, mids ...int64) (res map[int64][]*arcmdl.AidPubTime, err error) {
var (
start, end int
cache = true
cached, missed []int64
oks map[int64]bool
tmpRes map[int64][]*arcmdl.AidPubTime
length = len(mids)
)
if length == 0 {
return
}
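	// each upper contributes an even share of minTotalCount plus the configured
	// per-upper minimum; this bounds the per-upper range fetched below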
end = minTotalCount/length + s.c.Feed.MinUpCnt
if oks, err = s.dao.ExpireUppersCache(c, mids); err != nil {
cache = false
missed = mids
} else {
for mid, ok := range oks {
if ok {
cached = append(cached, mid)
} else {
missed = append(missed, mid)
}
}
}
if len(cached) > 0 {
if res, err = s.dao.UppersCaches(c, cached, start, end); err != nil {
dao.PromError("up主缓存", "dao.UppersCaches(%v) error(%v)", cached, err)
missed = mids
err = nil
cache = false
}
}
if res == nil {
res = make(map[int64][]*arcmdl.AidPubTime, len(mids))
}
if len(missed) > 0 {
if tmpRes, err = s.upsPassed(c, missed, ip); err != nil {
dao.PromError("up主回源", "upsPassed(%v) error(%v)", missed, err)
err = nil
tmpRes, _ = s.dao.UppersCaches(c, missed, start, end)
} else {
if cache {
s.addCache(func() {
s.dao.AddUpperCaches(context.Background(), tmpRes)
})
}
}
for mid, arcs := range tmpRes {
if len(arcs) == 0 {
continue
}
var tmp []*arcmdl.AidPubTime
if len(arcs) > end+1 {
tmp = arcs[start : end+1]
} else {
tmp = arcs
}
res[mid] = tmp
}
}
return
}
// attenUpArcs gets the latest archives of the uppers the user follows.
func (s *Service) attenUpArcs(c context.Context, minTotalCount int, mid int64, ip string) (res map[int64][]*arcmdl.AidPubTime, err error) {
var mids []int64
arg := &accmdl.ArgMid{Mid: mid}
if mids, err = s.accRPC.Attentions3(c, arg); err != nil {
dao.PromError("关注rpc接口:Attentions", "s.accRPC.Attentions(%d) error(%v)", mid, err)
return
}
return s.upArcs(c, minTotalCount, ip, mids...)
}
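// upsPassed pulls the passed archives of the given uppers from the archive
// RPC in concurrent batches of _upsArcBulkSize; per-batch errors are logged
// and skipped.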
func (s *Service) upsPassed(c context.Context, mids []int64, ip string) (res map[int64][]*arcmdl.AidPubTime, err error) {
dao.MissedCount.Add("up", int64(len(mids)))
var (
group *errgroup.Group
errCtx context.Context
midsLen, i int
mutex = sync.Mutex{}
)
res = make(map[int64][]*arcmdl.AidPubTime)
group, errCtx = errgroup.WithContext(c)
midsLen = len(mids)
for ; i < midsLen; i += _upsArcBulkSize {
var partMids []int64
if i+_upsArcBulkSize > midsLen {
partMids = mids[i:]
} else {
partMids = mids[i : i+_upsArcBulkSize]
}
group.Go(func() (err error) {
var tmpRes map[int64][]*arcmdl.AidPubTime
arg := &arcmdl.ArgUpsArcs2{Mids: partMids, Pn: 1, Ps: s.c.MultiRedis.MaxArcsNum, RealIP: ip}
if tmpRes, err = s.arcRPC.UpsPassed2(errCtx, arg); err != nil {
dao.PromError("up稿件回源RPC接口:UpsPassed2", "s.arcRPC.UpsPassed2(%+v) error(%v)", arg, err)
err = nil
return
}
mutex.Lock()
for mid, arcs := range tmpRes {
res[mid] = arcs
}
mutex.Unlock()
return
})
}
group.Wait()
return
}
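// archives loads archive details for aids, serving from cache when possible
// and backfilling misses from the archive RPC; non-normal archives are
// dropped from the result.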
func (s *Service) archives(c context.Context, aids []int64, ip string) (res map[int64]*api.Arc, err error) {
var (
missed []int64
mutex = sync.Mutex{}
bulkSize = s.c.Feed.BulkSize
addCache = true
)
if len(aids) == 0 {
return
}
res, missed, err = s.dao.ArchivesCache(c, aids)
if err != nil {
dao.PromError("稿件缓存", "dao.ArchivesCache() error(%v)", err)
missed = aids
addCache = false
err = nil
} else if res != nil && len(missed) == 0 {
return
}
if res == nil {
res = make(map[int64]*api.Arc, len(aids))
}
group, errCtx := errgroup.WithContext(c)
missedLen := len(missed)
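	// backfill the missed aids concurrently in batches of bulkSize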
for i := 0; i < missedLen; i += bulkSize {
var partAids []int64
if i+bulkSize < missedLen {
partAids = missed[i : i+bulkSize]
} else {
partAids = missed[i:missedLen]
}
group.Go(func() error {
var (
tmpRes map[int64]*api.Arc
arcErr error
arg *arcmdl.ArgAids2
)
arg = &arcmdl.ArgAids2{Aids: partAids, RealIP: ip}
if tmpRes, arcErr = s.arcRPC.Archives3(errCtx, arg); arcErr != nil {
dao.PromError("稿件rpc接口:Archives2", "s.arcRPC.Archives3() error(%v)", err)
// only log err message
return nil
}
mutex.Lock()
for aid, arc := range tmpRes {
res[aid] = arc
}
mutex.Unlock()
if addCache {
s.addCache(func() {
s.dao.AddArchivesCacheMap(context.Background(), tmpRes)
})
}
return nil
})
}
group.Wait()
// check state
for aid, arc := range res {
if !arc.IsNormal() {
delete(res, aid)
}
}
return
}
func (s *Service) archive(c context.Context, aid int64, ip string) (res *api.Arc, err error) {
arg := &arcmdl.ArgAid2{Aid: aid, RealIP: ip}
res, err = s.arcRPC.Archive3(c, arg)
return
}
// ChangeAuthor refreshes the caches when an archive moves to a new author.
func (s *Service) ChangeAuthor(c context.Context, aid int64, oldMid int64, newMid int64, ip string) (err error) {
s.dao.DelArchiveCache(c, aid)
s.dao.DelUpperCache(c, oldMid, aid)
arc, err := s.archive(c, aid, ip)
if err != nil {
dao.PromError("稿件转移", "s.archive(%v) error(%v)", aid, err)
return
}
if !arc.IsNormal() {
return
}
arc.Author.Mid = newMid
res, err := s.dao.ExpireUppersCache(c, []int64{newMid})
if (err == nil) && res[newMid] {
err = s.dao.AddUpperCache(c, newMid, &arcmdl.AidPubTime{Aid: arc.Aid, PubDate: arc.PubDate})
}
return
}

View File

@ -0,0 +1,49 @@
package service
import (
"context"
"sort"
"go-common/app/service/main/feed/dao"
feedmdl "go-common/app/service/main/feed/model"
)
// ArchiveFeed gets the archive feed of followed uppers.
func (s *Service) ArchiveFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
if res, err = s.archiveFeedCache(c, mid, pn, ps, ip); (err == nil) && (len(res) > 0) {
return
}
res, err = s.archiveFeed(c, mid, pn, ps, ip)
return
}
func (s *Service) archiveFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.MissedCount.Incr("archive-feed")
var (
start = (pn - 1) * ps
		end = start + ps // slice bound: end is exclusive, so no -1
tmpRes []*feedmdl.Feed
)
tmpRes = s.genArchiveFeed(c, true, mid, s.c.Feed.ArchiveFeedLength, ip)
sort.Sort(feedmdl.Feeds(tmpRes))
res = s.sliceFeeds(tmpRes, start, end)
res, err = s.fillArchiveFeeds(c, res, ip)
s.addCache(func() {
s.dao.AddArchiveFeedCache(context.Background(), mid, tmpRes)
})
return
}
// archiveFeedCache gets the archive feed from cache.
func (s *Service) archiveFeedCache(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.CachedCount.Incr("archive-feed")
var (
start = (pn - 1) * ps
		end = start + ps - 1 // cache (Redis) range bounds are inclusive, hence -1
)
if res, err = s.dao.ArchiveFeedCache(c, mid, start, end); err != nil {
return
}
res, err = s.fillArchiveFeeds(c, res, ip)
return
}

View File

@ -0,0 +1,23 @@
package service
import (
"context"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_ArchiveFeed(t *testing.T) {
Convey("archive feed", t, WithService(t, func(svf *Service) {
res, err := svf.ArchiveFeed(context.TODO(), _mid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
Convey("return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.ArchiveFeed(context.TODO(), _mid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
})
}))
}

View File

@ -0,0 +1,92 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func Test_AddArc(t *testing.T) {
Convey("should return without err", t, WithService(t, func(svf *Service) {
err := svf.AddArc(context.TODO(), _mid, _dataAV, 0, _ip)
So(err, ShouldBeNil)
}))
}
func Test_DelArc(t *testing.T) {
Convey("should return without err", t, WithService(t, func(svf *Service) {
err := svf.DelArc(context.TODO(), _mid, _dataAV, _ip)
So(err, ShouldBeNil)
}))
}
func Test_UpArcs(t *testing.T) {
Convey("should return archives", t, WithService(t, func(svf *Service) {
res, err := svf.upArcs(context.TODO(), 10, _ip, 1)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
Convey("should return blank arcs if not attention ups", t, WithService(t, func(svf *Service) {
res, err := svf.upArcs(context.TODO(), 10, _ip)
So(res, ShouldBeEmpty)
So(err, ShouldBeNil)
}))
}
func Test_attenUpArcs(t *testing.T) {
Convey("should return archives", t, WithService(t, func(svf *Service) {
res, err := svf.attenUpArcs(context.TODO(), 10, _mid, _ip)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
Convey("should return only time archives", t, WithService(t, func(svf *Service) {
res, err := svf.attenUpArcs(context.TODO(), 10, _mid, _ip)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
}
func Test_upsPassed(t *testing.T) {
Convey("should return archives", t, WithService(t, func(svf *Service) {
res, err := svf.upsPassed(context.TODO(), []int64{_mid}, _ip)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
}
func Test_archives(t *testing.T) {
Convey("should return archives", t, WithService(t, func(svf *Service) {
res, err := svf.archives(context.TODO(), []int64{_dataAV}, _ip)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
}
func Test_archive(t *testing.T) {
Convey("should return archive", t, WithService(t, func(svf *Service) {
res, err := svf.archive(context.TODO(), _dataAV, _ip)
So(res, ShouldNotBeEmpty)
So(err, ShouldBeNil)
}))
}
func Test_ChangeAuthor(t *testing.T) {
Convey("should return without err", t, WithService(t, func(svf *Service) {
c := context.TODO()
oldArc, _ := svf.archive(c, _dataAV, _ip)
svf.AddArc(c, oldArc.Author.Mid, _dataAV, 0, _ip)
err := svf.ChangeAuthor(c, oldArc.Aid, oldArc.Author.Mid, 2, _ip)
So(err, ShouldBeNil)
res, _ := svf.dao.UppersCaches(c, []int64{oldArc.Author.Mid}, 0, -1)
So(res[oldArc.Author.Mid], ShouldBeEmpty)
res, _ = svf.dao.UppersCaches(c, []int64{2}, 0, -1)
So(len(res), ShouldNotBeEmpty)
}))
}

View File

@ -0,0 +1,114 @@
package service
import (
"context"
"sync"
artmdl "go-common/app/interface/openplatform/article/model"
accmdl "go-common/app/service/main/account/model"
"go-common/app/service/main/feed/dao"
"go-common/library/log"
"go-common/library/sync/errgroup"
)
const _upsArtBulkSize = 50
// attenUpArticles gets the latest articles of the uppers the user follows.
func (s *Service) attenUpArticles(c context.Context, minTotalCount int, mid int64, ip string) (res map[int64][]*artmdl.Meta, err error) {
var mids []int64
arg := &accmdl.ArgMid{Mid: mid}
if mids, err = s.accRPC.Attentions3(c, arg); err != nil {
dao.PromError("关注rpc接口:Attentions", "s.accRPC.Attentions(%d) error(%v)", mid, err)
return
}
if len(mids) == 0 {
return
}
count := minTotalCount/len(mids) + s.c.Feed.MinUpCnt
return s.upsArticle(c, count, ip, mids...)
}
func (s *Service) upsArticle(c context.Context, count int, ip string, mids ...int64) (res map[int64][]*artmdl.Meta, err error) {
dao.MissedCount.Add("upArt", int64(len(mids)))
var (
group *errgroup.Group
errCtx context.Context
midsLen, i int
mutex = sync.Mutex{}
)
res = make(map[int64][]*artmdl.Meta)
group, errCtx = errgroup.WithContext(c)
midsLen = len(mids)
for ; i < midsLen; i += _upsArtBulkSize {
var partMids []int64
		if i+_upsArtBulkSize > midsLen {
partMids = mids[i:]
} else {
partMids = mids[i : i+_upsArtBulkSize]
}
group.Go(func() (err error) {
var tmpRes map[int64][]*artmdl.Meta
arg := &artmdl.ArgUpsArts{Mids: partMids, Pn: 1, Ps: count, RealIP: ip}
if tmpRes, err = s.artRPC.UpsArtMetas(errCtx, arg); err != nil {
log.Error("s.artRPC.UpsArtMetas(%+v) error(%v)", arg, err)
err = nil
return
}
mutex.Lock()
for mid, arcs := range tmpRes {
for _, arc := range arcs {
if arc.AttrVal(artmdl.AttrBitNoDistribute) {
continue
}
res[mid] = append(res[mid], arc)
}
}
mutex.Unlock()
return
})
}
group.Wait()
return
}
func (s *Service) articles(c context.Context, ip string, aids ...int64) (res map[int64]*artmdl.Meta, err error) {
var (
mutex = sync.Mutex{}
bulkSize = s.c.Feed.BulkSize
)
res = make(map[int64]*artmdl.Meta, len(aids))
group, errCtx := errgroup.WithContext(c)
aidsLen := len(aids)
for i := 0; i < aidsLen; i += bulkSize {
var partAids []int64
if i+bulkSize < aidsLen {
partAids = aids[i : i+bulkSize]
} else {
partAids = aids[i:aidsLen]
}
group.Go(func() error {
var (
tmpRes map[int64]*artmdl.Meta
artErr error
arg *artmdl.ArgAids
)
arg = &artmdl.ArgAids{Aids: partAids, RealIP: ip}
if tmpRes, artErr = s.artRPC.ArticleMetas(errCtx, arg); artErr != nil {
log.Error("s.artRPC.ArticleMetas() error(%v)", artErr)
return nil
}
mutex.Lock()
for aid, arc := range tmpRes {
if arc.AttrVal(artmdl.AttrBitNoDistribute) {
continue
}
res[aid] = arc
}
mutex.Unlock()
return nil
})
}
group.Wait()
return
}

View File

@ -0,0 +1,120 @@
package service
import (
"context"
"sort"
artmdl "go-common/app/interface/openplatform/article/model"
"go-common/app/service/main/feed/dao"
"go-common/app/service/main/feed/model"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/log"
)
// ArticleFeed gets the article feed of followed uppers.
func (s *Service) ArticleFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*artmdl.Meta, err error) {
var (
fp = pn == 1
cached bool
ft = model.TypeArt
from = "1"
)
if fp {
		// if this is the first page and the last pull was within the interval, serve the cached feed.
fp = s.checkLast(c, ft, mid)
}
	// refresh the feed cache expiry when the user accesses the feed.
if cached, err = s.dao.ExpireFeedCache(c, ft, mid); err != nil {
dao.PromError("expire feed cache", "s.dao.ExpireFeedCache(%d) error(%v)", mid, err)
err = nil
}
defer func() {
for _, meta := range res {
if meta != nil && meta.Author != nil && meta.Author.Name == "" {
dao.PromError("bug:noauthor"+from, "bugfix: %+v, author: %+v from: %v", meta, meta.Author, from)
}
}
}()
if cached && !fp {
		// on a cache hit return directly; on cache error log and regenerate.
if res, err = s.articleFeedCache(c, mid, pn, ps, ip); err == nil {
return
}
dao.PromError("获取文章feed cache", "s.articleFeedCache(%v, %v, %v, %v, len: %v) error(%v)", ft, mid, pn, ps, len(res), err)
}
res, err = s.articleFeed(c, mid, pn, ps, ip)
from = "2"
if fp {
s.addCache(func() {
s.dao.AddUnreadCountCache(context.Background(), ft, mid, 0)
})
}
return
}
func (s *Service) articleFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*artmdl.Meta, err error) {
dao.MissedCount.Incr("Article-feed")
var (
start = (pn - 1) * ps
		end = start + ps // slice bound: end is exclusive, so no -1
tmpRes []*artmdl.Meta
)
tmpRes = s.genArticleFeed(c, mid, s.c.Feed.ArticleFeedLength, ip)
if len(tmpRes) == 0 || len(tmpRes) < start {
		// clear the cache when the user has unfollowed all uppers
s.addCache(func() {
s.dao.AddArticleFeedCache(context.Background(), mid, tmpRes)
})
return
}
sort.Sort(feedmdl.ArticleFeeds(tmpRes))
if end < len(tmpRes) {
res = tmpRes[start:end]
} else {
res = tmpRes[start:]
}
s.addCache(func() {
s.dao.AddArticleFeedCache(context.Background(), mid, tmpRes)
})
return
}
// articleFeedCache gets the article feed from cache.
func (s *Service) articleFeedCache(c context.Context, mid int64, pn, ps int, ip string) (res []*artmdl.Meta, err error) {
dao.CachedCount.Incr("Article-feed")
var (
start = (pn - 1) * ps
		end = start + ps - 1 // cache (Redis) range bounds are inclusive, hence -1
aids []int64
am map[int64]*artmdl.Meta
)
if aids, err = s.dao.ArticleFeedCache(c, mid, start, end); err != nil || len(aids) == 0 {
return
}
if am, err = s.articles(c, ip, aids...); err != nil {
return
}
for _, aid := range aids {
if _, ok := am[aid]; ok {
res = append(res, am[aid])
}
}
return
}
func (s *Service) genArticleFeed(c context.Context, mid int64, minTotalCount int, ip string) (res []*artmdl.Meta) {
var (
marts map[int64][]*artmdl.Meta
err error
)
if marts, err = s.attenUpArticles(c, minTotalCount, mid, ip); err != nil {
log.Error("s.attenUpArticles(mid: %v) err: %v", mid, err)
return
}
for _, as := range marts {
for _, a := range as {
res = append(res, a)
}
}
return
}

View File

@ -0,0 +1,53 @@
package service
import (
"context"
"sort"
"go-common/app/service/main/feed/dao"
feedmdl "go-common/app/service/main/feed/model"
)
// BangumiFeed gets the bangumi feed.
func (s *Service) BangumiFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
if res, err = s.bangumiFeedCache(c, mid, pn, ps, ip); (err == nil) && (len(res) > 0) {
return
}
res, err = s.bangumiFeed(c, mid, pn, ps, ip)
return
}
func (s *Service) bangumiFeed(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.MissedCount.Incr("bangumi-feed")
var (
start = (pn - 1) * ps
		end = start + ps // slice bound: end is exclusive, so no -1
tmpRes []*feedmdl.Feed
)
tmpRes = s.genBangumiFeed(c, mid, ip)
sort.Sort(feedmdl.Feeds(tmpRes))
res = s.sliceFeeds(tmpRes, start, end)
s.addCache(func() {
s.dao.AddBangumiFeedCache(c, mid, tmpRes)
})
return
}
// bangumiFeedCache gets the bangumi feed from cache.
func (s *Service) bangumiFeedCache(c context.Context, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.CachedCount.Incr("bangumi-feed")
var (
start = (pn - 1) * ps
		end = start + ps - 1 // cache (Redis) range bounds are inclusive, hence -1
endPos = end
bids []int64
)
if bids, err = s.dao.BangumiFeedCache(c, mid, start, endPos); err != nil {
return
}
if res, err = s.bangumiFeedFromSeason(c, bids, ip); err != nil {
dao.PromError("获取番剧feed", "s.bangumiFeed(bids: %v) err: %v", bids, err)
return
}
return
}

View File

@ -0,0 +1,29 @@
package service
import (
"context"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_BangumiFeed(t *testing.T) {
Convey("bangumi feed", t, WithService(t, func(svf *Service) {
res, err := svf.BangumiFeed(context.TODO(), _bangumiMid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
Convey("return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.BangumiFeed(context.TODO(), _bangumiMid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
})
Convey("bangumi feed cache", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.bangumiFeedCache(context.TODO(), _bangumiMid, 3, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
})
}))
}

View File

@ -0,0 +1,446 @@
package service
import (
"context"
"fmt"
"sort"
"time"
"go-common/app/service/main/archive/api"
arcmdl "go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/dao"
feedmdl "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
"go-common/library/sync/errgroup"
)
func archiveAppName(ft int) string {
if ft == feedmdl.TypeApp {
return "app"
} else if ft == feedmdl.TypeWeb {
return "web"
}
return "article"
}
func (s *Service) pullInterval(ft int) xtime.Duration {
if ft == feedmdl.TypeApp {
return s.c.Feed.AppPullInterval
} else if ft == feedmdl.TypeWeb {
return s.c.Feed.WebPullInterval
} else {
return s.c.Feed.ArtPullInterval
}
}
// Feed gets the merged feed of followed uppers and bangumi.
func (s *Service) Feed(c context.Context, app bool, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
var (
fp = pn == 1
cached bool
ft = feedmdl.FeedType(app)
)
if fp {
		// if this is the first page and the last pull was within the interval, serve the cached feed.
fp = s.checkLast(c, ft, mid)
}
	// refresh the feed cache expiry when the user accesses the feed.
if cached, err = s.dao.ExpireFeedCache(c, ft, mid); err != nil {
dao.PromError("expire feed cache", "s.dao.ExpireFeedCache(%d) error(%v)", mid, err)
err = nil
}
	// detect duplicate ids in the returned feed
defer func() {
var exists = map[int64]bool{}
for _, feed := range res {
if _, ok := exists[feed.ID]; ok {
dao.PromError("重复动态", "feed same user: %d, id:%d", mid, feed.ID)
} else {
exists[feed.ID] = true
}
}
}()
if cached && !fp {
		// serve from cache; errors are logged and the result returned as-is
if res, err = s.feedCache(c, ft, mid, pn, ps, ip); err != nil {
dao.PromError("获取Feed cache", "s.feedCache(%v, %v, %v, %v, len: %v) error(%v)", app, mid, pn, ps, len(res), err)
}
return
}
res, err = s.feed(c, ft, mid, pn, ps, ip)
if fp {
s.addCache(func() {
s.dao.AddUnreadCountCache(context.Background(), ft, mid, 0)
})
}
return
}
// checkLast reports whether the last access is older than the pull interval, i.e. whether this pull counts as a fresh first page.
func (s *Service) checkLast(c context.Context, ft int, mid int64) (fp bool) {
var (
t int64
err error
now = time.Now()
pullInterval xtime.Duration
)
fp = true
if t, err = s.dao.LastAccessCache(c, ft, mid); err != nil {
return
}
last := time.Unix(t, 0)
pullInterval = s.pullInterval(ft)
if now.Sub(last) < time.Duration(pullInterval) {
fp = false
} else {
s.addCache(func() {
s.dao.AddLastAccessCache(context.TODO(), ft, mid, now.Unix())
})
}
return
}
func (s *Service) genArchiveFeed(c context.Context, fold bool, mid int64, minTotalCount int, ip string) (res []*feedmdl.Feed) {
var (
marcs map[int64][]*arcmdl.AidPubTime
err error
)
if marcs, err = s.attenUpArcs(c, minTotalCount, mid, ip); err != nil {
dao.PromError("获取关注up主稿件", "s.attenUpArcs(mid: %v) err: %v", mid, err)
return
}
if fold {
for _, as := range marcs {
switch len(as) {
case 0: // no archives from upper
case 1: // just have one archive of upper
res = append(res, arcTimeToFeed(as[0]))
default: // fold
for _, appFeed := range s.fold(as) {
res = append(res, appFeed)
}
}
}
} else {
for _, as := range marcs {
for _, a := range as {
res = append(res, arcTimeToFeed(a))
}
}
}
return
}
func (s *Service) genBangumiFeed(c context.Context, mid int64, ip string) (res []*feedmdl.Feed) {
var (
seasonIDs []int64
err error
)
if seasonIDs, err = s.bangumi.BangumiPull(c, mid, ip); err != nil {
return nil
}
res, _ = s.bangumiFeedFromSeason(c, seasonIDs, ip)
return
}
// feed get feed.
func (s *Service) feed(c context.Context, ft int, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.MissedCount.Incr(archiveAppName(ft) + "-feed")
var (
start = (pn - 1) * ps
		end = start + ps // slice bound: end is exclusive, so no -1
tmpRes []*feedmdl.Feed
bangumiFeeds []*feedmdl.Feed
group *errgroup.Group
errCtx context.Context
minTotalCount int
fold bool
)
group, errCtx = errgroup.WithContext(c)
if ft == feedmdl.TypeApp {
minTotalCount = s.c.Feed.AppLength
fold = true
} else {
minTotalCount = s.c.Feed.WebLength
fold = false
}
// fetch archives
group.Go(func() error {
tmpRes = s.genArchiveFeed(errCtx, fold, mid, minTotalCount, ip)
return nil
})
// fetch bangumis
group.Go(func() error {
bangumiFeeds = s.genBangumiFeed(errCtx, mid, ip)
return nil
})
group.Wait()
// merge archives and bangumis
tmpRes = append(tmpRes, bangumiFeeds...)
if len(tmpRes) == 0 || len(tmpRes) < start {
		// clear the cache when the user has unfollowed all uppers
s.addCache(func() {
s.dao.AddFeedCache(context.Background(), ft, mid, tmpRes)
})
return
}
	// sort newest-first, then set cache.
sort.Sort(feedmdl.Feeds(tmpRes))
res = s.sliceFeeds(tmpRes, start, end)
res, err = s.fillArchiveFeeds(c, res, ip)
s.addCache(func() {
s.dao.AddFeedCache(context.Background(), ft, mid, tmpRes)
})
return
}
func (s *Service) sliceFeeds(fs []*feedmdl.Feed, start, end int) (res []*feedmdl.Feed) {
if start > len(fs) {
return
}
if end < len(fs) {
res = fs[start:end]
} else {
res = fs[start:]
}
return
}
// feedCache gets the feed from cache.
func (s *Service) feedCache(c context.Context, ft int, mid int64, pn, ps int, ip string) (res []*feedmdl.Feed, err error) {
dao.CachedCount.Incr(archiveAppName(ft) + "-feed")
var (
start = (pn - 1) * ps
		end = start + ps - 1 // cache (Redis) range bounds are inclusive, hence -1
endPos = end
bids []int64
bangumiFeeds []*feedmdl.Feed
group *errgroup.Group
errCtx context.Context
arcErr, bangumiErr error
)
if res, bids, err = s.dao.FeedCache(c, ft, mid, start, endPos); err != nil {
err = nil
return
}
group, errCtx = errgroup.WithContext(c)
group.Go(func() error {
res, arcErr = s.fillArchiveFeeds(errCtx, res, ip)
return nil
})
group.Go(func() error {
bangumiFeeds, bangumiErr = s.bangumiFeedFromSeason(errCtx, bids, ip)
return nil
})
group.Wait()
if (arcErr != nil) && (bangumiErr != nil) {
dao.PromError("生成feed流", "s.feedCache(mid: %v) arc:%v bangumi: %v", mid, arcErr, bangumiErr)
err = fmt.Errorf("s.feedCache(mid: %v) arc:%v bangumi: %v", mid, arcErr, bangumiErr)
return
}
s.replaceFeeds(res, bangumiFeeds)
return
}
func (s *Service) fillArchiveFeeds(c context.Context, fs []*feedmdl.Feed, ip string) (res []*feedmdl.Feed, err error) {
var (
allAids []int64
am map[int64]*api.Arc
)
if len(fs) == 0 {
return
}
for _, fe := range fs {
if fe.Type != feedmdl.ArchiveType {
continue
}
allAids = append(allAids, fe.ID)
for _, a := range fe.Fold {
allAids = append(allAids, a.Aid)
}
}
if am, err = s.archives(c, allAids, ip); err != nil {
return
}
for _, fe := range fs {
if fe.Type == feedmdl.ArchiveType {
fe = fmtArc(fe, am)
}
if fe != nil {
res = append(res, fe)
}
}
return
}
func fmtArc(feed *feedmdl.Feed, archives map[int64]*api.Arc) *feedmdl.Feed {
if feed == nil {
return nil
}
var arcs []*api.Arc
if v, ok := archives[feed.ID]; ok {
arcs = append(arcs, v)
}
for _, arc := range feed.Fold {
if v, ok := archives[arc.Aid]; ok {
arcs = append(arcs, v)
}
}
if len(arcs) == 0 {
return nil
}
feed.Archive = arcs[0]
feed.PubDate = arcs[0].PubDate
feed.ID = arcs[0].Aid
feed.Fold = arcs[1:]
return feed
}
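// Illustrative note (an addition, not part of the original source): fmtArc
// promotes the first resolvable archive to the cover slot. Given
// feed: ID=10, Fold=[{Aid:11}, {Aid:12}]
// am:   {11: arc11, 12: arc12} // aid 10 was filtered out upstream
// the result is ID=11, Archive=arc11, Fold=[arc12]; a feed with no
// resolvable archive at all is dropped as nil.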
func (s *Service) replaceFeeds(res []*feedmdl.Feed, fs []*feedmdl.Feed) {
var (
f *feedmdl.Feed
key string
format = "%v-%v"
m = make(map[string]*feedmdl.Feed)
)
for _, f = range fs {
key = fmt.Sprintf(format, f.Type, f.ID)
m[key] = f
}
for i, f := range res {
key = fmt.Sprintf(format, f.Type, f.ID)
if _, ok := m[key]; ok {
res[i] = m[key]
}
}
}
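// Illustrative note (an addition, not part of the original source):
// replaceFeeds keys entries by "<Type>-<ID>", so an archive and a bangumi
// sharing a numeric ID map to distinct keys and cannot overwrite each other;
// only positions in res whose key also appears in fs are replaced, in place.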
// fold groups same-day archives that fall into the same 4-hour window behind a cover archive.
func (s *Service) fold(as []*arcmdl.AidPubTime) (res []*feedmdl.Feed) {
if len(as) == 0 {
return
}
sort.Sort(feedmdl.Arcs(as))
var (
fa = arcTimeToFeed(as[0]) // the cover archive of item
ft = as[0].PubDate.Time() // first archive pubdate
// archives within one 4-hour window are folded behind a cover archive
// e.g. hour in [0,4) -> ch 0; [4,8) -> 4; [8,12) -> 8; [12,16) -> 12; [16,20) -> 16; [20,24) -> 20
ch = (ft.Hour() / 4) * 4 // check hour
at time.Time // archive pubdate
)
for k, a := range as[1:] {
isEnd := k == (len(as[1:]) - 1)
y1, m1, d1 := ft.Date()
at = a.PubDate.Time()
y2, m2, d2 := at.Date()
// NOTE: original video(copyright == 1) does not fold
if a.Copyright != 1 && (y1 == y2 && m1 == m2 && d1 == d2 && at.Hour() >= ch) {
fa.Fold = append(fa.Fold, &api.Arc{Aid: a.Aid})
if isEnd {
res = append(res, fa)
}
} else {
res = append(res, fa)
fa = arcTimeToFeed(a)
if isEnd {
res = append(res, fa)
} else {
// next item
ft = a.PubDate.Time()
ch = (ft.Hour() / 4) * 4
}
}
}
return
}
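// exampleCheckHour is an illustrative sketch (an addition, not part of the
// original source) of the bucketing used by fold: a publication hour maps to
// the start of its 4-hour window, e.g. 3 -> 0, 9 -> 8, 23 -> 20.
func exampleCheckHour(hour int) int {
return (hour / 4) * 4
}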
// Fold gets the archives folded behind the cover archive aid.
func (s *Service) Fold(c context.Context, mid int64, aid int64, ip string) (res []*feedmdl.Feed, err error) {
var (
arcsm map[int64][]*arcmdl.AidPubTime
arcs []*arcmdl.AidPubTime
)
if arcsm, err = s.upArcs(c, s.c.Feed.AppLength, ip, mid); err != nil {
return
}
sort.Sort(feedmdl.Arcs(arcsm[mid]))
for i, arc := range arcsm[mid] {
if arc.Aid == aid {
arcs = arcsm[mid][i:]
break
}
}
switch len(arcs) {
case 0, 1:
default:
for _, appFeed := range s.fold(arcs) {
if appFeed.ID == aid {
for _, arc := range appFeed.Fold {
res = append(res, arcToFeed(arc))
}
}
}
}
res, err = s.fillArchiveFeeds(c, res, ip)
return
}
// PurgeFeedCache purges the feed caches when a user follows or unfollows an upper.
func (s *Service) PurgeFeedCache(c context.Context, mid int64, ip string) (err error) {
if err = s.dao.PurgeFeedCache(c, feedmdl.TypeApp, mid); err != nil {
return
}
if err = s.dao.PurgeFeedCache(c, feedmdl.TypeWeb, mid); err != nil {
return
}
err = s.dao.PurgeFeedCache(c, feedmdl.TypeArt, mid)
return
}
func (s *Service) bangumiFeedFromSeason(c context.Context, seasonIDs []int64, ip string) (feeds []*feedmdl.Feed, err error) {
var (
bm map[int64]*feedmdl.Bangumi
cached map[int64]*feedmdl.Bangumi
missed []int64
addCache = true
)
if cached, missed, err = s.dao.BangumisCache(c, seasonIDs); err != nil {
dao.PromError("番剧feed中调用缓存", "s.dao.BangumisCache err: %v", err)
err = nil
addCache = false
missed = seasonIDs
cached = make(map[int64]*feedmdl.Bangumi, len(seasonIDs)) // guard against writing to a nil map below
}
if len(missed) > 0 {
if bm, err = s.bangumi.BangumiSeasons(c, missed, ip); err != nil {
return
}
if addCache {
s.addCache(func() {
s.dao.AddBangumisCacheMap(context.Background(), bm)
})
}
for bid, b := range bm {
cached[bid] = b
}
}
for _, bid := range seasonIDs {
if bangumi, ok := cached[bid]; ok {
feeds = append(feeds, bangumiToFeed(bangumi))
}
}
return
}
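// Illustrative note (an addition, not part of the original source):
// bangumiFeedFromSeason is a cache-aside read. Assuming seasonIDs = [1, 2, 3]
// with only season 2 cached:
// cached = {2: b2}, missed = [1, 3]
// RPC fetch -> bm = {1: b1, 3: b3}, backfilled into the cache asynchronously
// assembly -> results follow the seasonIDs order: [b1, b2, b3]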
func arcToFeed(arc *api.Arc) *feedmdl.Feed {
return &feedmdl.Feed{ID: arc.Aid, Type: feedmdl.ArchiveType, PubDate: arc.PubDate}
}
func arcTimeToFeed(arc *arcmdl.AidPubTime) *feedmdl.Feed {
return &feedmdl.Feed{ID: arc.Aid, Type: feedmdl.ArchiveType, PubDate: arc.PubDate}
}
func bangumiToFeed(b *feedmdl.Bangumi) *feedmdl.Feed {
return &feedmdl.Feed{ID: b.SeasonID, Bangumi: b, Type: feedmdl.BangumiType, PubDate: xtime.Time(b.Ts)}
}

View File

@ -0,0 +1,169 @@
package service
import (
"context"
"testing"
"time"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
feed "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
"strconv"
. "github.com/smartystreets/goconvey/convey"
)
func Test_fold(t *testing.T) {
Convey("fold archives", t, WithBlankService(func(svf *Service) {
var t1, t2, t3 time.Time // t1: avoid shadowing the *testing.T parameter
t1, _ = time.Parse("2006-01-02 15:04:05", "2017-03-01 00:00:00")
t2, _ = time.Parse("2006-01-02 15:04:05", "2017-03-01 03:00:00")
t3, _ = time.Parse("2006-01-02 15:04:05", "2017-03-01 05:00:00")
arc := archive.AidPubTime{Aid: 1, Copyright: 0, PubDate: xtime.Time(t1.Unix())}
arc1 := archive.AidPubTime{Aid: 1, Copyright: 1, PubDate: xtime.Time(t1.Unix())}
arc2 := archive.AidPubTime{Aid: 2, Copyright: 1, PubDate: xtime.Time(t2.Unix())}
arc3 := archive.AidPubTime{Aid: 3, Copyright: 1, PubDate: xtime.Time(t3.Unix())}
Convey("fold reprinted archives", func() {
arcs := []*archive.AidPubTime{&arc2, &arc, &arc3}
res := []*feed.Feed{
&feed.Feed{ID: arc3.Aid, PubDate: arc3.PubDate},
&feed.Feed{ID: arc2.Aid, Fold: []*api.Arc{&api.Arc{Aid: arc.Aid}}, PubDate: arc2.PubDate},
}
So(svf.fold(arcs), ShouldResemble, res)
})
Convey("not fold original archives", func() {
arcs := []*archive.AidPubTime{&arc2, &arc1, &arc3}
res := []*feed.Feed{
&feed.Feed{ID: arc3.Aid, PubDate: arc3.PubDate},
&feed.Feed{ID: arc2.Aid, PubDate: arc2.PubDate},
&feed.Feed{ID: arc1.Aid, PubDate: arc1.PubDate},
}
So(svf.fold(arcs), ShouldResemble, res)
})
}))
}
func Test_Feed(t *testing.T) {
for name, client := range map[string]bool{"app": true, "web": false} {
for _, mid := range []int64{_mid, _bangumiMid} {
midStr := strconv.FormatInt(mid, 10)
Convey(name+midStr+" with fold return feed", t, WithService(t, func(svf *Service) {
res, err := svf.Feed(context.TODO(), client, mid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
Convey(name+"return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.Feed(context.TODO(), client, mid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
})
}))
Convey(name+midStr+" without fold return feed", t, WithService(t, func(svf *Service) {
res, err := svf.Feed(context.TODO(), client, mid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
Convey(name+"return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.Feed(context.TODO(), client, mid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldNotBeEmpty)
})
}))
}
Convey(name+"user don't have attention ups and bangumis", t, WithService(t, func(svf *Service) {
midStr := strconv.FormatInt(_blankMid, 10)
Convey("with fold return feed", func() {
res, err := svf.Feed(context.TODO(), client, _blankMid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldBeEmpty)
Convey(name+"return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.Feed(context.TODO(), client, _blankMid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldBeEmpty)
})
})
Convey(name+midStr+" without fold return feed", func() {
res, err := svf.Feed(context.TODO(), client, _blankMid, 1, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldBeEmpty)
Convey(name+"return feed for page 2", func() {
time.Sleep(time.Millisecond * 300) // wait cache ready
res, err := svf.Feed(context.TODO(), client, _blankMid, 2, 2, _ip)
So(err, ShouldBeNil)
So(res, ShouldBeEmpty)
})
})
}))
}
}
func Test_PurgeFeedCache(t *testing.T) {
Convey("should return without err", t, WithService(t, func(svf *Service) {
err := svf.PurgeFeedCache(context.TODO(), _mid, _ip)
So(err, ShouldBeNil)
}))
}
func Test_bangumiFeed(t *testing.T) {
Convey("should return without err", t, WithService(t, func(svf *Service) {
feeds, err := svf.bangumiFeedFromSeason(context.TODO(), []int64{_seasonID}, _ip)
So(err, ShouldBeNil)
So(feeds, ShouldNotBeEmpty)
}))
}
func Test_fillArchiveFeeds(t *testing.T) {
Convey("fill feeds", t, WithService(t, func(svf *Service) {
f1 := &feed.Feed{ID: _arc1.Aid, PubDate: _arc1.PubDate, Fold: []*api.Arc{&api.Arc{Aid: _arc2.Aid}}}
f2 := &feed.Feed{ID: _arc1.Aid, PubDate: _arc1.PubDate, Archive: _arc1, Fold: []*api.Arc{_arc2}}
bangumi := &feed.Feed{ID: 1, Type: feed.BangumiType}
fs := []*feed.Feed{f1, bangumi}
expt := []*feed.Feed{f2, bangumi}
feeds, _ := svf.fillArchiveFeeds(context.TODO(), fs, _ip)
So(feeds, ShouldResemble, expt)
}))
}
func Test_replaceFeeds(t *testing.T) {
Convey("replace feeds", t, WithBlankService(func(svf *Service) {
arc1 := api.Arc{Aid: 1, Copyright: 1}
arc2 := api.Arc{Aid: 2, Copyright: 1}
arc4 := api.Arc{Aid: 4, Copyright: 1}
bangumi := feed.Bangumi{SeasonID: 1}
f1 := &feed.Feed{Archive: &arc1, ID: arc1.Aid}
f2 := &feed.Feed{Archive: &arc2, ID: arc2.Aid}
f3 := &feed.Feed{Bangumi: &bangumi, ID: bangumi.SeasonID, Type: feed.BangumiType}
blankf3 := &feed.Feed{ID: bangumi.SeasonID, Type: feed.BangumiType}
f4 := &feed.Feed{Archive: &arc4, ID: arc4.Aid}
blankf4 := &feed.Feed{ID: arc4.Aid}
Convey("replace bangumi feed", func() {
resfs := []*feed.Feed{f1, blankf3, f2}
fs := []*feed.Feed{f3}
svf.replaceFeeds(resfs, fs)
So(resfs, ShouldResemble, []*feed.Feed{f1, f3, f2})
})
Convey("replace archive feed", func() {
resfs := []*feed.Feed{f1, blankf4, f2}
fs := []*feed.Feed{f4}
svf.replaceFeeds(resfs, fs)
So(resfs, ShouldResemble, []*feed.Feed{f1, f4, f2})
})
Convey("blank feed", func() {
resfs := []*feed.Feed{f1, f2}
fs := []*feed.Feed{}
svf.replaceFeeds(resfs, fs)
So(resfs, ShouldResemble, []*feed.Feed{f1, f2})
})
}))
}

View File

@ -0,0 +1,213 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: ./service.go
// Package service is a generated GoMock package.
package service
import (
context "context"
model "go-common/app/interface/openplatform/article/model"
model0 "go-common/app/service/main/account/model"
"go-common/app/service/main/archive/api"
archive "go-common/app/service/main/archive/model/archive"
model1 "go-common/app/service/main/feed/model"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
)
// MockArcRPC is a mock of ArcRPC interface
type MockArcRPC struct {
ctrl *gomock.Controller
recorder *MockArcRPCMockRecorder
}
// MockArcRPCMockRecorder is the mock recorder for MockArcRPC
type MockArcRPCMockRecorder struct {
mock *MockArcRPC
}
// NewMockArcRPC creates a new mock instance
func NewMockArcRPC(ctrl *gomock.Controller) *MockArcRPC {
mock := &MockArcRPC{ctrl: ctrl}
mock.recorder = &MockArcRPCMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockArcRPC) EXPECT() *MockArcRPCMockRecorder {
return m.recorder
}
// Archive3 mocks base method
func (m *MockArcRPC) Archive3(c context.Context, arg *archive.ArgAid2) (*api.Arc, error) {
ret := m.ctrl.Call(m, "Archive3", c, arg)
ret0, _ := ret[0].(*api.Arc)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Archive3 indicates an expected call of Archive3
func (mr *MockArcRPCMockRecorder) Archive3(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Archive3", reflect.TypeOf((*MockArcRPC)(nil).Archive3), c, arg)
}
// Archives3 mocks base method
func (m *MockArcRPC) Archives3(c context.Context, arg *archive.ArgAids2) (map[int64]*api.Arc, error) {
ret := m.ctrl.Call(m, "Archives3", c, arg)
ret0, _ := ret[0].(map[int64]*api.Arc)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Archives3 indicates an expected call of Archives3
func (mr *MockArcRPCMockRecorder) Archives3(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Archives3", reflect.TypeOf((*MockArcRPC)(nil).Archives3), c, arg)
}
// UpsPassed2 mocks base method
func (m *MockArcRPC) UpsPassed2(c context.Context, arg *archive.ArgUpsArcs2) (map[int64][]*archive.AidPubTime, error) {
ret := m.ctrl.Call(m, "UpsPassed2", c, arg)
ret0, _ := ret[0].(map[int64][]*archive.AidPubTime)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpsPassed2 indicates an expected call of UpsPassed2
func (mr *MockArcRPCMockRecorder) UpsPassed2(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsPassed2", reflect.TypeOf((*MockArcRPC)(nil).UpsPassed2), c, arg)
}
// MockAccRPC is a mock of AccRPC interface
type MockAccRPC struct {
ctrl *gomock.Controller
recorder *MockAccRPCMockRecorder
}
// MockAccRPCMockRecorder is the mock recorder for MockAccRPC
type MockAccRPCMockRecorder struct {
mock *MockAccRPC
}
// NewMockAccRPC creates a new mock instance
func NewMockAccRPC(ctrl *gomock.Controller) *MockAccRPC {
mock := &MockAccRPC{ctrl: ctrl}
mock.recorder = &MockAccRPCMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockAccRPC) EXPECT() *MockAccRPCMockRecorder {
return m.recorder
}
// Attentions3 mocks base method
func (m *MockAccRPC) Attentions3(c context.Context, arg *model0.ArgMid) ([]int64, error) {
ret := m.ctrl.Call(m, "Attentions3", c, arg)
ret0, _ := ret[0].([]int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// Attentions3 indicates an expected call of Attentions3
func (mr *MockAccRPCMockRecorder) Attentions3(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attentions3", reflect.TypeOf((*MockAccRPC)(nil).Attentions3), c, arg)
}
// MockArtRPC is a mock of ArtRPC interface
type MockArtRPC struct {
ctrl *gomock.Controller
recorder *MockArtRPCMockRecorder
}
// MockArtRPCMockRecorder is the mock recorder for MockArtRPC
type MockArtRPCMockRecorder struct {
mock *MockArtRPC
}
// NewMockArtRPC creates a new mock instance
func NewMockArtRPC(ctrl *gomock.Controller) *MockArtRPC {
mock := &MockArtRPC{ctrl: ctrl}
mock.recorder = &MockArtRPCMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockArtRPC) EXPECT() *MockArtRPCMockRecorder {
return m.recorder
}
// UpsArtMetas mocks base method
func (m *MockArtRPC) UpsArtMetas(c context.Context, arg *model.ArgUpsArts) (map[int64][]*model.Meta, error) {
ret := m.ctrl.Call(m, "UpsArtMetas", c, arg)
ret0, _ := ret[0].(map[int64][]*model.Meta)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// UpsArtMetas indicates an expected call of UpsArtMetas
func (mr *MockArtRPCMockRecorder) UpsArtMetas(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsArtMetas", reflect.TypeOf((*MockArtRPC)(nil).UpsArtMetas), c, arg)
}
// ArticleMetas mocks base method
func (m *MockArtRPC) ArticleMetas(c context.Context, arg *model.ArgAids) (map[int64]*model.Meta, error) {
ret := m.ctrl.Call(m, "ArticleMetas", c, arg)
ret0, _ := ret[0].(map[int64]*model.Meta)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ArticleMetas indicates an expected call of ArticleMetas
func (mr *MockArtRPCMockRecorder) ArticleMetas(c, arg interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ArticleMetas", reflect.TypeOf((*MockArtRPC)(nil).ArticleMetas), c, arg)
}
// MockBangumi is a mock of Bangumi interface
type MockBangumi struct {
ctrl *gomock.Controller
recorder *MockBangumiMockRecorder
}
// MockBangumiMockRecorder is the mock recorder for MockBangumi
type MockBangumiMockRecorder struct {
mock *MockBangumi
}
// NewMockBangumi creates a new mock instance
func NewMockBangumi(ctrl *gomock.Controller) *MockBangumi {
mock := &MockBangumi{ctrl: ctrl}
mock.recorder = &MockBangumiMockRecorder{mock}
return mock
}
// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockBangumi) EXPECT() *MockBangumiMockRecorder {
return m.recorder
}
// BangumiPull mocks base method
func (m *MockBangumi) BangumiPull(c context.Context, mid int64, ip string) ([]int64, error) {
ret := m.ctrl.Call(m, "BangumiPull", c, mid, ip)
ret0, _ := ret[0].([]int64)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BangumiPull indicates an expected call of BangumiPull
func (mr *MockBangumiMockRecorder) BangumiPull(c, mid, ip interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BangumiPull", reflect.TypeOf((*MockBangumi)(nil).BangumiPull), c, mid, ip)
}
// BangumiSeasons mocks base method
func (m *MockBangumi) BangumiSeasons(c context.Context, seasonIDs []int64, ip string) (map[int64]*model1.Bangumi, error) {
ret := m.ctrl.Call(m, "BangumiSeasons", c, seasonIDs, ip)
ret0, _ := ret[0].(map[int64]*model1.Bangumi)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// BangumiSeasons indicates an expected call of BangumiSeasons
func (mr *MockBangumiMockRecorder) BangumiSeasons(c, seasonIDs, ip interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BangumiSeasons", reflect.TypeOf((*MockBangumi)(nil).BangumiSeasons), c, seasonIDs, ip)
}

View File

@ -0,0 +1,45 @@
package service
// ///////
// struct Resource {
// Key []string
// FoldFunc func
// list func(mid int64) ids // list API
// details(ids []int64) // detail API: fetch details by id
// expire int // cache TTL
// len int // cache length
// }
// Resources []Resource
/*
struct Item {
Key string
FoldFunc func([]*feed.Feed) ([]*feed.Feed)
List func(mid int64) (ids []int64) // list API
Items(ids []int64) // detail API: fetch details by id
expire int // cache TTL
len int // cache length
}
struct Resource {
[]*Item
}
func (s *Service) Feed(c context.Context, tid int64, mid int64, pn, ps int, ip string) (res []*feed.Feed, err error) {
switch tid {
case 1:
feed(c, &Resource{"article"}, mid, pn, ps)
}
}
func (s *Service) feed(c context.Context, *Resource, mid int64, pn, ps int) (res []*feed.Feed, err error) {
}
// app (archive + bangumi + article)
// app (archive + bangumi)
// web (archive + bangumi)
// article
// archive
// bangumi
// live?
*/
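// A hedged sketch (an addition, not part of the original notes) of how the
// Item abstraction above might look once concrete; every name below is
// hypothetical:
//
// type Item struct {
// Key    string
// Fold   func([]*feed.Feed) []*feed.Feed
// List   func(c context.Context, mid int64) ([]int64, error)        // list API
// Items  func(c context.Context, ids []int64) ([]*feed.Feed, error) // detail API
// Expire int32 // cache TTL
// Len    int   // cache length
// }
//
// // feed would then merge whatever sources a Resource declares instead of
// // hard-coding archive + bangumi.
// type Resource struct {
// Items []*Item
// }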

View File

@ -0,0 +1,92 @@
package service
import (
"context"
artmdl "go-common/app/interface/openplatform/article/model"
artrpc "go-common/app/interface/openplatform/article/rpc/client"
account "go-common/app/service/main/account/model"
accrpc "go-common/app/service/main/account/rpc/client"
"go-common/app/service/main/archive/api"
arcrpc "go-common/app/service/main/archive/api/gorpc"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/conf"
"go-common/app/service/main/feed/dao"
feedmdl "go-common/app/service/main/feed/model"
)
// Service struct info.
type Service struct {
c *conf.Config
dao *dao.Dao
arcRPC ArcRPC
accRPC AccRPC
artRPC ArtRPC
bangumi Bangumi
missch chan func()
}
//go:generate mockgen -source=./service.go -destination=mock_test.go -package=service
// ArcRPC is the subset of the archive RPC client that feed depends on.
type ArcRPC interface {
Archive3(c context.Context, arg *archive.ArgAid2) (res *api.Arc, err error)
Archives3(c context.Context, arg *archive.ArgAids2) (res map[int64]*api.Arc, err error)
UpsPassed2(c context.Context, arg *archive.ArgUpsArcs2) (res map[int64][]*archive.AidPubTime, err error)
}
// AccRPC is the subset of the account RPC client that feed depends on.
type AccRPC interface {
Attentions3(c context.Context, arg *account.ArgMid) (res []int64, err error)
}
// ArtRPC is the subset of the article RPC client that feed depends on.
type ArtRPC interface {
UpsArtMetas(c context.Context, arg *artmdl.ArgUpsArts) (res map[int64][]*artmdl.Meta, err error)
ArticleMetas(c context.Context, arg *artmdl.ArgAids) (res map[int64]*artmdl.Meta, err error)
}
// Bangumi abstracts the bangumi data source that feed depends on.
type Bangumi interface {
BangumiPull(c context.Context, mid int64, ip string) (seasonIDS []int64, err error)
BangumiSeasons(c context.Context, seasonIDs []int64, ip string) (psm map[int64]*feedmdl.Bangumi, err error)
}
// New new a Service and return.
func New(c *conf.Config) (s *Service) {
d := dao.New(c)
s = &Service{
c: c,
dao: d,
bangumi: d,
arcRPC: arcrpc.New2(c.ArchiveRPC),
accRPC: accrpc.New3(c.AccountRPC),
artRPC: artrpc.New(c.ArticleRPC),
missch: make(chan func(), 1000),
}
go s.cacheproc()
return
}
func (s *Service) addCache(fn func()) {
select {
case s.missch <- fn:
default:
dao.PromError("cache队列已满", "cacheproc chan full!!!")
}
}
func (s *Service) cacheproc() {
for i := 0; i < 10; i++ {
go func() {
for fn := range s.missch {
fn()
}
}()
}
}
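// Illustrative usage (an addition, not part of the original source): callers
// hand cache writes to the worker pool above instead of blocking the request
// path, e.g.
//
// s.addCache(func() {
// s.dao.AddFeedCache(context.Background(), ft, mid, feeds)
// })
//
// When all 1000 slots are full the write is dropped and reported via
// PromError rather than back-pressuring the caller.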
// Ping check server ok
func (s *Service) Ping(c context.Context) (err error) {
return s.dao.Ping(c)
}
// Close dao
func (s *Service) Close() {
s.dao.Close()
}

View File

@ -0,0 +1,106 @@
package service
import (
"context"
"flag"
"path/filepath"
"testing"
account "go-common/app/service/main/account/model"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/conf"
"go-common/app/service/main/feed/dao"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/cache/redis"
xtime "go-common/library/time"
"github.com/golang/mock/gomock"
. "github.com/smartystreets/goconvey/convey"
)
var (
_mid = int64(27515256)
_bangumiMid = int64(2)
_blankMid = int64(27515280)
_ip = "127.0.0.1"
_seasonID = int64(1)
_dataAV = int64(5463626)
_arc1 = &api.Arc{Aid: 1, PubDate: xtime.Time(1500262304)}
_arc2 = &api.Arc{Aid: 2, PubDate: xtime.Time(1500242304)}
_arc3 = &api.Arc{Aid: 3, PubDate: xtime.Time(1500222304)}
_arc4 = &api.Arc{Aid: 4, PubDate: xtime.Time(1500202304)}
_arc5 = &api.Arc{Aid: 5, PubDate: xtime.Time(1500162304)}
_arc6 = &api.Arc{Aid: 6, PubDate: xtime.Time(1500142304)}
)
var (
s *Service
)
func WithBlankService(f func(s *Service)) func() {
return func() {
ss := &Service{}
f(ss)
}
}
func CleanCache() {
c := context.TODO()
pool := redis.NewPool(conf.Conf.MultiRedis.Cache)
conn := pool.Get(c)
defer conn.Close()
conn.Do("FLUSHDB")
}
func init() {
dir, _ := filepath.Abs("../cmd/convey-test.toml")
flag.Set("conf", dir)
conf.Init()
s = &Service{
c: conf.Conf,
dao: dao.New(conf.Conf),
missch: make(chan func(), 1000),
}
go s.cacheproc()
}
func WithService(t *testing.T, f func(s *Service)) func() {
return func() {
mockCtrl := gomock.NewController(t)
arcMock := NewMockArcRPC(mockCtrl)
s.arcRPC = arcMock
arcMock.EXPECT().Archive3(gomock.Any(), gomock.Any()).Return(&api.Arc{Aid: 100, Author: api.Author{}}, nil).AnyTimes()
arcMock.EXPECT().Archives3(gomock.Any(), gomock.Any()).Return(map[int64]*api.Arc{
1: _arc1,
2: _arc2,
3: _arc3,
4: _arc4,
5: _arc5,
6: _arc6,
}, nil).AnyTimes()
arcMock.EXPECT().UpsPassed2(gomock.Any(), gomock.Any()).Return(map[int64][]*archive.AidPubTime{
1: []*archive.AidPubTime{&archive.AidPubTime{Aid: _arc1.Aid, PubDate: _arc1.PubDate}, &archive.AidPubTime{Aid: _arc2.Aid, PubDate: _arc2.PubDate}, &archive.AidPubTime{Aid: _arc3.Aid, PubDate: _arc3.PubDate}},
2: []*archive.AidPubTime{&archive.AidPubTime{Aid: _arc4.Aid, PubDate: _arc4.PubDate}, &archive.AidPubTime{Aid: _arc5.Aid, PubDate: _arc5.PubDate}, &archive.AidPubTime{Aid: _arc6.Aid, PubDate: _arc6.PubDate}},
}, nil).AnyTimes()
accMock := NewMockAccRPC(mockCtrl)
s.accRPC = accMock
accMock.EXPECT().Attentions3(gomock.Any(), &account.ArgMid{Mid: _mid}).Return([]int64{1, 2, 3}, nil).AnyTimes()
accMock.EXPECT().Attentions3(gomock.Any(), &account.ArgMid{Mid: _bangumiMid}).Return([]int64{1, 2, 3}, nil).AnyTimes()
accMock.EXPECT().Attentions3(gomock.Any(), &account.ArgMid{Mid: _blankMid}).Return([]int64{}, nil).AnyTimes()
banMock := NewMockBangumi(mockCtrl)
s.bangumi = banMock
banMock.EXPECT().BangumiPull(gomock.Any(), gomock.Eq(_bangumiMid), gomock.Any()).Return([]int64{1, 2, 3, 4, 5, 6}, nil).AnyTimes()
banMock.EXPECT().BangumiPull(gomock.Any(), gomock.Any(), gomock.Any()).Return([]int64{}, nil).AnyTimes()
banMock.EXPECT().BangumiSeasons(gomock.Any(), gomock.Any(), gomock.Any()).Return(map[int64]*feedmdl.Bangumi{
1: &feedmdl.Bangumi{Title: "title", SeasonID: 1, Ts: 1500142304},
2: &feedmdl.Bangumi{Title: "title", SeasonID: 2, Ts: 1500142304},
3: &feedmdl.Bangumi{Title: "title", SeasonID: 3, Ts: 1500142304},
4: &feedmdl.Bangumi{Title: "title", SeasonID: 4, Ts: 1500142304},
5: &feedmdl.Bangumi{Title: "title", SeasonID: 5, Ts: 1500142304},
6: &feedmdl.Bangumi{Title: "title", SeasonID: 6, Ts: 1500142304},
}, nil).AnyTimes()
Reset(func() { CleanCache() })
f(s)
mockCtrl.Finish()
}
}

View File

@ -0,0 +1,122 @@
package service
import (
"context"
"go-common/app/service/main/feed/dao"
"go-common/app/service/main/feed/model"
"sync/atomic"
"time"
"go-common/app/service/main/archive/model/archive"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/log"
xtime "go-common/library/time"
"go-common/library/sync/errgroup"
)
// UnreadCount gets the count of unread archives, plus unread bangumi updates unless withoutBangumi is set.
func (s *Service) UnreadCount(c context.Context, app bool, withoutBangumi bool, mid int64, ip string) (count int, err error) {
var (
t int64
last time.Time
pullInterval xtime.Duration
marcs map[int64][]*archive.AidPubTime
minTotalCount int
bangumiFeeds []*feedmdl.Feed
unreadCount int64
ft = model.FeedType(app)
)
if t, err = s.dao.LastAccessCache(c, ft, mid); err != nil {
dao.PromError("未读数获取上次访问时间缓存", "s.dao.LastAccessCache(app:%v, mid:%v, err: %v", app, mid, err)
return 0, nil
}
if t == 0 {
return
}
last = time.Unix(t, 0)
pullInterval = s.pullInterval(ft)
if time.Since(last) < time.Duration(pullInterval) {
count, _ = s.dao.UnreadCountCache(c, ft, mid)
return
}
group, errCtx := errgroup.WithContext(c)
group.Go(func() error {
if app {
minTotalCount = s.c.Feed.AppLength
} else {
minTotalCount = s.c.Feed.WebLength
}
if marcs, err = s.attenUpArcs(errCtx, minTotalCount, mid, ip); err != nil {
dao.PromError("未读数attenUpArcs", "s.attenUpArcs(count:%v, mid:%v, err: %v", minTotalCount, mid, err)
err = nil
return nil
}
for _, arcs := range marcs {
for _, arc := range arcs {
if int64(arc.PubDate) > t {
atomic.AddInt64(&unreadCount, 1)
}
}
}
return nil
})
group.Go(func() error {
if withoutBangumi {
return nil
}
bangumiFeeds = s.genBangumiFeed(errCtx, mid, ip)
for _, f := range bangumiFeeds {
if int64(f.PubDate) > t {
atomic.AddInt64(&unreadCount, 1)
}
}
return nil
})
group.Wait()
count = int(unreadCount)
if count > s.c.Feed.MaxTotalCnt {
count = s.c.Feed.MaxTotalCnt
}
s.addCache(func() {
s.dao.AddUnreadCountCache(context.Background(), ft, mid, count)
})
return
}
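// exampleShouldRecount is an illustrative sketch (an addition, not part of
// the original source) of the gating above: within pullInterval of the last
// access the cached unread count is served instead of recomputing it.
func exampleShouldRecount(last time.Time, pullInterval time.Duration) bool {
return time.Since(last) >= pullInterval
}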
// ArticleUnreadCount gets the count of unread articles.
func (s *Service) ArticleUnreadCount(c context.Context, mid int64, ip string) (count int, err error) {
var (
t int64
last time.Time
pullInterval xtime.Duration
ft = model.TypeArt
)
if t, err = s.dao.LastAccessCache(c, ft, mid); err != nil {
log.Error("s.dao.LastAccessCache(app:%v, mid:%v), err: %v", ft, mid, err)
return 0, nil
}
if t == 0 {
// no access, no unread
return
}
last = time.Unix(t, 0)
pullInterval = s.pullInterval(ft)
if time.Since(last) < time.Duration(pullInterval) {
count, _ = s.dao.UnreadCountCache(c, ft, mid)
return
}
res := s.genArticleFeed(c, mid, s.c.Feed.ArticleFeedLength, ip)
for _, f := range res {
if int64(f.PublishTime) > t {
count++
}
}
if count > s.c.Feed.MaxTotalCnt {
count = s.c.Feed.MaxTotalCnt
}
s.addCache(func() {
s.dao.AddUnreadCountCache(context.Background(), ft, mid, count)
})
return
}

View File

@ -0,0 +1,25 @@
package service
import (
"context"
"testing"
. "github.com/smartystreets/goconvey/convey"
)
func Test_UnreadCount(t *testing.T) {
Convey("app should return without err", t, WithService(t, func(svf *Service) {
_, err := svf.UnreadCount(context.TODO(), true, false, _mid, _ip)
So(err, ShouldBeNil)
}))
Convey("app without bangumi should return without err", t, WithService(t, func(svf *Service) {
_, err := svf.UnreadCount(context.TODO(), true, true, _mid, _ip)
So(err, ShouldBeNil)
}))
Convey("web should return without err", t, WithService(t, func(svf *Service) {
_, err := svf.UnreadCount(context.TODO(), false, false, _mid, _ip)
So(err, ShouldBeNil)
}))
}