Create & Init Project...

2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

app/service/main/feed/dao/BUILD

@@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"bangumi_test.go",
"dao_test.go",
"memcache_test.go",
"redis_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/time:go_default_library",
"//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"bangumi.go",
"dao.go",
"memcache.go",
"redis.go",
],
importpath = "go-common/app/service/main/feed/dao",
tags = ["automanaged"],
deps = [
"//app/interface/openplatform/article/model:go_default_library",
"//app/service/main/archive/api:go_default_library",
"//app/service/main/archive/model/archive:go_default_library",
"//app/service/main/feed/conf:go_default_library",
"//app/service/main/feed/model:go_default_library",
"//library/cache/memcache:go_default_library",
"//library/cache/redis:go_default_library",
"//library/ecode:go_default_library",
"//library/log:go_default_library",
"//library/net/http/blademaster:go_default_library",
"//library/stat/prom:go_default_library",
"//library/sync/errgroup:go_default_library",
"//library/time:go_default_library",
"//library/xstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
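These Bazel targets carry the automanaged tag, i.e. they are kept in sync by tooling rather than edited by hand. Assuming the go-common checkout is the Bazel workspace root, the tests behind go_default_test can be run with a command along the lines of: bazel test //app/service/main/feed/dao:go_default_test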

app/service/main/feed/dao/bangumi.go

@@ -0,0 +1,75 @@
package dao
import (
"context"
"net/url"
"strconv"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/xstr"
)
const (
_bangumiURL = "http://bangumi.bilibili.co"
_pullURL = _bangumiURL + "/internal_api/follow_pull"
_pullSeasonsURL = _bangumiURL + "/internal_api/follow_seasons"
)
// BangumiPull pull bangumi feed.
func (d *Dao) BangumiPull(c context.Context, mid int64, ip string) (seasonIDS []int64, err error) {
params := url.Values{}
params.Set("mid", strconv.FormatInt(mid, 10))
var res struct {
Code int `json:"code"`
Result []*feedmdl.Pull `json:"result"`
}
if err = d.httpClient.Get(c, _pullURL, ip, params, &res); err != nil {
PromWarn("bangumi:Pull接口")
log.Error("d.client.Get(%s) error(%v)", _pullURL+"?"+params.Encode(), err)
return
}
if res.Code != 0 {
PromWarn("bangumi:Pull接口")
log.Error("url(%s) res code(%d) or res.result(%v)", _pullURL+"?"+params.Encode(), res.Code, res.Result)
err = ecode.Int(res.Code)
return
}
for _, r := range res.Result {
seasonIDS = append(seasonIDS, r.SeasonID)
}
return
}
// BangumiSeasons get bangumi info by seasonids.
func (d *Dao) BangumiSeasons(c context.Context, seasonIDs []int64, ip string) (psm map[int64]*feedmdl.Bangumi, err error) {
if len(seasonIDs) == 0 {
return
}
params := url.Values{}
params.Set("season_ids", xstr.JoinInts(seasonIDs))
var res struct {
Code int `json:"code"`
Result []*feedmdl.Bangumi `json:"result"`
}
if err = d.httpClient.Get(c, _pullSeasonsURL, ip, params, &res); err != nil {
PromWarn("bangumi:详情接口")
log.Error("d.client.Get(%s) error(%v)", _pullSeasonsURL+"?"+params.Encode(), err)
return
}
if res.Code != 0 {
PromWarn("bangumi:详情接口")
log.Error("url(%s) res code(%d) or res.result(%v)", _pullSeasonsURL+"?"+params.Encode(), res.Code, res.Result)
err = ecode.Int(res.Code)
return
}
psm = make(map[int64]*feedmdl.Bangumi, len(res.Result))
for _, p := range res.Result {
if p == nil {
continue
}
psm[p.SeasonID] = p
}
return
}
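// A minimal sketch of how the two calls above compose, assuming a *Dao d built
// by New and reusing this file's imports. followedBangumis is a hypothetical
// helper, not part of the package.
func followedBangumis(c context.Context, d *Dao, mid int64, ip string) (map[int64]*feedmdl.Bangumi, error) {
	seasonIDs, err := d.BangumiPull(c, mid, ip) // follow_pull: mid -> followed season ids
	if err != nil {
		return nil, err
	}
	return d.BangumiSeasons(c, seasonIDs, ip) // follow_seasons: ids -> bangumi details
}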

app/service/main/feed/dao/bangumi_test.go

@@ -0,0 +1,38 @@
package dao
import (
"context"
"testing"
"github.com/smartystreets/goconvey/convey"
)
func TestDaoBangumiPull(t *testing.T) {
var (
c = context.Background()
mid = int64(2)
ip = ""
)
convey.Convey("BangumiPull", t, func(ctx convey.C) {
seasonIDS, err := d.BangumiPull(c, mid, ip)
ctx.Convey("Then err should be nil.seasonIDS should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(seasonIDS, convey.ShouldBeNil)
})
})
}
func TestDaoBangumiSeasons(t *testing.T) {
var (
c = context.Background()
seasonIDs = []int64{5735, 5714, 5702, 5725}
ip = ""
)
convey.Convey("BangumiSeasons", t, func(ctx convey.C) {
psm, err := d.BangumiSeasons(c, seasonIDs, ip)
ctx.Convey("Then err should be nil.psm should not be nil.", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(psm, convey.ShouldNotBeNil)
})
})
}

app/service/main/feed/dao/dao.go

@@ -0,0 +1,108 @@
package dao
import (
"context"
"time"
"go-common/app/service/main/feed/conf"
"go-common/library/cache/memcache"
"go-common/library/cache/redis"
"go-common/library/log"
bm "go-common/library/net/http/blademaster"
"go-common/library/stat/prom"
)
var (
// CachedCount .
CachedCount = prom.CacheHit
// MissedCount .
MissedCount = prom.CacheMiss
infosCount = prom.BusinessInfoCount
warnsCount = prom.BusinessErrCount
)
// PromError stat and log.
func PromError(name string, format string, args ...interface{}) {
prom.BusinessErrCount.Incr(name)
log.Error(format, args...)
}
// PromInfo add prom info
func PromInfo(name string) {
infosCount.Incr(name)
}
// PromWarn add prom warn
func PromWarn(name string) {
warnsCount.Incr(name)
}
// Dao struct info of Dao.
type Dao struct {
// redis
redis *redis.Pool
redisTTLUpper int32
redisExpireUpper int32
redisExpireFeed int32
redisExpireArchiveFeed int32
redisExpireBangumiFeed int32
// memcache
mc *memcache.Pool
mcExpire int32
bangumiExpire int32
// feed Config
appFeedLength int
webFeedLength int
// conf
c *conf.Config
// bangumi http client
httpClient *bm.Client
}
// New new a Dao and return.
func New(c *conf.Config) (d *Dao) {
d = &Dao{
// conf
c: c,
// redis
redis: redis.NewPool(c.MultiRedis.Cache),
redisTTLUpper: int32(time.Duration(c.MultiRedis.TTLUpper) / time.Second),
redisExpireUpper: int32(time.Duration(c.MultiRedis.ExpireUpper) / time.Second),
redisExpireFeed: int32(time.Duration(c.MultiRedis.ExpireFeed) / time.Second),
redisExpireArchiveFeed: int32(time.Duration(c.Feed.ArchiveFeedExpire) / time.Second),
redisExpireBangumiFeed: int32(time.Duration(c.Feed.BangumiFeedExpire) / time.Second),
// mc
mc: memcache.NewPool(c.Memcache.Config),
mcExpire: int32(time.Duration(c.Memcache.Expire) / time.Second),
bangumiExpire: int32(time.Duration(c.Memcache.Expire) / time.Second),
// feed Config
appFeedLength: c.Feed.AppLength,
webFeedLength: c.Feed.WebLength,
httpClient: bm.NewClient(c.HTTPClient),
}
if d.appFeedLength == 0 {
d.appFeedLength = 200
}
if d.webFeedLength == 0 {
d.webFeedLength = 400
}
return
}
// Ping ping health of redis and mc.
func (d *Dao) Ping(c context.Context) (err error) {
if err = d.pingRedis(c); err != nil {
return
}
return d.pingMC(c)
}
// Close close connections of redis and mc.
func (d *Dao) Close() {
if d.redis != nil {
d.redis.Close()
}
if d.mc != nil {
d.mc.Close()
}
}
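// A minimal usage sketch of the Dao lifecycle, assuming conf.Init has already
// populated conf.Conf (as the test bootstrap in the next file does):
//
//	d := New(conf.Conf)
//	defer d.Close()
//	if err := d.Ping(context.Background()); err != nil {
//		log.Error("feed-service dao ping error(%v)", err)
//	}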

app/service/main/feed/dao/dao_test.go

@@ -0,0 +1,35 @@
package dao
import (
"flag"
"go-common/app/service/main/feed/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.web-svr.feed-service")
flag.Set("conf_token", "a7fb455a62b8ba2cd751211b6781ef90")
flag.Set("tree_id", "23908")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/feed-service-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
m.Run()
os.Exit(0)
}
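With the local branch of TestMain above (DEPLOY_ENV unset), configuration is read from ../cmd/feed-service-test.toml; assuming that file exists and the backing redis, memcache and HTTP endpoints are reachable, the suite can be run from this directory with a plain go test -v.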

app/service/main/feed/dao/memcache.go

@@ -0,0 +1,200 @@
package dao
import (
"context"
"strconv"
"sync"
"go-common/app/service/main/archive/api"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/cache/memcache"
"go-common/library/sync/errgroup"
)
const (
_prefixArc = "ap_"
_prefixBangumi = "bp_"
_bulkSize = 100
)
func arcKey(aid int64) string {
return _prefixArc + strconv.FormatInt(aid, 10)
}
func bangumiKey(bid int64) string {
return _prefixBangumi + strconv.FormatInt(bid, 10)
}
// pingMC ping memcache.
func (d *Dao) pingMC(c context.Context) (err error) {
conn := d.mc.Get(c)
item := memcache.Item{Key: "ping", Value: []byte{1}, Expiration: d.mcExpire}
err = conn.Set(&item)
conn.Close()
return
}
// AddArchivesCache batch set archives cache.
func (d *Dao) AddArchivesCache(c context.Context, vs ...*api.Arc) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
for _, v := range vs {
if v == nil {
continue
}
item := memcache.Item{Key: arcKey(v.Aid), Object: v, Flags: memcache.FlagProtobuf, Expiration: d.mcExpire}
if err = conn.Set(&item); err != nil {
PromError("mc:增加稿件缓存", "conn.Store(%s) error(%v)", arcKey(v.Aid), err)
return
}
}
return
}
// AddArchivesCacheMap batch set archives cache.
func (d *Dao) AddArchivesCacheMap(c context.Context, arcm map[int64]*api.Arc) (err error) {
var arcs []*api.Arc
for _, arc := range arcm {
arcs = append(arcs, arc)
}
return d.AddArchivesCache(c, arcs...)
}
// ArchivesCache batch get archive from cache.
func (d *Dao) ArchivesCache(c context.Context, aids []int64) (cached map[int64]*api.Arc, missed []int64, err error) {
if len(aids) == 0 {
return
}
cached = make(map[int64]*api.Arc, len(aids))
allKeys := make([]string, 0, len(aids))
aidmap := make(map[string]int64, len(aids))
for _, aid := range aids {
k := arcKey(aid)
allKeys = append(allKeys, k)
aidmap[k] = aid
}
group, errCtx := errgroup.WithContext(c)
mutex := sync.Mutex{}
keysLen := len(allKeys)
for i := 0; i < keysLen; i += _bulkSize {
var keys []string
if (i + _bulkSize) > keysLen {
keys = allKeys[i:]
} else {
keys = allKeys[i : i+_bulkSize]
}
group.Go(func() (err error) {
conn := d.mc.Get(errCtx)
replys, err := conn.GetMulti(keys)
defer conn.Close()
if err != nil {
PromError("mc:获取稿件缓存", "conn.Gets(%v) error(%v)", keys, err)
err = nil
return
}
for _, reply := range replys {
arc := &api.Arc{}
if err = conn.Scan(reply, arc); err != nil {
PromError("获取稿件缓存json解析", "json.Unmarshal(%v) error(%v)", reply.Value, err)
err = nil
continue
}
mutex.Lock()
cached[aidmap[reply.Key]] = arc
delete(aidmap, reply.Key)
mutex.Unlock()
}
return
})
}
group.Wait()
missed = make([]int64, 0, len(aidmap))
for _, aid := range aidmap {
missed = append(missed, aid)
}
MissedCount.Add("archive", int64(len(missed)))
CachedCount.Add("archive", int64(len(cached)))
return
}
// DelArchiveCache delete archive cache.
func (d *Dao) DelArchiveCache(c context.Context, aid int64) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
if err = conn.Delete(arcKey(aid)); err != nil {
if err == memcache.ErrNotFound {
err = nil
} else {
PromError("mc:删除稿件缓存", "conn.Delete(%s) error(%v)", arcKey(aid), err)
return
}
}
return
}
// AddBangumisCacheMap batch set bangumis cache.
func (d *Dao) AddBangumisCacheMap(c context.Context, bm map[int64]*feedmdl.Bangumi) (err error) {
var bs []*feedmdl.Bangumi
for _, b := range bm {
bs = append(bs, b)
}
return d.AddBangumisCache(c, bs...)
}
// AddBangumisCache batch set bangumi cache.
func (d *Dao) AddBangumisCache(c context.Context, bs ...*feedmdl.Bangumi) (err error) {
conn := d.mc.Get(c)
defer conn.Close()
for _, b := range bs {
if b == nil {
continue
}
item := memcache.Item{Key: bangumiKey(b.SeasonID), Object: b, Flags: memcache.FlagProtobuf, Expiration: d.bangumiExpire}
if err = conn.Set(&item); err != nil {
PromError("mc:增加番剧缓存", "conn.Store(%s) error(%v)", bangumiKey(b.SeasonID), err)
return
}
}
return
}
// BangumisCache batch get bangumis from cache.
func (d *Dao) BangumisCache(c context.Context, bids []int64) (cached map[int64]*feedmdl.Bangumi, missed []int64, err error) {
cached = make(map[int64]*feedmdl.Bangumi, len(bids))
if len(bids) == 0 {
return
}
keys := make([]string, 0, len(bids))
bidmap := make(map[string]int64, len(bids))
for _, bid := range bids {
k := bangumiKey(bid)
keys = append(keys, k)
bidmap[k] = bid
}
conn := d.mc.Get(c)
defer conn.Close()
replys, err := conn.GetMulti(keys)
if err != nil {
PromError("mc:获取番剧", "conn.Gets(%v) error(%v)", keys, err)
return
}
for _, reply := range replys {
b := &feedmdl.Bangumi{}
if err = conn.Scan(reply, b); err != nil {
PromError("获取番剧json解析", "json.Unmarshal(%v) error(%v)", reply.Value, err)
return
}
cached[bidmap[reply.Key]] = b
delete(bidmap, reply.Key)
}
missed = make([]int64, 0, len(bidmap))
for _, bid := range bidmap {
missed = append(missed, bid)
}
MissedCount.Add("bangumi", int64(len(missed)))
CachedCount.Add("bangumi", int64(len(cached)))
return
}
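// ArchivesCache and BangumisCache are designed to pair with their Add*
// counterparts in a cache-aside flow: read the cache, load whatever was missed
// from the source of truth, then backfill. A minimal sketch, where
// arcsWithCache and fetchArcs are hypothetical names standing in for the real
// callers:
func (d *Dao) arcsWithCache(c context.Context, aids []int64,
	fetchArcs func(context.Context, []int64) (map[int64]*api.Arc, error)) (map[int64]*api.Arc, error) {
	cached, missed, err := d.ArchivesCache(c, aids)
	if err != nil {
		return nil, err
	}
	if len(missed) == 0 {
		return cached, nil
	}
	fresh, err := fetchArcs(c, missed) // e.g. an archive-service RPC
	if err != nil {
		return cached, err
	}
	for aid, arc := range fresh {
		cached[aid] = arc
	}
	_ = d.AddArchivesCacheMap(c, fresh) // best-effort backfill of memcache
	return cached, nil
}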

app/service/main/feed/dao/memcache_test.go

@@ -0,0 +1,59 @@
package dao
import (
"context"
"testing"
"go-common/app/service/main/archive/api"
feed "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_ArchivesCache(t *testing.T) {
arc := api.Arc{Aid: 1, PubDate: xtime.Time(100), Title: "title"}
c := context.TODO()
Convey("add cache", t, func() {
err := d.AddArchivesCacheMap(c, map[int64]*api.Arc{1: &arc})
So(err, ShouldBeNil)
Convey("get cache return cached data", func() {
cached, missed, err := d.ArchivesCache(c, []int64{1})
So(err, ShouldBeNil)
So(missed, ShouldBeEmpty)
So(cached, ShouldResemble, map[int64]*api.Arc{1: &arc})
})
Convey("del cache return null", func() {
err := d.DelArchiveCache(c, 1)
So(err, ShouldBeNil)
cached, missed, err := d.ArchivesCache(c, []int64{1})
So(err, ShouldBeNil)
So(cached, ShouldBeEmpty)
So(missed, ShouldResemble, []int64{1})
})
})
}
func Test_BangumiCache(t *testing.T) {
bangumi := feed.Bangumi{SeasonID: 1, Title: "t"}
c := context.TODO()
Convey("add cache", t, func() {
err := d.AddBangumisCacheMap(c, map[int64]*feed.Bangumi{1: &bangumi})
So(err, ShouldBeNil)
Convey("get cache return cached data", func() {
cached, missed, err := d.BangumisCache(c, []int64{1})
So(err, ShouldBeNil)
So(missed, ShouldBeEmpty)
So(cached, ShouldResemble, map[int64]*feed.Bangumi{1: &bangumi})
})
Convey("return missed", func() {
miss := int64(2000)
cached, missed, err := d.BangumisCache(c, []int64{miss})
So(err, ShouldBeNil)
So(cached, ShouldBeEmpty)
So(missed, ShouldResemble, []int64{miss})
})
})
}

app/service/main/feed/dao/redis.go

@@ -0,0 +1,647 @@
package dao
import (
"context"
"fmt"
"strconv"
artmdl "go-common/app/interface/openplatform/article/model"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/model"
feedmdl "go-common/app/service/main/feed/model"
"go-common/library/cache/redis"
"go-common/library/log"
"go-common/library/time"
"go-common/library/xstr"
)
const (
_prefixUpper = "ua_" // upper's archive list
_prefixAppFeed = "af_" // user's app feed list
_prefixWebFeed = "wf_" // user's web feed list
_prefixAppLast = "al_" // user's last access
_prefixWebLast = "wl_" // user's last access
_prefixArtLast = "tl_" // user's last access of article
_prefixBangumiFeed = "banf_" // user's bangumi feed list
_prefixArchiveFeed = "arcf_" // user's archive feed list
_prefixArticleFeed = "artf_" // user's article feed list
_prefixAppUnreadCount = "ac_" // user's app unread count
_prefixWebUnreadCount = "wc_" // user's web unread count
_prefixArtUnreadCount = "tc_" // user's article unread count
)
func upperKey(mid int64) string {
return _prefixUpper + strconv.FormatInt(mid, 10)
}
func bangumiFeedKey(mid int64) string {
return _prefixBangumiFeed + strconv.FormatInt(mid, 10)
}
func archiveFeedKey(mid int64) string {
return _prefixArchiveFeed + strconv.FormatInt(mid, 10)
}
func from(i int64) (time.Time, int8) {
return time.Time(i >> 8), int8(i & 0xff)
}
func combine(t time.Time, copyright int8) int64 {
return int64(t)<<8 | int64(copyright)
}
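// exampleScorePacking is an illustrative sketch, not a helper used by the
// package: combine stores the copyright flag in the low 8 bits of the
// sorted-set score and the xtime.Time Unix seconds in the remaining high bits,
// so ZREVRANGE ordering is still dominated by publish time; from reverses the
// packing.
func exampleScorePacking() {
	score := combine(time.Time(1555926556), 1) // 1555926556<<8 | 1 == 398317198337
	t, copyright := from(score)                // t == 1555926556, copyright == 1
	_, _ = t, copyright
}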
func feedKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid, 10)
if ft == model.TypeApp {
return _prefixAppFeed + midStr
} else if ft == model.TypeWeb {
return _prefixWebFeed + midStr
} else {
return _prefixArticleFeed + midStr
}
}
func unreadCountKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid%100000, 10)
if ft == model.TypeApp {
return _prefixAppUnreadCount + midStr
} else if ft == model.TypeWeb {
return _prefixWebUnreadCount + midStr
} else {
return _prefixArtUnreadCount + midStr
}
}
func lastKey(ft int, mid int64) string {
midStr := strconv.FormatInt(mid%100000, 10)
if ft == model.TypeApp {
return _prefixAppLast + midStr
} else if ft == model.TypeWeb {
return _prefixWebLast + midStr
} else {
return _prefixArtLast + midStr
}
}
// appFeedValue converts a Feed to a string in the format "type,id,foldAid,foldAid,..."
func appFeedValue(f *feedmdl.Feed) string {
ids := []int64{f.Type, f.ID}
for _, arc := range f.Fold {
ids = append(ids, arc.Aid)
}
return xstr.JoinInts(ids)
}
func recoverFeed(idsStr string) (fe *feedmdl.Feed, err error) {
var (
aid int64
ids []int64
)
if ids, err = xstr.SplitInts(idsStr); err != nil {
return
}
if len(ids) < 2 {
err = fmt.Errorf("recoverFeed failed idsStr(%v)", idsStr)
return
}
fe = &feedmdl.Feed{Type: ids[0], ID: ids[1]}
for _, aid = range ids[2:] {
fe.Fold = append(fe.Fold, &api.Arc{Aid: aid})
}
return
}
// pingRedis ping redis.
func (d *Dao) pingRedis(c context.Context) (err error) {
conn := d.redis.Get(c)
if _, err = conn.Do("SET", "PING", "PONG"); err != nil {
PromError("redis: ping remote", "remote redis: conn.Do(SET,PING,PONG) error(%v)", err)
}
conn.Close()
return
}
// LastAccessCache get last access time of user.
func (d *Dao) LastAccessCache(c context.Context, ft int, mid int64) (t int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := lastKey(ft, mid)
if t, err = redis.Int64(conn.Do("HGET", key, mid)); err != nil {
if err == redis.ErrNil {
err = nil
} else {
PromError("redis:获取上次访问时间", "conn.Do(HGET, %s, %s) error(%v)", key, mid, err)
}
}
return
}
// AddLastAccessCache add user's last access time.
func (d *Dao) AddLastAccessCache(c context.Context, ft int, mid int64, t int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := lastKey(ft, mid)
if _, err = conn.Do("HSET", key, mid, t); err != nil {
PromError("redis:增加上次访问时间", "conn.DO(HSET, %s, %d, %d) error(%v)", key, mid, t, err)
}
return
}
// ExpireFeedCache expire the user feed key.
func (d *Dao) ExpireFeedCache(c context.Context, ft int, mid int64) (ok bool, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if ok, err = redis.Bool(conn.Do("EXPIRE", key, d.redisExpireFeed)); err != nil {
PromError("redis:Feed缓存设定过期", "conn.Do(EXPIRE, %s, %d) error(%v)", key, d.redisExpireFeed, err)
}
return
}
// PurgeFeedCache purge the user feed key.
func (d *Dao) PurgeFeedCache(c context.Context, ft int, mid int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if _, err = redis.Bool(conn.Do("DEL", key)); err != nil {
PromError("redis:删除feed", "conn.Do(DEL, %s, %d) error(%v)", key, err)
}
return
}
// FeedCache get upper feed by cache.
func (d *Dao) FeedCache(c context.Context, ft int, mid int64, start, end int) (as []*feedmdl.Feed, bids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
as = make([]*feedmdl.Feed, 0, len(vs))
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
fe, err = recoverFeed(idsStr)
if err != nil {
PromError("恢复feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
as = append(as, fe)
switch fe.Type {
case feedmdl.BangumiType:
bids = append(bids, fe.ID)
}
}
}
return
}
// AddFeedCache add upper feed cache.
func (d *Dao) AddFeedCache(c context.Context, ft int, mid int64, as []*feedmdl.Feed) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(ft, mid)
if err = conn.Send("DEL", key); err != nil {
PromError("redis:删除feed缓存", "conn.Send(DEL, %s) error(%v)", key, err)
return
}
commondLen := 1
if len(as) > 0 {
var feedLen int
if ft == model.TypeApp {
feedLen = d.appFeedLength
} else {
feedLen = d.webFeedLength
}
if len(as) > feedLen {
as = as[:feedLen]
}
commonds := []interface{}{key}
for _, appFeed := range as {
ts := appFeed.PubDate.Time().Unix()
feedValue := appFeedValue(appFeed)
commonds = append(commonds, ts, feedValue)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
commondLen++
if err = conn.Send("EXPIRE", key, d.redisExpireFeed); err != nil {
PromError("redis:expire-feed缓存", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireFeed, err)
return
}
commondLen++
}
if err = conn.Flush(); err != nil {
PromError("redis:feed缓存flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < commondLen; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:feed缓存receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// ExpireUppersCache expire the upper key.
func (d *Dao) ExpireUppersCache(c context.Context, mids []int64) (res map[int64]bool, err error) {
conn := d.redis.Get(c)
defer conn.Close()
res = make(map[int64]bool, len(mids))
for _, mid := range mids {
if err = conn.Send("TTL", upperKey(mid)); err != nil {
PromError("redis:up主ttl", "conn.Send(TTL, %s) error(%v)", upperKey(mid), err)
return
}
}
if err = conn.Flush(); err != nil {
PromError("redis:up主flush", "conn.Flush error(%v)", err)
return
}
var state int64
for _, mid := range mids {
if state, err = redis.Int64(conn.Receive()); err != nil {
PromError("redis:up主receive", "conn.Receive() error(%v)", err)
return
}
if int32(state) > (d.redisTTLUpper - d.redisExpireUpper) {
res[mid] = true
} else {
res[mid] = false
}
}
return
}
// UppersCaches batch get new archives of uppers by cache.
func (d *Dao) UppersCaches(c context.Context, mids []int64, start, end int) (res map[int64][]*archive.AidPubTime, err error) {
conn := d.redis.Get(c)
defer conn.Close()
res = make(map[int64][]*archive.AidPubTime, len(mids))
for _, mid := range mids {
if err = conn.Send("ZREVRANGE", upperKey(mid), start, end, "withscores"); err != nil {
PromError("redis:获取up主", "conn.Send(%s) error(%v)", upperKey(mid), err)
return
}
}
if err = conn.Flush(); err != nil {
PromError("redis:获取up主flush", "conn.Flush error(%v)", err)
return
}
for _, mid := range mids {
values, err := redis.Values(conn.Receive())
if err != nil {
PromError("redis:获取up主receive", "conn.Send(ZREVRANGE, %d) error(%v)", mid, err)
err = nil
continue
}
for len(values) > 0 {
arc := archive.AidPubTime{}
var score int64
if values, err = redis.Scan(values, &arc.Aid, &score); err != nil {
PromError("redis:scan UP主", "redis.Scan() error(%v)", err)
err = nil
continue
}
arc.PubDate, arc.Copyright = from(score)
res[mid] = append(res[mid], &arc)
}
}
CachedCount.Add("up", int64(len(res)))
return
}
// AddUpperCaches batch add passed archive of upper.
// set max num of upper's passed list.
func (d *Dao) AddUpperCaches(c context.Context, mArcs map[int64][]*archive.AidPubTime) (err error) {
var (
mid int64
arcs []*archive.AidPubTime
conn = d.redis.Get(c)
count int
)
defer conn.Close()
if len(mArcs) == 0 {
return
}
for mid, arcs = range mArcs {
if len(arcs) == 0 {
continue
}
key := upperKey(mid)
if err = conn.Send("DEL", key); err != nil {
PromError("redis:删除up主缓存", "conn.Send(DEL, %s) error(%v)", key, err)
return
}
count++
for _, arc := range arcs {
score := combine(arc.PubDate, arc.Copyright)
if err = conn.Send("ZADD", key, "CH", score, arc.Aid); err != nil {
PromError("redis:增加up主缓存", "conn.Send(ZADD, %s, %d, %d) error(%v)", key, arc.Aid, err)
return
}
count++
}
if err = conn.Send("ZREMRANGEBYRANK", key, 0, -(d.c.MultiRedis.MaxArcsNum + 1)); err != nil {
PromError("redis:清理up主缓存", "conn.Send(ZREMRANGEBYRANK, %s) error(%v)", key, err)
return
}
count++
if err = conn.Send("EXPIRE", key, d.redisTTLUpper); err != nil {
PromError("redis:expireup主缓存", "conn.Send(EXPIRE, %s, %v) error(%v)", key, d.redisTTLUpper, err)
return
}
count++
}
if err = conn.Flush(); err != nil {
PromError("redis:增加up主flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < count; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:增加up主receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// AddUpperCache .
func (d *Dao) AddUpperCache(c context.Context, mid int64, arc *archive.AidPubTime) (err error) {
var conn = d.redis.Get(c)
defer conn.Close()
key := upperKey(mid)
score := combine(arc.PubDate, arc.Copyright)
if err = conn.Send("ZADD", key, "CH", score, arc.Aid); err != nil {
PromError("redis:增加up主缓存", "conn.Send(ZADD, %s, %d, %d) error(%v)", key, arc.Aid, err)
return
}
if err = conn.Send("ZREMRANGEBYRANK", key, 0, -(d.c.MultiRedis.MaxArcsNum + 1)); err != nil {
PromError("redis:清理up主缓存", "conn.Send(ZREMRANGEBYRANK, %s) error(%v)", key, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:增加up主flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:增加up主receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// DelUpperCache delete archive of upper cache.
func (d *Dao) DelUpperCache(c context.Context, mid int64, aid int64) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if _, err = conn.Do("ZREM", upperKey(mid), aid); err != nil {
PromError("redis:删除up主", "conn.Do(ZERM, %s, %d) error(%v)", upperKey(mid), aid, err)
}
return
}
// AddArchiveFeedCache add archive feed cache.
func (d *Dao) AddArchiveFeedCache(c context.Context, mid int64, as []*feedmdl.Feed) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if len(as) == 0 {
return
}
if len(as) > d.c.Feed.ArchiveFeedLength {
as = as[:d.c.Feed.ArchiveFeedLength]
}
key := archiveFeedKey(mid)
commonds := []interface{}{key}
for _, f := range as {
ts := f.PubDate.Time().Unix()
value := appFeedValue(f)
commonds = append(commonds, ts, value)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加archive-feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireArchiveFeed); err != nil {
PromError("redis:expire-archive-feed缓存", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireArchiveFeed, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:archive-feed-flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:archive-feed-receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// AddBangumiFeedCache add bangumi feed cache.
func (d *Dao) AddBangumiFeedCache(c context.Context, mid int64, as []*feedmdl.Feed) (err error) {
if len(as) == 0 {
return
}
conn := d.redis.Get(c)
defer conn.Close()
key := bangumiFeedKey(mid)
commonds := []interface{}{key}
for _, f := range as {
ts := f.PubDate.Time().Unix()
value := appFeedValue(f)
commonds = append(commonds, ts, value)
}
if err = conn.Send("ZADD", commonds...); err != nil {
PromError("redis:增加bangumi-feed缓存", "conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireBangumiFeed); err != nil {
PromError("redis:expire-bangumi-feed", "conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireBangumiFeed, err)
return
}
if err = conn.Flush(); err != nil {
PromError("redis:bangumi-feed-flush", "conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
PromError("redis:bangumi-feed-receive", "conn.Receive error(%v)", err)
return
}
}
return
}
// ArchiveFeedCache get archive feed by cache.
func (d *Dao) ArchiveFeedCache(c context.Context, mid int64, start, end int) (as []*feedmdl.Feed, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := archiveFeedKey(mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取archive-feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取archive-feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
fe, err = recoverFeed(idsStr)
if err != nil {
PromError("恢复archive-feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
as = append(as, fe)
}
}
return
}
// BangumiFeedCache get bangumi feed by cache.
func (d *Dao) BangumiFeedCache(c context.Context, mid int64, start, end int) (bids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := bangumiFeedKey(mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end, "WITHSCORES"))
if err != nil {
PromError("redis:获取feed", "conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
bids = make([]int64, 0, len(vs))
for len(vs) > 0 {
var (
ts int64
idsStr string
fe *feedmdl.Feed
)
if vs, err = redis.Scan(vs, &idsStr, &ts); err != nil {
PromError("redis:获取bangumi-feed", "redis.Scan(%v) error(%v)", vs, err)
return
}
if idsStr != "" {
fe, err = recoverFeed(idsStr)
if err != nil {
PromError("恢复bangumi-feed", "redis.recoverFeed(%v) error(%v)", idsStr, err)
err = nil
continue
}
fe.PubDate = time.Time(ts)
bids = append(bids, fe.ID)
}
}
return
}
// ArticleFeedCache get article feed by cache.
func (d *Dao) ArticleFeedCache(c context.Context, mid int64, start, end int) (aids []int64, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := feedKey(model.TypeArt, mid)
vs, err := redis.Values(conn.Do("ZREVRANGE", key, start, end))
if err != nil {
log.Error("ArticleFeedCache conn.Do(ZREVRANGE,%s,%d,%d) error(%v)", key, start, end, err)
return
}
for len(vs) > 0 {
var aid int64
if vs, err = redis.Scan(vs, &aid); err != nil {
log.Error("ArticleFeedCache redis.Scan(%v) error(%v)", vs, err)
return
}
aids = append(aids, aid)
}
return
}
// AddArticleFeedCache add article feed cache.
func (d *Dao) AddArticleFeedCache(c context.Context, mid int64, as []*artmdl.Meta) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
if len(as) == 0 {
return
}
if len(as) > d.c.Feed.ArticleFeedLength {
as = as[:d.c.Feed.ArticleFeedLength]
}
key := feedKey(model.TypeArt, mid)
commonds := []interface{}{key}
for _, a := range as {
ts := a.PublishTime.Time().Unix()
commonds = append(commonds, ts, a.ID)
}
if err = conn.Send("ZADD", commonds...); err != nil {
log.Error("AddArticleFeedCache conn.Send(ZADD, %v, %v) error(%v)", key, commonds, err)
return
}
if err = conn.Send("EXPIRE", key, d.redisExpireArchiveFeed); err != nil {
log.Error("AddArticleFeedCache conn.Send(expire, %s, %v) error(%v)", key, d.redisExpireArchiveFeed, err)
return
}
if err = conn.Flush(); err != nil {
log.Error("AddArticleFeedCache conn.Flush error(%v)", err)
return
}
for i := 0; i < 2; i++ {
if _, err = conn.Receive(); err != nil {
log.Error("AddArticleFeedCache conn.Receive error(%v)", err)
return
}
}
return
}
// UnreadCountCache get unread count cache of user.
func (d *Dao) UnreadCountCache(c context.Context, ft int, mid int64) (count int, err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := unreadCountKey(ft, mid)
if count, err = redis.Int(conn.Do("HGET", key, mid)); err != nil {
if err == redis.ErrNil {
err = nil
} else {
PromError("redis:获取未读数", "conn.Do(HGET, %s, %v) error(%v)", key, mid, err)
}
}
return
}
// AddUnreadCountCache add user's unread count cache.
func (d *Dao) AddUnreadCountCache(c context.Context, ft int, mid int64, count int) (err error) {
conn := d.redis.Get(c)
defer conn.Close()
key := unreadCountKey(ft, mid)
if _, err = conn.Do("HSET", key, mid, count); err != nil {
PromError("redis:增加未读数", "conn.DO(HSET, %s, %d, %d) error(%v)", key, mid, count, err)
}
return
}
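// Taken together, ExpireFeedCache, FeedCache and AddFeedCache support a
// cache-aside read path: refresh the TTL, serve from the sorted set on a hit,
// otherwise rebuild and write back. A minimal sketch, where feedWithCache and
// buildFeed are hypothetical names for the service-layer caller:
func (d *Dao) feedWithCache(c context.Context, ft int, mid int64, start, end int,
	buildFeed func(context.Context, int64) ([]*feedmdl.Feed, error)) (as []*feedmdl.Feed, err error) {
	if ok, _ := d.ExpireFeedCache(c, ft, mid); ok {
		if as, _, err = d.FeedCache(c, ft, mid, start, end); err == nil && len(as) > 0 {
			return
		}
	}
	if as, err = buildFeed(c, mid); err != nil { // recompute from upstream services
		return
	}
	_ = d.AddFeedCache(c, ft, mid, as) // best-effort write-back; paging is left to the caller
	return
}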

app/service/main/feed/dao/redis_test.go

@@ -0,0 +1,249 @@
package dao
import (
"context"
"testing"
"time"
"go-common/app/service/main/archive/api"
"go-common/app/service/main/archive/model/archive"
"go-common/app/service/main/feed/model"
feed "go-common/app/service/main/feed/model"
xtime "go-common/library/time"
. "github.com/smartystreets/goconvey/convey"
)
func Test_FeedValue(t *testing.T) {
var (
arc = api.Arc{Aid: 1}
arc2 = api.Arc{Aid: 2}
f feed.Feed
)
Convey("with fold avs", t, func() {
f = feed.Feed{ID: 100, Fold: []*api.Arc{&arc, &arc2}}
So(appFeedValue(&f), ShouldEqual, "0,100,1,2")
})
Convey("without fold avs", t, func() {
f = feed.Feed{ID: 1}
So(appFeedValue(&f), ShouldEqual, "0,1")
})
Convey("bangumi", t, func() {
f = feed.Feed{ID: 100, Type: feed.BangumiType}
So(appFeedValue(&f), ShouldEqual, "1,100")
})
}
func Test_RecoverFeed(t *testing.T) {
var (
arc = api.Arc{Aid: 1}
arc2 = api.Arc{Aid: 2}
b = feed.Feed{ID: 100, Type: feed.BangumiType}
f feed.Feed
)
Convey("bangumi", t, func() {
r, err := recoverFeed("1,100")
So(r, ShouldResemble, &b)
So(err, ShouldBeNil)
})
Convey("with fold avs", t, func() {
f = feed.Feed{ID: 100, Fold: []*api.Arc{&arc, &arc2}}
r, err := recoverFeed("0,100,1,2")
So(r, ShouldResemble, &f)
So(err, ShouldBeNil)
})
Convey("without fold avs", t, func() {
f = feed.Feed{ID: 100}
r, err := recoverFeed("0,100")
So(r, ShouldResemble, &f)
So(err, ShouldBeNil)
})
}
func Test_pingRedis(t *testing.T) {
Convey("ping redis", t, func() {
So(d.pingRedis(context.TODO()), ShouldBeNil)
})
}
func Test_LastAccessCache(t *testing.T) {
var (
mid = int64(1)
ts = int64(100)
err error
)
Convey("add cache", t, func() {
err = d.AddLastAccessCache(context.TODO(), model.TypeApp, mid, ts)
So(err, ShouldBeNil)
Convey("get cache", func() {
t1, err := d.LastAccessCache(context.TODO(), model.TypeApp, mid)
So(t1, ShouldEqual, ts)
So(err, ShouldBeNil)
})
})
}
func Test_FeedCache(t *testing.T) {
var (
mid = int64(1)
now = time.Now().Unix()
err error
a1 = api.Arc{Aid: 1, PubDate: xtime.Time(now)}
a2 = api.Arc{Aid: 2, PubDate: xtime.Time(now - 1000)}
a3 = api.Arc{Aid: 3}
bangumi = feed.Bangumi{SeasonID: 100}
f = feed.Feed{ID: 1, Archive: &a1, PubDate: a1.PubDate, Fold: []*api.Arc{&a3}}
f1 = feed.Feed{ID: 2, Archive: &a2, PubDate: a2.PubDate}
b = feed.Feed{ID: 100, Type: feed.BangumiType, Bangumi: &bangumi}
feeds = []*feed.Feed{&f, &f1, &b}
)
Convey("add cache", t, func() {
for name, client := range map[string]int{"app": model.TypeApp, "web": model.TypeWeb} {
err = d.AddFeedCache(context.TODO(), client, mid, feeds)
So(err, ShouldBeNil)
Convey(name+"get cache", func() {
res, bids, err := d.FeedCache(context.TODO(), client, mid, 0, 0)
So(res, ShouldResemble, []*feed.Feed{{ID: f.ID, Fold: []*api.Arc{&a3}, PubDate: f.PubDate}})
So(bids, ShouldBeEmpty)
So(err, ShouldBeNil)
})
Convey(name+"get cache when end > length", func() {
res, bids, err := d.FeedCache(context.TODO(), client, mid, 0, 10)
So(res, ShouldResemble, []*feed.Feed{
{ID: a1.Aid, Fold: []*api.Arc{&a3}, PubDate: a1.PubDate},
{ID: a2.Aid, PubDate: a2.PubDate},
{ID: 100, Type: feed.BangumiType},
})
So(bids, ShouldResemble, []int64{100})
So(err, ShouldBeNil)
})
Convey(name+"expire cache", func() {
ok, err := d.ExpireFeedCache(context.TODO(), client, mid)
So(ok, ShouldEqual, true)
So(err, ShouldBeNil)
})
Convey(name+"purge cache", func() {
err := d.PurgeFeedCache(context.TODO(), client, mid)
So(err, ShouldBeNil)
})
}
})
}
func Test_UppersCache(t *testing.T) {
var (
mid = int64(1)
mid2 = int64(2)
now = time.Now().Unix()
err error
a1 = archive.AidPubTime{Aid: 1, PubDate: xtime.Time(now), Copyright: 1}
a2 = archive.AidPubTime{Aid: 2, PubDate: xtime.Time(now - 1), Copyright: 0}
a3 = archive.AidPubTime{Aid: 3, PubDate: xtime.Time(now - 2), Copyright: 0}
)
Convey("add cache", t, func() {
err = d.AddUpperCaches(context.TODO(), map[int64][]*archive.AidPubTime{mid: {&a1, &a2}, mid2: {&a3}})
So(err, ShouldBeNil)
Convey("get cache", func() {
_, err := d.UppersCaches(context.TODO(), []int64{mid, mid2}, 0, 2)
So(err, ShouldBeNil)
// So(res, ShouldResemble, map[int64][]*archive.AidPubTime{mid: {&a1, &a2}, mid2: {&a3}})
})
Convey("expire cache", func() {
res, err := d.ExpireUppersCache(context.TODO(), []int64{mid})
So(err, ShouldBeNil)
So(res, ShouldResemble, map[int64]bool{mid: true})
})
Convey("get expired cache", func() {
d.redisExpireUpper = 0
res, err := d.ExpireUppersCache(context.TODO(), []int64{mid})
So(err, ShouldBeNil)
So(res, ShouldResemble, map[int64]bool{mid: false})
_, err = d.UppersCaches(context.TODO(), []int64{mid}, 0, 2)
So(err, ShouldBeNil)
// So(nres, ShouldResemble, map[int64][]*archive.AidPubTime{mid: {&a1, &a2}})
})
Convey("purge cache", func() {
err := d.DelUpperCache(context.TODO(), mid, a1.Aid)
So(err, ShouldBeNil)
})
})
}
func Test_ArchiveFeedCache(t *testing.T) {
var (
mid = int64(1)
now = time.Now().Unix()
err error
a1 = api.Arc{Aid: 1, PubDate: xtime.Time(now), Author: api.Author{Mid: mid}}
a2 = api.Arc{Aid: 2, PubDate: xtime.Time(now - 1), Author: api.Author{Mid: mid}}
a3 = api.Arc{Aid: 3, PubDate: xtime.Time(now - 2), Author: api.Author{Mid: mid}}
f1 = feed.Feed{ID: a1.Aid, Archive: &a1, PubDate: a1.PubDate, Fold: []*api.Arc{&a3}}
f2 = feed.Feed{ID: a2.Aid, Archive: &a2, PubDate: a2.PubDate}
fs = []*feed.Feed{&f1, &f2}
)
Convey("add cache", t, func() {
err = d.AddArchiveFeedCache(context.TODO(), mid, fs)
So(err, ShouldBeNil)
Convey("get cache", func() {
as, err := d.ArchiveFeedCache(context.TODO(), mid, 0, 2)
So(as, ShouldResemble, []*feed.Feed{
{ID: a1.Aid, PubDate: a1.PubDate, Fold: []*api.Arc{{Aid: 3}}},
{ID: a2.Aid, PubDate: a2.PubDate},
})
So(err, ShouldBeNil)
})
})
}
func Test_BangumiFeedCache(t *testing.T) {
var (
mid = int64(1)
err error
b1 = feed.Bangumi{SeasonID: 100}
b2 = feed.Bangumi{SeasonID: 200}
f1 = feed.Feed{ID: b1.SeasonID, Type: feed.BangumiType, Bangumi: &b1}
f2 = feed.Feed{ID: b2.SeasonID, Type: feed.BangumiType, Bangumi: &b2}
fs = []*feed.Feed{&f1, &f2}
)
Convey("add cache", t, func() {
err = d.AddBangumiFeedCache(context.TODO(), mid, fs)
So(err, ShouldBeNil)
Convey("get cache", func() {
res, err := d.BangumiFeedCache(context.TODO(), mid, 0, 2)
So(res, ShouldResemble, []int64{b2.SeasonID, b1.SeasonID})
So(err, ShouldBeNil)
})
})
}
func Test_UnreadCountCache(t *testing.T) {
var (
mid = int64(1)
count = 100
err error
)
Convey("add cache", t, func() {
err = d.AddUnreadCountCache(context.TODO(), model.TypeApp, mid, count)
So(err, ShouldBeNil)
Convey("get cache", func() {
c, err := d.UnreadCountCache(context.TODO(), model.TypeApp, mid)
So(c, ShouldEqual, count)
So(err, ShouldBeNil)
})
Convey("get wrong cache", func() {
c, err := d.UnreadCountCache(context.TODO(), model.TypeWeb, mid)
So(c, ShouldEqual, 0)
So(err, ShouldBeNil)
})
})
}