Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,71 @@
# Bazel build rules for the push-archive dao package.
# Managed automatically (tags = ["automanaged"]) — prefer regenerating
# over hand-editing.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Package unit tests, compiled together with go_default_library.
# NOTE(review): "statisitcs_test.go" looks misspelled, but srcs must match
# the actual filenames on disk — verify before "fixing" it here.
go_test(
    name = "go_default_test",
    srcs = [
        "d_test.go",
        "hbase_test.go",
        "mysql_test.go",
        "redis_test.go",
        "statisitcs_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/interface/main/push-archive/conf:go_default_library",
        "//app/interface/main/push-archive/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

# The dao library itself.
# NOTE(review): "Proportion.go" breaks the lowercase filename convention of
# the other sources; renaming would require touching the file on disk too.
go_library(
    name = "go_default_library",
    srcs = [
        "Proportion.go",
        "dao.go",
        "fan_group.go",
        "hbase.go",
        "message.go",
        "mysql.go",
        "push.go",
        "redis.go",
        "statistics.go",
    ],
    importpath = "go-common/app/interface/main/push-archive/dao",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/interface/main/push-archive/conf:go_default_library",
        "//app/interface/main/push-archive/model:go_default_library",
        "//library/cache/redis:go_default_library",
        "//library/database/hbase.v2:go_default_library",
        "//library/database/sql:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/stat/prom:go_default_library",
        "//library/sync/errgroup:go_default_library",
        "//library/xstr:go_default_library",
        "//vendor/github.com/tsuna/gohbase/hrpc:go_default_library",
    ],
)

# All files of this package, for the parent package's all-srcs rollup.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,82 @@
package dao
import (
"strconv"
"strings"
"go-common/app/interface/main/push-archive/conf"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
)
// Proportion is one grayscale sampling window for ordinary (non-special)
// follower fans. A fan is sampled when the last two digits of its mid fall
// inside [MinValue, MaxValue] (inclusive) — see FansByProportion.
type Proportion struct {
	// Inclusive bounds over the last two digits (00-99) of a fan's mid.
	MinValue int
	MaxValue int
}
// NewProportion builds the grayscale sampling windows over the last two
// digits of a mid from the ArcPush proportion config. Each entry yields one
// inclusive [MinValue, MaxValue] window. Any invalid entry aborts the whole
// build: the problem is logged and nil is returned, so a broken config is
// never half-applied (nil Proportions means "push to all ordinary fans").
func NewProportion(config []conf.Proportion) (ps []Proportion) {
	var ppt float64 // running sum of proportions; must stay in (0, 1]
	for _, g := range config {
		valueStartFloat, err := strconv.ParseFloat(strings.TrimSpace(g.ProportionStartFrom), 64)
		if err != nil {
			log.Error("NewProportions config ArcPush.FanGroup.ProportionStartFrom strconv.ParseFloat(%s) error(%v)", g.ProportionStartFrom, err)
			// BUG FIX: previously returned the partially built slice here,
			// unlike every other error path; return nil consistently.
			return nil
		}
		valueStartFrom := int(valueStartFloat)
		// proportion validation
		prop, err := strconv.ParseFloat(strings.TrimSpace(g.Proportion), 64)
		if err != nil {
			log.Error("NewProportions config ArcPush.FanGroup.Proportion(%s) strconv.ParseFloat err(%v)", g.Proportion, err)
			return nil
		}
		if prop*100-float64(int(prop*100)) != 0 {
			// at most 2 decimal places, so the window maps onto whole digits
			log.Error("NewProportions config ArcPush.FanGroup.Proportion(%s) must keep at most 2 bits", g.Proportion)
			return nil
		}
		ppt += prop
		if prop <= 0 || prop > 1 || ppt > 1 || ppt <= 0 {
			// each proportion and the running total must stay in (0, 1]
			log.Error("NewProportions config ArcPush.FanGroup.Proportion(%s) must in (0, 1] and sum(%f) in (0, 1]", g.Proportion, ppt)
			return nil
		}
		// the start offset plus the window width must stay within [0, 99];
		// BUG FIX: also reject a negative start offset, previously unchecked.
		maxValue := int(100*prop-1) + valueStartFrom
		if valueStartFrom < 0 || maxValue >= 100 {
			log.Error("NewProportions config ArcPush.FanGroup.Proportion(%s)+ProportionStartFrom must in [0, 99]", g.Proportion)
			// BUG FIX: return nil instead of the partial slice (see above).
			return nil
		}
		p := Proportion{
			MinValue: valueStartFrom,
			MaxValue: maxValue,
		}
		ps = append(ps, p)
	}
	return
}
// FansByProportion splits the given fans of one upper by relation type:
// special-follow fans always go to specials; ordinary fans go to attentions,
// either all of them (no sampling windows configured) or only those whose
// mid's last two digits fall inside one of the configured windows.
// The denominator of the sampling ratio is the whole user base.
func (d *Dao) FansByProportion(upper int64, fans map[int64]int) (attentions []int64, specials []int64) {
	sampled := len(d.Proportions) > 0
	for mid, rel := range fans {
		if rel == model.RelationSpecial {
			specials = append(specials, mid)
			continue
		}
		if !sampled {
			// no grayscale config: every ordinary fan is pushed
			attentions = append(attentions, mid)
			continue
		}
		suffix := int(mid % 100) // last two digits decide sampling
		for _, window := range d.Proportions {
			if suffix >= window.MinValue && suffix <= window.MaxValue {
				attentions = append(attentions, mid)
				break
			}
		}
	}
	return
}

View File

@@ -0,0 +1,83 @@
package dao
import (
"encoding/hex"
"flag"
"os"
"strconv"
"strings"
"testing"
"go-common/app/interface/main/push-archive/conf"
"github.com/smartystreets/goconvey/convey"
)
// d is the package-wide Dao instance shared by every test in this package.
var d *Dao

// TestMain wires up configuration (remote config service when DEPLOY_ENV is
// set, a local toml file otherwise), builds the shared Dao and runs the suite.
// NOTE(review): conf_token is a hardcoded credential — confirm it is a
// UAT-only token before keeping it in source control.
func TestMain(m *testing.M) {
	if os.Getenv("DEPLOY_ENV") != "" {
		flag.Set("app_id", "main.archive.push-archive")
		flag.Set("conf_token", "61c0d7d8527e8a4aad5b49826869e23c")
		flag.Set("tree_id", "7615")
		flag.Set("conf_version", "docker-1")
		flag.Set("deploy_env", "uat")
		flag.Set("conf_host", "config.bilibili.co")
		flag.Set("conf_path", "/tmp")
		flag.Set("region", "sh")
		flag.Set("zone", "sh001")
	} else {
		flag.Set("conf", "../cmd/push-archive-test.toml")
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	d = New(conf.Conf)
	os.Exit(m.Run())
}

// Test_msgTemplateEncode checks that MsgTemplateDesc and MsgTemplate of every
// configured fan group produce identical hex(QuoteToASCII(...)) encodings,
// and logs the encoded value (what operators paste into config).
func Test_msgTemplateEncode(t *testing.T) {
	convey.Convey("msgTemplateDesc编码", t, func() {
		for _, g := range d.FanGroups {
			ascii := strconv.QuoteToASCII(g.MsgTemplateDesc)
			msgtemp := hex.EncodeToString([]byte(ascii))
			t.Logf("the group(%s) msgtemplate encoded(%v)\n", g.Name, msgtemp)
			ascii = strconv.QuoteToASCII(g.MsgTemplate)
			msgtemp2 := hex.EncodeToString([]byte(ascii))
			convey.So(msgtemp, convey.ShouldEqual, msgtemp2)
		}
	})
}

// Test_msgTemplateDecode checks the decoded template equals its description.
func Test_msgTemplateDecode(t *testing.T) {
	convey.Convey("msgTemplateDesc解码", t, func() {
		for _, g := range d.FanGroups {
			convey.So(g.MsgTemplate, convey.ShouldEqual, g.MsgTemplateDesc)
		}
	})
}

// Test_keyname checks each FanGroups map key equals fanGroupKey(relation, name).
func Test_keyname(t *testing.T) {
	convey.Convey("fangroup keyname", t, func() {
		for gkey, g := range d.FanGroups {
			convey.So(gkey, convey.ShouldEqual, fanGroupKey(g.RelationType, g.Name))
		}
	})
}

// Test_conf sanity-checks the loaded ArcPush configuration: group keys,
// two-line message templates, proportion window widths, and push order length.
func Test_conf(t *testing.T) {
	convey.Convey("配置结果", t, func() {
		for gkey, g := range d.FanGroups {
			convey.So(gkey, convey.ShouldEqual, fanGroupKey(g.RelationType, g.Name))
			convey.So(len(strings.Split(g.MsgTemplateDesc, "\r\n")), convey.ShouldEqual, 2)
			convey.So(g.MsgTemplate, convey.ShouldEqual, g.MsgTemplateDesc)
		}
		for i, g := range d.Proportions {
			proportion, _ := strconv.ParseFloat(d.c.ArcPush.Proportions[i].Proportion, 64)
			convey.So(g.MaxValue-g.MinValue+1, convey.ShouldEqual, proportion*100)
		}
		convey.So(len(d.GroupOrder), convey.ShouldEqual, len(d.c.ArcPush.Order))
	})
}

View File

@@ -0,0 +1,177 @@
package dao
import (
"context"
"fmt"
"os"
"time"
"go-common/app/interface/main/push-archive/conf"
"go-common/app/interface/main/push-archive/model"
xredis "go-common/library/cache/redis"
xsql "go-common/library/database/sql"
"go-common/library/log"
xhttp "go-common/library/net/http/blademaster"
"go-common/library/stat/prom"
"go-common/library/database/hbase.v2"
)
// Dao bundles every storage/transport dependency of the push-archive
// service: MySQL, redis, two HBase clients (relation shards and per-fan
// feature tables), an HTTP client, prepared statements, and the parsed
// ArcPush runtime configuration.
type Dao struct {
	c     *conf.Config
	db    *xsql.DB
	redis *xredis.Pool
	// relation HBase stores upper -> fans relation maps (see hbase.go).
	relationHBase             *hbase.Client
	relationHBaseReadTimeout  time.Duration
	relationHBaseWriteTimeout time.Duration
	// fan HBase holds per-fan lookup tables (recent follows, active hours).
	fanHBase            *hbase.Client
	fanHBaseReadTimeout time.Duration
	httpClient          *xhttp.Client
	// prepared statements over the push_settings / statistics tables.
	settingStmt       *xsql.Stmt
	setSettingStmt    *xsql.Stmt
	settingsMaxIDStmt *xsql.Stmt
	setStatisticsStmt *xsql.Stmt
	// UpperLimitExpire is a TTL in seconds (converted from conf duration).
	UpperLimitExpire int32
	// FanGroups maps fanGroupKey(relationType, name) -> group config.
	FanGroups map[string]*FanGroup
	// GroupOrder is the configured processing order of group keys.
	GroupOrder []string
	// Proportions are the grayscale sampling windows for ordinary fans.
	Proportions []Proportion
	// ActiveDefaultTime is the set of default active hours (hour -> 1).
	ActiveDefaultTime map[int]int
	PushBusinessID    string
	PushAuth          string
}

// prometheus counters shared by the Prom* helpers below.
var (
	errorsCount = prom.BusinessErrCount
	infosCount  = prom.BusinessInfoCount
)
// New creates a push-service DAO instance.
//
// It wires MySQL, redis, both HBase clients and the HTTP client from conf,
// prepares the push_settings/statistics statements, then validates that
// every group named in ArcPush.Order exists in FanGroups — on a mismatch it
// logs, prints, and terminates the process with os.Exit(1): the config is
// unusable and starting up would push to undefined groups.
func New(c *conf.Config) *Dao {
	d := &Dao{
		c:                         c,
		db:                        xsql.NewMySQL(c.MySQL),
		relationHBase:             hbase.NewClient(&c.HBase.Config),
		relationHBaseReadTimeout:  time.Duration(c.HBase.ReadTimeout),
		relationHBaseWriteTimeout: time.Duration(c.HBase.WriteTimeout),
		fanHBase:                  hbase.NewClient(&c.FansHBase.Config),
		fanHBaseReadTimeout:       time.Duration(c.FansHBase.ReadTimeout),
		redis:                     xredis.NewPool(c.Redis),
		httpClient:                xhttp.NewClient(c.HTTPClient),
		UpperLimitExpire:          int32(time.Duration(c.ArcPush.UpperLimitExpire) / time.Second),
		FanGroups:                 NewFanGroups(c),
		Proportions:               NewProportion(c.ArcPush.Proportions),
	}
	d.settingStmt = d.db.Prepared(_settingSQL)
	d.setSettingStmt = d.db.Prepared(_setSettingSQL)
	d.settingsMaxIDStmt = d.db.Prepared(_settingsMaxIDSQL)
	d.setStatisticsStmt = d.db.Prepared(_inStatisticsSQL)
	// every group referenced by the push order must actually be configured
	for _, gp := range c.ArcPush.Order {
		if _, exist := d.FanGroups[gp]; !exist {
			log.Error("order config error, group %s not exist", gp)
			fmt.Printf("order config error, group %s not exist\r\n\r\n", gp)
			os.Exit(1)
		}
	}
	d.GroupOrder = c.ArcPush.Order
	// default active time
	d.ActiveDefaultTime = map[int]int{}
	for _, one := range c.ArcPush.ActiveTime {
		d.ActiveDefaultTime[one] = 1
	}
	return d
}
// PromError increments the business error counter for name.
func PromError(name string) {
	errorsCount.Incr(name)
}

// PromInfo increments the business info counter for name.
func PromInfo(name string) {
	infosCount.Incr(name)
}

// PromInfoAdd adds value to the business info counter for name.
func PromInfoAdd(name string, value int64) {
	infosCount.Add(name, value)
}

// PromChanLen records a channel length as a state (gauge-style) metric.
func PromChanLen(name string, length int64) {
	infosCount.State(name, length)
}
// BeginTx begins a MySQL transaction on the underlying DB pool.
func (d *Dao) BeginTx(c context.Context) (*xsql.Tx, error) {
	return d.db.Begin(c)
}
// Close releases every resource the Dao owns: both HBase clients, the redis
// pool and the MySQL handle. All closes are attempted even when an earlier
// one fails; as in the original contract, the returned error is whatever the
// LAST Close call produced.
func (d *Dao) Close() (err error) {
	steps := []struct {
		close   func() error
		logFmt  string
		promKey string
	}{
		{d.relationHBase.Close, "d.relationHBase.Close() error(%v)", "hbase:close"},
		{d.fanHBase.Close, "d.fanHBase.Close() error(%v)", "fanHBase:close"},
		{d.redis.Close, "d.redis.Close() error(%v)", "redis:close"},
		{d.db.Close, "d.db.Close() error(%v)", "db:close"},
	}
	for _, s := range steps {
		if err = s.close(); err != nil {
			log.Error(s.logFmt, err)
			PromError(s.promKey)
		}
	}
	return
}
// Ping verifies that both MySQL and redis are reachable; the first failure
// is logged, counted in prometheus and returned.
func (d *Dao) Ping(c context.Context) (err error) {
	if err = d.db.Ping(c); err != nil {
		PromError("mysql:Ping")
		log.Error("d.db.Ping error(%v)", err)
		return err
	}
	if err = d.pingRedis(c); err == nil {
		return nil
	}
	PromError("redis:Ping")
	log.Error("d.redis.Ping error(%v)", err)
	return err
}
// Batch consumes *list in chunks of at most batchSize, invoking
// params.Handler on each chunk and then calling f up to `retry` times until
// it succeeds. Failures after all retries are logged and the next chunk is
// processed anyway (best effort). The caller's slice is drained in place
// through the pointer. A nil params is logged and ignored.
func Batch(list *[]int64, batchSize int, retry int, params *model.BatchParam, f func(fans *[]int64, params map[string]interface{}) error) {
	if params == nil {
		log.Warn("Batch params(%+v) nil", params)
		return
	}
	for len(*list) > 0 {
		take := batchSize
		if remaining := len(*list); remaining < take {
			take = remaining
		}
		chunk := (*list)[:take]
		*list = (*list)[take:]
		params.Handler(&params.Params, chunk)
		var err error
		for attempt := 0; attempt < retry; attempt++ {
			if err = f(&chunk, params.Params); err == nil {
				break
			}
		}
		if err != nil {
			log.Error("Batch error(%v), params(%+v)", err, params)
		}
	}
}

View File

@@ -0,0 +1,182 @@
package dao
import (
"bytes"
"encoding/hex"
"fmt"
"os"
"strconv"
"strings"
"time"
"go-common/app/interface/main/push-archive/conf"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
)
// FanGroup is one configured segment of an upper's fans to push to.
type FanGroup struct {
	// Name of the group; (RelationType, Name) must be unique, see fanGroupKey.
	Name string
	// RelationType is the fan→upper relation (ordinary or special follow).
	RelationType int
	// Hitby selects how membership is decided (default/hbase/abtest/abcomparison).
	Hitby string
	// Push-count limits for this group (exact semantics defined by callers —
	// not visible in this file).
	Limit         int
	PerUpperLimit int
	// LimitExpire is the limit TTL in seconds (converted from conf duration).
	LimitExpire int32
	// HBase table/families used to fetch this group's fans; empty table
	// means no HBase filtering for this group.
	HBaseTable  string
	HBaseFamily []string
	// MsgTemplateDesc is the human-readable template; MsgTemplate is the
	// decoded runtime form of the hex/ASCII-escaped config value — the two
	// must match after decoding (validated in NewFanGroups).
	MsgTemplateDesc string
	MsgTemplate     string
}
// fanGroupKey builds the unique FanGroups map key for a (relation, name)
// pair, formatted as "<relationType>#<name>".
func fanGroupKey(relationType int, name string) string {
	return fmt.Sprint(relationType, "#", name)
}
// NewFanGroups builds the fan-group map from ArcPush config and validates
// every entry: non-empty name, known relation type and hit source, unique
// (relationType, name) key, HBase table/family configured together, and a
// decodable two-line message template matching its description. The first
// invalid entry stops collection (break), and the final length check below
// prints and calls os.Exit(1) — any bad group is fatal at startup.
// (The original comment said "panic"; it actually exits the process.)
func NewFanGroups(config *conf.Config) (grp map[string]*FanGroup) {
	grp = make(map[string]*FanGroup)
	for _, g := range config.ArcPush.FanGroup {
		if g.Name == "" {
			log.Error("NewFanGroups config ArcPush.FanGroup.Name/hitby must not be empty")
			break
		}
		// validate the fan-upper relation type
		if g.RelationType != model.RelationAttention && g.RelationType != model.RelationSpecial {
			log.Error("NewFanGroups config ArcPush.FanGroup.RelationType not exist(%d)", g.RelationType)
			break
		}
		if g.Hitby != model.GroupDataTypeDefault && g.Hitby != model.GroupDataTypeHBase &&
			g.Hitby != model.GroupDataTypeAbtest && g.Hitby != model.GroupDataTypeAbComparison {
			log.Error("NewFanGroups config ArcPush.FanGroup.hitby(%s) must in [default,hbase]", g.Hitby)
			break
		}
		key := fanGroupKey(g.RelationType, g.Name)
		if _, ok := grp[key]; ok {
			log.Error("NewFanGroups config ArcPush.FanGroup.relationtype(%d) and name(%s) must be unique", g.RelationType, g.Name)
			break
		}
		// HBase table and column families must be configured together
		if g.HBaseTable != "" && len(g.HBaseFamily) == 0 {
			log.Error("NewFanGroups config ArcPush.FanGroup.HbaseTable(%s) & HbaseFamily(%v) must exist togather", g.HBaseTable, g.HBaseFamily)
			break
		}
		msgTemp, err := decodeMsgTemplate(g.Name, g.MsgTemplate)
		if err != nil {
			log.Error("NewFanGroups config ArcPush.FanGroup.MsgTemplate(%s) decodeMsgTemplate error(%v)", g.MsgTemplate, err)
			break
		}
		if msgTemp != g.MsgTemplateDesc {
			log.Error("NewFanGroups config ArcPush.FanGroup.MsgTemplate decodeMsgTemplate(%s) must equal to MsgTemplateDesc(%s)", msgTemp, g.MsgTemplateDesc)
			break
		}
		if len(strings.SplitN(msgTemp, "\r\n", 2)) != 2 {
			log.Error("NewFanGroups config ArcPush.FanGroup.MsgTemplate(%s) decodeMsgTemplate(%s) must contains `\r\n`", g.MsgTemplate, msgTemp)
			break
		}
		grp[key] = &FanGroup{
			Name:            strings.TrimSpace(g.Name),
			RelationType:    g.RelationType,
			Hitby:           strings.TrimSpace(g.Hitby),
			Limit:           g.Limit,
			PerUpperLimit:   g.PerUpperLimit,
			LimitExpire:     int32(time.Duration(g.LimitExpire) / time.Second),
			HBaseTable:      strings.TrimSpace(g.HBaseTable),
			HBaseFamily:     g.HBaseFamily,
			MsgTemplateDesc: g.MsgTemplateDesc,
			MsgTemplate:     msgTemp,
		}
	}
	// fewer valid groups than configured means a break above fired
	if len(grp) < len(config.ArcPush.FanGroup) {
		fmt.Printf("NewFanGroups failed\r\n\r\n")
		os.Exit(1)
	}
	return
}
// decodeMsgTemplate decodes a message template stored as
// hex(strconv.QuoteToASCII(template)) back into its UTF-8 runtime form —
// templates are kept ASCII-escaped in config so servers that cannot carry
// Chinese text in configuration still work.
//
// Decoding strips the surrounding quotes added by QuoteToASCII, splits on
// literal "\r\n" markers and re-expands every \uXXXX escape, preserving %s
// format verbs between segments. An empty temp decodes to "" with no error;
// any malformed input is logged and returned as an error.
func decodeMsgTemplate(groupName string, temp string) (decode string, err error) {
	if temp == "" {
		return
	}
	b, err := hex.DecodeString(temp)
	if err != nil {
		log.Error("DecodeMsgTemplate hex.DecodeString error(%v) groupName(%s), temp(%s)", err, groupName, temp)
		return
	}
	temp = string(b)
	// BUG FIX: QuoteToASCII output is always wrapped in double quotes, so a
	// decoded payload shorter than 2 bytes is malformed; previously
	// temp[1:len(temp)-1] below panicked with a slice-bounds error.
	if len(temp) < 2 {
		err = fmt.Errorf("decoded msg template %q too short", temp)
		log.Error("DecodeMsgTemplate error(%v) groupName(%s), temp(%s)", err, groupName, temp)
		return "", err
	}
	buf := new(bytes.Buffer)
	rows := strings.Split(temp[1:len(temp)-1], "\\r\\n")
	lenRows := len(rows) - 1
	for k, row := range rows {
		parts := strings.Split(row, "%s")
		lenParts := len(parts) - 1
		for kp, str := range parts {
			words := strings.Split(str, "\\u")
			for _, w := range words {
				if len(w) < 1 {
					continue
				}
				// each word is the hex code point of one escaped rune
				wi, err := strconv.ParseInt(w, 16, 32)
				if err != nil {
					log.Error("DecodeMsgTemplate error(%v) groupName(%s), decode(%s), word(%s)", err, groupName, temp, w)
					return "", err
				}
				buf.WriteString(fmt.Sprintf("%c", wi))
			}
			if kp >= lenParts {
				continue
			}
			// restore the %s verb consumed by the split above
			buf.WriteString("%s")
		}
		if k >= lenRows {
			continue
		}
		buf.WriteString("\r\n")
	}
	decode = buf.String()
	return
}
// FansByHBase filters the upper's fans against the group's HBase table:
// fans whose per-fan row does not associate them with this upper are
// excluded. Groups without an HBase table configured are not filtered —
// every fan passes through unchanged.
func (d *Dao) FansByHBase(upper int64, fanGroupKey string, fans *[]int64) (result []int64, excluded []int64) {
	g, ok := d.FanGroups[fanGroupKey]
	// BUG FIX: guard the map lookup — an unknown group key previously
	// yielded a nil *FanGroup and a panic on g.HBaseTable below. Treat an
	// unknown group like one with no table: nothing to filter.
	if !ok || g == nil || len(g.HBaseTable) == 0 {
		result = *fans
		return
	}
	params := model.NewBatchParam(map[string]interface{}{
		"base":     upper,
		"table":    g.HBaseTable,
		"family":   g.HBaseFamily,
		"result":   &result,
		"excluded": &excluded,
		"handler":  d.filterFanByUpper,
	}, nil)
	Batch(fans, 100, 1, params, d.FilterFans)
	return
}
// FansByActiveTime filters fans by active hour. When default active hours
// are configured, a fan is pushed only if the current hour is a default
// active hour or matches the fan's personal active hour in HBase; with no
// configuration at all, no filtering happens. To filter by personal hours
// without any default hour, configure the default list as [0].
func (d *Dao) FansByActiveTime(hour int, fans *[]int64) (result []int64, excluded []int64) {
	if len(d.ActiveDefaultTime) == 0 {
		// active-time filtering disabled: everyone passes
		result = *fans
		excluded = []int64{}
		return
	}
	batchArgs := model.NewBatchParam(map[string]interface{}{
		"base":     hour,
		"table":    "dm_member_push_active_hour",
		"family":   []string{"p"},
		"result":   &result,
		"excluded": &excluded,
		"handler":  d.filterFanByActive,
	}, nil)
	Batch(fans, 100, 1, batchArgs, d.FilterFans)
	return
}

View File

@@ -0,0 +1,313 @@
package dao
import (
"bytes"
"context"
"crypto/md5"
"encoding/binary"
"encoding/json"
"fmt"
"strconv"
"sync"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
"go-common/library/sync/errgroup"
"github.com/tsuna/gohbase/hrpc"
)
// _hbaseShard is the number of row-key shards an upper's relations are
// spread across; a fan always lands in shard fans % _hbaseShard.
const _hbaseShard = 200

var (
	hbaseTable   = "ugc:PushArchive"
	hbaseFamily  = "relation"
	hbaseFamilyB = []byte(hbaseFamily)
)

// _rowKey derives the HBase row key for an (upper, fan) pair: the md5 hex
// digest of "<upper>_<fan mod _hbaseShard>".
func _rowKey(upper, fans int64) string {
	plain := fmt.Sprintf("%d_%d", upper, fans%_hbaseShard)
	return fmt.Sprintf("%x", md5.Sum([]byte(plain)))
}
// Fans gets the upper's fans.
//
// Relation data for one upper is sharded over _hbaseShard row keys, so all
// shards are read concurrently and the per-shard maps are merged under a
// mutex. For PGC archives (isPGC) only special-relation fans are kept.
//
// NOTE(review): group.Wait()'s error is discarded and the declared err
// result is never set, so a failed shard read silently yields a smaller fan
// set — confirm this best-effort behavior is intended.
func (d *Dao) Fans(c context.Context, upper int64, isPGC bool) (res map[int64]int, err error) {
	var mutex sync.Mutex
	res = make(map[int64]int)
	group := errgroup.Group{}
	for i := 0; i < _hbaseShard; i++ {
		shard := int64(i)
		group.Go(func() (e error) {
			key := _rowKey(upper, shard)
			relations, e := d.fansByKey(context.TODO(), key)
			if e != nil {
				return
			}
			mutex.Lock()
			for fans, tp := range relations {
				// PGC archives are pushed only to special-follow fans
				if isPGC && tp != model.RelationSpecial {
					continue
				}
				res[fans] = tp
			}
			mutex.Unlock()
			return
		})
	}
	group.Wait()
	return
}
// AddFans records a fan (with its relation type) in the upper's relation
// shard: read-modify-write of the shard's fans map.
func (d *Dao) AddFans(c context.Context, upper, fans int64, tp int) (err error) {
	rowKey := _rowKey(upper, fans)
	var relations map[int64]int
	if relations, err = d.fansByKey(c, rowKey); err != nil {
		return
	}
	relations[fans] = tp
	return d.saveRelation(c, rowKey, upper, relations)
}
// DelFans removes a fan from the upper's relation shard
// (read-modify-write of the shard's fans map).
func (d *Dao) DelFans(c context.Context, upper, fans int64) (err error) {
	rowKey := _rowKey(upper, fans)
	var relations map[int64]int
	if relations, err = d.fansByKey(c, rowKey); err != nil {
		return
	}
	delete(relations, fans)
	return d.saveRelation(c, rowKey, upper, relations)
}
// DelSpecialAttention downgrades a special-follow relation to an ordinary
// one. Fans that are not currently special-followers are left untouched
// (no write happens).
func (d *Dao) DelSpecialAttention(c context.Context, upper, fans int64) (err error) {
	rowKey := _rowKey(upper, fans)
	var relations map[int64]int
	if relations, err = d.fansByKey(c, rowKey); err != nil {
		return
	}
	if relations[fans] != model.RelationSpecial {
		return nil
	}
	relations[fans] = model.RelationAttention
	return d.saveRelation(c, rowKey, upper, relations)
}
// fansByKey reads one relation shard row and unmarshals the fans->relation
// map stored in the first cell of the "relation" column family. A missing
// row (nil result) yields an empty, non-nil map and no error, so callers
// can mutate and re-save it directly.
func (d *Dao) fansByKey(c context.Context, key string) (relations map[int64]int, err error) {
	var (
		result      *hrpc.Result
		ctx, cancel = context.WithTimeout(c, d.relationHBaseReadTimeout)
	)
	defer cancel()
	relations = make(map[int64]int)
	if result, err = d.relationHBase.Get(ctx, []byte(hbaseTable), []byte(key)); err != nil {
		log.Error("d.relationHBase.Get error(%v) querytable(%v)", err, hbaseTable)
		PromError("hbase:Get")
		return
	} else if result == nil {
		return
	}
	// only the first matching cell is decoded; the whole map lives in one cell
	for _, c := range result.Cells {
		if c != nil && bytes.Equal(c.Family, hbaseFamilyB) {
			if err = json.Unmarshal(c.Value, &relations); err != nil {
				log.Error("json.Unmarshal() error(%v)", err)
				return
			}
			break
		}
	}
	return
}
// saveRelation persists the full fans->relation map of one shard back to
// HBase, stored as JSON in the "relation" family under a column named after
// the upper's decimal id.
func (d *Dao) saveRelation(c context.Context, key string, upper int64, relations map[int64]int) (err error) {
	ctx, cancel := context.WithTimeout(c, d.relationHBaseWriteTimeout)
	defer cancel()
	var value []byte
	if value, err = json.Marshal(relations); err != nil {
		return
	}
	column := strconv.FormatInt(upper, 10)
	values := map[string]map[string][]byte{hbaseFamily: {column: value}}
	if _, err = d.relationHBase.PutStr(ctx, hbaseTable, key, values); err != nil {
		log.Error("d.relationHBase.PutStr error(%v), table(%s), values(%+v)", err, hbaseTable, values)
		PromError("hbase:Put")
	}
	return
}
// filterFanByUpper reports whether the given upper appears in the fan's
// per-fan HBase row (rowkey = md5 hex of the fan's decimal id) within the
// given column families. up must hold an int64 (type-asserted); this is the
// handler shape consumed by FilterFans/Batch.
//
// NOTE(review): cell values are decoded with binary.BigEndian.Uint32, which
// panics on cells shorter than 4 bytes — this assumes the table always
// stores 4-byte big-endian up ids; confirm against the table writer.
func (d *Dao) filterFanByUpper(c context.Context, fan int64, up interface{}, table string, family []string) (included bool, err error) {
	var (
		res         *hrpc.Result
		key         string
		ctx, cancel = context.WithTimeout(c, d.fanHBaseReadTimeout)
	)
	defer cancel()
	upper := up.(int64)
	rowKeyMD := md5.Sum([]byte(strconv.FormatInt(fan, 10)))
	key = fmt.Sprintf("%x", rowKeyMD)
	if res, err = d.fanHBase.Get(ctx, []byte(table), []byte(key)); err != nil {
		log.Error("d.fanHBase.Get error(%v) querytable(%v) key(%s), fan(%d), upper(%d)", err, table, key, fan, upper)
		PromError("hbase:Get")
		return
	} else if res == nil {
		return
	}
	for _, c := range res.Cells {
		if c == nil || !existFamily(c.Family, family) {
			continue
		}
		upID := int64(binary.BigEndian.Uint32(c.Value))
		if upID != upper || upID <= 0 {
			continue
		}
		included = true
		log.Info("filter fan: included by hbase, fan(%d) upper(%d) table(%s)", fan, upper, table)
		return
	}
	if !included {
		log.Info("filter fan: excluded by hbase, fan(%d) upper(%d) table(%s)", fan, upper, table)
	}
	return
}
// FilterFans runs the configured per-fan handler concurrently over *fans and
// partitions them into params["result"] (included) and params["excluded"].
//
// params must carry: "base" (handler-specific pivot value), "table" and
// "family" (HBase lookup target), "result"/"excluded" (*[]int64 sinks) and
// "handler" (the per-fan predicate). A type mismatch panics by design — the
// params maps are built by trusted callers inside this package.
func (d *Dao) FilterFans(fans *[]int64, params map[string]interface{}) (err error) {
	base := params["base"]
	table := params["table"].(string)
	family := params["family"].([]string)
	result := params["result"].(*[]int64)
	excluded := params["excluded"].(*[]int64)
	handler := params["handler"].(func(context.Context, int64, interface{}, string, []string) (bool, error))
	mutex := sync.Mutex{}
	group := errgroup.Group{}
	l := len(*fans)
	for i := 0; i < l; i++ {
		shared := (*fans)[i]
		group.Go(func() (e error) {
			included, e := handler(context.TODO(), shared, base, table, family)
			if e != nil {
				log.Error("FilterFans error(%v) fan(%d) base(%d) table(%s) family(%v)", e, shared, base, table, family)
			}
			mutex.Lock()
			if included {
				*result = append(*result, shared)
			} else {
				*excluded = append(*excluded, shared)
			}
			mutex.Unlock()
			return
		})
	}
	// BUG FIX: the declared err result was never assigned — group.Wait()'s
	// error was discarded, so callers (e.g. Batch's retry loop) always saw
	// nil even when every handler call failed. Propagate the first error.
	err = group.Wait()
	return
}
// existFamily reports whether the HBase column family `actual` matches any
// of the configured family names.
func existFamily(actual []byte, family []string) bool {
	for _, candidate := range family {
		if bytes.Equal([]byte(candidate), actual) {
			return true
		}
	}
	return false
}
// filterFanByActive reports whether a fan should be pushed at the given
// hour: hours in ActiveDefaultTime pass immediately without a lookup;
// otherwise the fan's personal active hour is read from the given HBase
// table (rowkey = md5 hex of the fan's decimal id) and must equal the
// current hour. oneHour must hold an int (type-asserted); this is the
// handler shape consumed by FilterFans/Batch.
// NOTE(review): the final log message reads "fanexcluded" — missing a space.
func (d *Dao) filterFanByActive(ctx context.Context, fan int64, oneHour interface{}, table string, family []string) (included bool, err error) {
	var (
		b          []byte
		result     *hrpc.Result
		c, cancel  = context.WithTimeout(ctx, d.fanHBaseReadTimeout)
		activeHour int
	)
	defer cancel()
	hour := oneHour.(int)
	// default active hours pass without touching HBase
	if _, included = d.ActiveDefaultTime[hour]; included {
		return
	}
	rowKey := md5.Sum(strconv.AppendInt(b, fan, 10))
	key := fmt.Sprintf("%x", rowKey)
	if result, err = d.fanHBase.Get(c, []byte(table), []byte(key)); err != nil {
		log.Error("filterFanByActive d.fanHBase.Get error(%v) table(%s) key(%s) fan(%d)", err, table, key, fan)
		PromError("hbase:Get")
		return
	} else if result == nil {
		return
	}
	included = false
	for _, cell := range result.Cells {
		if cell != nil && existFamily(cell.Family, family) {
			// cell value is the fan's active hour as a decimal string
			activeHour, err = strconv.Atoi(string(cell.Value))
			if err != nil {
				log.Error("filterFanByActive strconv.Atoi error(%v) fan(%d) value(%s)", err, fan, string(cell.Value))
				break
			}
			if activeHour == hour {
				included = true
				break
			}
		}
	}
	if !included {
		log.Info("filter fanexcluded by active time from table, fan(%d)", fan)
	}
	return
}
// ExistsInBlacklist partitions mids into those present in the configured
// abtest blacklist HBase table for this upper and those absent. Lookups run
// concurrently; per-mid lookup errors are deliberately ignored (an
// unresolvable mid counts as not blacklisted).
func (d *Dao) ExistsInBlacklist(ctx context.Context, upper int64, mids []int64) (exists, notExists []int64) {
	var (
		mu sync.Mutex
		eg = errgroup.Group{}
	)
	for _, m := range mids {
		mid := m
		eg.Go(func() error {
			hit, _ := d.filterFanByUpper(context.Background(), mid, upper, d.c.Abtest.HbaseBlacklistTable, d.c.Abtest.HbaseBlacklistFamily)
			mu.Lock()
			defer mu.Unlock()
			if hit {
				exists = append(exists, mid)
			} else {
				notExists = append(notExists, mid)
			}
			return nil
		})
	}
	eg.Wait()
	return
}
// ExistsInWhitelist partitions mids into those present in the configured
// abtest whitelist HBase table for this upper and those absent. Lookups run
// concurrently; per-mid lookup errors are deliberately ignored.
// (The config field really is spelled HbaseeWhitelistTable upstream.)
func (d *Dao) ExistsInWhitelist(ctx context.Context, upper int64, mids []int64) (exists, notExists []int64) {
	var (
		mu sync.Mutex
		eg = errgroup.Group{}
	)
	for _, m := range mids {
		mid := m
		eg.Go(func() error {
			hit, _ := d.filterFanByUpper(context.Background(), mid, upper, d.c.Abtest.HbaseeWhitelistTable, d.c.Abtest.HbaseWhitelistFamily)
			mu.Lock()
			defer mu.Unlock()
			if hit {
				exists = append(exists, mid)
			} else {
				notExists = append(notExists, mid)
			}
			return nil
		})
	}
	eg.Wait()
	return
}

View File

@@ -0,0 +1,119 @@
package dao
import (
"context"
"testing"
"go-common/app/interface/main/push-archive/model"
"github.com/smartystreets/goconvey/convey"
)
// Test_onekey exercises filterFanByUpper against fixture rows in the
// ai:pushlist_follow_recent table: one fan not following the upper, three
// that do (one of them via a second column family).
func Test_onekey(t *testing.T) {
	var included bool
	var err error
	included, err = d.filterFanByUpper(context.TODO(), int64(12312313), int64(275152561), "ai:pushlist_follow_recent", []string{"m"})
	convey.Convey("hbase过滤up主, 不存在", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(included, convey.ShouldEqual, false)
	})
	included, err = d.filterFanByUpper(context.TODO(), int64(27515303), int64(27515256), "ai:pushlist_follow_recent", []string{"m", "m1"})
	convey.Convey("hbase过滤up主增加1个", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(included, convey.ShouldEqual, true)
	})
	included, err = d.filterFanByUpper(context.TODO(), int64(27515401), int64(27515256), "ai:pushlist_follow_recent", []string{"m"})
	convey.Convey("hbase过滤up主增加1个", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(included, convey.ShouldEqual, true)
	})
	included, err = d.filterFanByUpper(context.TODO(), int64(27515300), int64(27515256), "ai:pushlist_follow_recent", []string{"m"})
	convey.Convey("hbase过滤up主增加1个", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(included, convey.ShouldEqual, true)
	})
}

// Test_keys runs FilterFans (the concurrent partitioner) over the same
// fixtures: 3 fans should pass, 1 should be excluded.
func Test_keys(t *testing.T) {
	var result, excluded []int64
	params := map[string]interface{}{
		"base":     int64(27515256),
		"table":    "ai:pushlist_follow_recent",
		"family":   []string{"m"},
		"result":   &result,
		"excluded": &excluded,
		"handler":  d.filterFanByUpper,
	}
	err := d.FilterFans(&[]int64{27515303, 27515401, 27515300, 12312313}, params)
	convey.Convey("多协程过滤up主,3个符合1个排除", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(result), convey.ShouldEqual, 3)
		convey.So(len(excluded), convey.ShouldEqual, 1)
	})
}

// Test_batchfilter covers Batch + FilterFans with batchSize 1 and retry 2 —
// same fixtures and expected 3/1 partition as Test_keys.
func Test_batchfilter(t *testing.T) {
	var result, excluded []int64
	params := model.NewBatchParam(map[string]interface{}{
		"base":     int64(27515256),
		"table":    "ai:pushlist_follow_recent",
		"family":   []string{"m"},
		"result":   &result,
		"excluded": &excluded,
		"handler":  d.filterFanByUpper,
	}, nil)
	Batch(&[]int64{27515303, 27515401, 27515300, 12312313}, 1, 2, params, d.FilterFans)
	convey.Convey("批量过滤up主, ,3个符合1个排除", t, func() {
		convey.So(len(result), convey.ShouldEqual, 3)
		convey.So(len(excluded), convey.ShouldEqual, 1)
	})
	t.Logf("the result(%v), excluded(%v)", result, excluded)
}

// Test_addfans stores one ordinary-follow fan for a test upper.
func Test_addfans(t *testing.T) {
	err := d.AddFans(context.TODO(), int64(275152561), int64(121212), model.RelationAttention)
	convey.Convey("添加粉丝到up主", t, func() {
		convey.So(err, convey.ShouldBeNil)
	})
}

// Test_delfans removes the fan added by Test_addfans.
func Test_delfans(t *testing.T) {
	err := d.DelFans(context.TODO(), int64(275152561), int64(121212))
	convey.Convey("删除up主的粉丝", t, func() {
		convey.So(err, convey.ShouldBeNil)
	})
}

// Test_fansbyupper verifies Fans after add/delete, including that PGC mode
// hides ordinary-follow fans. It explicitly calls Test_addfans/Test_delfans
// to keep HBase state consistent regardless of test ordering.
func Test_fansbyupper(t *testing.T) {
	Test_addfans(t)
	fans, err := d.Fans(context.TODO(), int64(275152561), false)
	convey.Convey("up主增加一个粉丝后", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(fans), convey.ShouldEqual, 1)
	})
	fans, err = d.Fans(context.TODO(), int64(275152561), true)
	convey.Convey("up主增加一个普通关注粉丝后, pgc稿件只有特殊关注粉丝", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(fans), convey.ShouldEqual, 0)
	})
	Test_delfans(t)
	fans, err = d.Fans(context.TODO(), int64(275152561), false)
	convey.Convey("up主删除一个粉丝后", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(fans), convey.ShouldEqual, 0)
	})
}

// Test_fansbyactive only logs the active-hour lookup result for one fixture
// fan — it makes no assertions (smoke test).
func Test_fansbyactive(t *testing.T) {
	// 18507659 + 37118721 + 88889069
	fan := int64(88889069)
	hour := 21
	table := "dm_member_push_active_hour"
	family := []string{"p"}
	included, err := d.filterFanByActive(context.TODO(), fan, hour, table, family)
	t.Logf("the included(%v) err(%v)", included, err)
}

View File

@@ -0,0 +1,81 @@
package dao
import (
"bytes"
"context"
"crypto/md5"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"sort"
"strconv"
"time"
"go-common/library/log"
)
// wechatResp is the reply envelope of the enterprise-wechat (BAP) message
// API; Status == 0 means the message was accepted, otherwise Msg explains.
type wechatResp struct {
	Msg    string `json:"msg"`
	Status int    `json:"status"`
}
// WechatMessage sends an enterprise-wechat alert with the given content via
// the internal BAP message API, signing the request params with the
// configured secret (see signature). A non-zero response status is turned
// into an error. Title and url are intentionally sent empty.
// NOTE(review): the endpoint URL is hardcoded — consider moving it to conf.
func (d *Dao) WechatMessage(content string) (err error) {
	params := map[string]string{
		"content":   content,
		"timestamp": strconv.FormatInt(time.Now().Unix(), 10),
		"title":     "",
		"token":     d.c.Wechat.Token,
		"type":      "wechat",
		"username":  d.c.Wechat.UserName,
		"url":       "",
	}
	params["signature"] = d.signature(params, d.c.Wechat.Secret)
	b, err := json.Marshal(params)
	if err != nil {
		log.Error("WechatMessage json.Marshal error(%v)", err)
		return
	}
	req, err := http.NewRequest(http.MethodPost, "http://bap.bilibili.co/api/v1/message/add", bytes.NewReader(b))
	if err != nil {
		log.Error("WechatMessage NewRequest error(%v), params(%s)", err, string(b))
		return
	}
	req.Header.Set("Content-Type", "application/json; charset=utf-8")
	res := wechatResp{}
	if err = d.httpClient.Do(context.TODO(), req, &res); err != nil {
		log.Error("WechatMessage Do error(%v), params(%s)", err, string(b))
		return
	}
	if res.Status != 0 {
		err = fmt.Errorf("status(%d) msg(%s)", res.Status, res.Msg)
		log.Error("WechatMessage response error(%v), params(%s)", err, string(b))
		return
	}
	return
}
// signature computes the request signature expected by the BAP message API:
// the params are serialized as a key-sorted, URL-escaped query string
// ("content=xxx&timestamp=xxx..."), the shared secret is appended, and the
// md5 digest of the result is returned as lowercase hex.
func (d *Dao) signature(params map[string]string, secret string) string {
	keys := make([]string, 0, len(params))
	for key := range params {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	var query bytes.Buffer
	for i, key := range keys {
		if i > 0 {
			query.WriteByte('&')
		}
		query.WriteString(url.QueryEscape(key))
		query.WriteByte('=')
		query.WriteString(url.QueryEscape(params[key]))
	}
	digest := md5.New()
	io.WriteString(digest, query.String()+secret)
	return fmt.Sprintf("%x", digest.Sum(nil))
}

View File

@@ -0,0 +1,126 @@
package dao
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"go-common/library/xstr"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
"strconv"
)
// SQL over the push_settings table. Rows with dtime != 0 are soft-deleted
// and excluded from reads; _settingsSQL/_settingsAllSQL are fmt templates
// filled with an id list / id window before execution.
const (
	_settingSQL       = `SELECT value FROM push_settings WHERE mid=? and dtime=0 limit 1`
	_setSettingSQL    = `INSERT INTO push_settings (mid,value) VALUES (?,?) ON DUPLICATE KEY UPDATE value=?`
	_settingsSQL      = `SELECT mid,value FROM push_settings WHERE mid IN(%s) and dtime=0`
	_settingsAllSQL   = `SELECT mid,value FROM push_settings WHERE id > %s AND id <= %s`
	_settingsMaxIDSQL = `SELECT MAX(id) AS mx FROM push_settings`
)
// Setting loads the push setting stored for mid. A missing row is not an
// error: both return values are nil.
func (d *Dao) Setting(c context.Context, mid int64) (st *model.Setting, err error) {
	var raw string
	err = d.settingStmt.QueryRow(c, mid).Scan(&raw)
	switch {
	case err == sql.ErrNoRows:
		return nil, nil
	case err != nil:
		log.Error("d.Setting(%d) error(%v)", mid, err)
		PromError("db:获取用户配置")
		return
	}
	st = new(model.Setting)
	if err = json.Unmarshal([]byte(raw), &st); err != nil {
		log.Error("json.Unmarshal(%s) error(%v)", raw, err)
	}
	return
}
// SetSetting upserts the JSON-serialized setting for mid
// (INSERT ... ON DUPLICATE KEY UPDATE).
func (d *Dao) SetSetting(c context.Context, mid int64, st *model.Setting) (err error) {
	var payload []byte
	if payload, err = json.Marshal(st); err != nil {
		log.Error("json.Marshal error(%v)", err)
		return
	}
	if _, err = d.setSettingStmt.Exec(c, mid, payload, payload); err != nil {
		log.Error("setSetting Exec mid(%d) error(%v)", mid, err)
		PromError("db:保存用户设置")
	}
	return
}
// Settings loads the settings of a batch of mids in one query. Mids without
// a stored (non-deleted) row are simply absent from the result map.
func (d *Dao) Settings(c context.Context, mids []int64) (res map[int64]*model.Setting, err error) {
	res = make(map[int64]*model.Setting, len(mids))
	rows, err := d.db.Query(c, fmt.Sprintf(_settingsSQL, xstr.JoinInts(mids)))
	if err != nil {
		log.Error("d.db.Query() error(%v)", err)
		PromError("db:批量查询用户设置")
		return
	}
	// BUG FIX: rows was never closed, leaking the underlying connection on
	// every call and on every early return below.
	defer rows.Close()
	for rows.Next() {
		var mid int64
		var v string
		if err = rows.Scan(&mid, &v); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			PromError("db:批量查询用户设置")
			return
		}
		st := new(model.Setting)
		if err = json.Unmarshal([]byte(v), &st); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", v, err)
			return
		}
		res[mid] = st
	}
	// BUG FIX: surface an error that terminated row iteration early;
	// previously it was silently dropped and a partial map returned as ok.
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
		PromError("db:批量查询用户设置")
	}
	return
}
// SettingsAll loads every push_settings row with id in (startID, endID]
// into the caller-supplied map (used to page through the whole table
// together with SettingsMaxID).
func (d *Dao) SettingsAll(c context.Context, startID int64, endID int64, res *map[int64]*model.Setting) (err error) {
	start := strconv.FormatInt(startID, 10)
	end := strconv.FormatInt(endID, 10)
	rows, err := d.db.Query(c, fmt.Sprintf(_settingsAllSQL, start, end))
	if err != nil {
		log.Error("d.db.Query() error(%v)", err)
		PromError("db:查询全部用户设置")
		return
	}
	// BUG FIX: rows was never closed, leaking the underlying connection on
	// every call and on every early return below.
	defer rows.Close()
	for rows.Next() {
		var mid int64
		var v string
		if err = rows.Scan(&mid, &v); err != nil {
			log.Error("rows.Scan() error(%v)", err)
			PromError("db:查询用户设置")
			return
		}
		st := new(model.Setting)
		if err = json.Unmarshal([]byte(v), &st); err != nil {
			log.Error("json.Unmarshal(%s) error(%v)", v, err)
			return
		}
		(*res)[mid] = st
	}
	// BUG FIX: surface an error that terminated row iteration early;
	// previously a partial result was returned as success.
	if err = rows.Err(); err != nil {
		log.Error("rows.Err() error(%v)", err)
		PromError("db:查询全部用户设置")
	}
	return
}
// SettingsMaxID returns MAX(id) of push_settings, used as the upper bound
// when paging with SettingsAll. A result-less query is treated as 0 rows
// (mx stays at its zero value) rather than an error.
func (d *Dao) SettingsMaxID(c context.Context) (mx int64, err error) {
	err = d.settingsMaxIDStmt.QueryRow(c).Scan(&mx)
	if err == nil {
		return
	}
	if err == sql.ErrNoRows {
		return mx, nil
	}
	log.Error("d.settingsMaxIDStmt.QueryRow.Scan error(%v)", err)
	PromError("db:查询用户最大ID")
	return
}

View File

@@ -0,0 +1,55 @@
package dao
import (
"context"
"encoding/json"
"testing"
"time"
"go-common/app/interface/main/push-archive/model"
"github.com/smartystreets/goconvey/convey"
)
func Test_mxID(t *testing.T) {
_, err := d.SettingsMaxID(context.TODO())
convey.Convey("获取最大的设置id", t, func() {
convey.So(err, convey.ShouldBeNil)
})
}
// Test_settingsall exercises SettingsAll over two id ranges against the test
// database: the (2,3) range is expected to match nothing in the fixture data,
// while (0,10) is expected to return rows. Results accumulate into one map.
func Test_settingsall(t *testing.T) {
	res := make(map[int64]*model.Setting)
	start, end := int64(2), int64(3)
	err := d.SettingsAll(context.TODO(), start, end, &res)
	convey.Convey("batch search settings", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(res), convey.ShouldEqual, 0)
	})
	start, end = int64(0), int64(10)
	err = d.SettingsAll(context.TODO(), start, end, &res)
	convey.Convey("batch search settings", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(len(res), convey.ShouldBeGreaterThan, 0)
	})
}
// Test_statistics inserts one push-statistics row built from a marshalled fan
// list and expects exactly one affected row. Requires the test database.
func Test_statistics(t *testing.T) {
	fans := []int64{1, 2, 3, 4, 5}
	b, err1 := json.Marshal(fans)
	ps := model.PushStatistic{
		Aid:         int64(101),
		Group:       "ai:pushlist_follow_recent",
		Type:        model.StatisticsUnpush,
		Mids:        string(b),
		MidsCounter: len(fans),
		CTime:       time.Now(),
	}
	rows, err := d.SetStatistics(context.TODO(), &ps)
	convey.Convey("添加统计数据", t, func() {
		convey.So(err1, convey.ShouldBeNil)
		convey.So(err, convey.ShouldBeNil)
		convey.So(rows, convey.ShouldEqual, 1)
	})
}

View File

@@ -0,0 +1,83 @@
package dao
import (
"bytes"
"context"
"fmt"
"mime/multipart"
"net/http"
"strconv"
"strings"
"time"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
"go-common/library/xstr"
)
// _response is the push platform's reply envelope. Code is the business
// status (0 = success). Data == 0 is treated as a failed push by NoticeFans;
// presumably it carries the number of accepted targets — confirm against the
// push service's API docs.
type _response struct {
	Code int `json:"code"`
	Data int `json:"data"`
}
// NoticeFans pushes the new-archive notification to the given fans through
// the push platform's batch HTTP API. params must contain:
// "archive" (*model.Archive), "group" (string), "msgTemplate" (string),
// "uuid" (string) and "relationType" (int); missing/mistyped entries panic
// by contract, as before.
// Fix: msgTemplate is expected to format into "title\r\nbody"; a template
// without the "\r\n" separator previously panicked on sp[1] and now degrades
// to an empty alert body instead.
func (d *Dao) NoticeFans(fans *[]int64, params map[string]interface{}) (err error) {
	arc := params["archive"].(*model.Archive)
	group := strings.TrimSpace(params["group"].(string))
	msgTemplate := params["msgTemplate"].(string)
	uuid := params["uuid"].(string)
	relationType := params["relationType"].(int)
	author := "UP主"
	if arc.Author != "" {
		author = fmt.Sprintf(`“%s”`, arc.Author)
	}
	// normal and special follows are pushed under different business accounts
	businessID := d.c.Push.BusinessID
	businessToken := d.c.Push.BusinessToken
	if relationType == model.RelationSpecial {
		businessID = d.c.Push.BusinessSpecialID
		businessToken = d.c.Push.BusinessSpecialToken
	}
	msg := fmt.Sprintf(msgTemplate, author, arc.Title)
	// split "title\r\nbody"; guard against a template with no separator
	sp := strings.SplitN(msg, "\r\n", 2)
	title := sp[0]
	body := ""
	if len(sp) > 1 {
		body = sp[1]
	}
	buf := new(bytes.Buffer)
	w := multipart.NewWriter(buf)
	w.WriteField("group", group) // experiment group name (its data table name)
	w.WriteField("app_id", "1")  // 1 = bilibili main app
	w.WriteField("business_id", strconv.Itoa(businessID))
	w.WriteField("alert_title", title)
	w.WriteField("alert_body", body)
	w.WriteField("mids", xstr.JoinInts(*fans))
	w.WriteField("link_type", "2") // 2 = archive play page
	w.WriteField("link_value", strconv.FormatInt(arc.ID, 10))
	w.WriteField("uuid", uuid)
	// special-follow pushes need client >= v5.20.0; stale iPad builds are excluded
	w.WriteField("builds", `{"2":{"Build":6500,"Condition":"gte"}, "3":{"Build":0,"Condition":"lt"}, "1":{"Build":519010,"Condition":"gte"}}`)
	w.Close()
	query := map[string]string{
		"ts":     strconv.FormatInt(time.Now().Unix(), 10),
		"appkey": d.c.HTTPClient.Key,
	}
	query["sign"] = d.signature(query, d.c.HTTPClient.Secret)
	url := fmt.Sprintf("%s?ts=%s&appkey=%s&sign=%s", d.c.Push.AddAPI, query["ts"], query["appkey"], query["sign"])
	req, err := http.NewRequest(http.MethodPost, url, buf)
	if err != nil {
		log.Error("http.NewRequest(%s) error(%v) uuid(%s)", url, err, uuid)
		PromError("http:NewRequest")
		return
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	req.Header.Set("Authorization", fmt.Sprintf("token=%s", businessToken))
	res := &_response{}
	if err = d.httpClient.Do(context.TODO(), req, &res); err != nil {
		log.Error("httpClient.Do() error(%v)", err)
		PromError("http:Do")
		return
	}
	if res.Code != 0 || res.Data == 0 {
		log.Error("push failed archive(%d) upper(%d) fans_total(%d) group(%s) response(%+v)", arc.ID, arc.Mid, len(*fans), group, res)
	} else {
		log.Info("push success archive(%d) upper(%d) fans_total(%d) group(%s) response(%+v)", arc.ID, arc.Mid, len(*fans), group, res)
	}
	return
}

View File

@@ -0,0 +1,162 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"go-common/app/interface/main/push-archive/model"
"go-common/library/cache/redis"
"go-common/library/log"
)
// Redis key templates / fixed keys used by the throttling and statistics caches.
const (
	_prefixUpperLimit    = "pau_%d"                   // per-upper push-frequency marker, keyed by upper mid
	_prefixFanLimit      = "paf_%d"                   // per-fan total push counter, keyed by fan mid (suffix added per relation type)
	_statisticsKey       = "statistics_push_archive"  // redis list used as a statistics queue (LPUSH/RPOP)
	_prefixPerUpperLimit = "perup_%d_%d"              // per fan+upper push counter, keyed by (fan mid, upper mid)
)
// do runs a single redis command against the shared pool, always returning
// the connection when finished. key is sent as the command's first argument,
// followed by args.
func (d *Dao) do(c context.Context, command string, key string, args ...interface{}) (reply interface{}, err error) {
	conn := d.redis.Get(c)
	defer conn.Close()
	params := make([]interface{}, 0, len(args)+1)
	params = append(params, key)
	params = append(params, args...)
	reply, err = conn.Do(command, params...)
	return
}
// upperLimitKey builds the redis key that throttles pushes for one upper.
func upperLimitKey(mid int64) string {
	key := fmt.Sprintf(_prefixUpperLimit, mid)
	return key
}
// pingRedis verifies redis connectivity by writing a sentinel key.
func (d *Dao) pingRedis(c context.Context) (err error) {
	_, err = d.do(c, "SET", "PING", "PONG")
	if err != nil {
		PromError("redis: ping remote")
		log.Error("remote redis: conn.Do(SET,PING,PONG) error(%v)", err)
	}
	return
}
// ExistUpperLimitCache reports whether the upper's push-limit marker is set,
// i.e. the upper was pushed recently and should be throttled.
func (d *Dao) ExistUpperLimitCache(c context.Context, upper int64) (exist bool, err error) {
	key := upperLimitKey(upper)
	exist, err = redis.Bool(d.do(c, "EXISTS", key))
	if err != nil {
		PromError("redis:读取upper推送限制")
		log.Error("ExistUpperLimitCache do(EXISTS, %s) error(%v)", key, err)
	}
	return
}
// AddUpperLimitCache marks the upper as recently pushed. The marker expires
// after d.UpperLimitExpire seconds, at which point pushes are allowed again.
func (d *Dao) AddUpperLimitCache(c context.Context, upper int64) (err error) {
	key := upperLimitKey(upper)
	_, err = d.do(c, "SETEX", key, d.UpperLimitExpire, "")
	if err != nil {
		PromError("redis:添加upper推送限制")
		log.Error("AddUpperLimitCache do(SETEX, %s) error(%v)", key, err)
	}
	return
}
// fanLimitKey builds the per-fan total push-limit key. Special-follow
// relations use the bare key; every other relation type gets the type
// appended as a suffix so the counters are tracked separately.
func fanLimitKey(fan int64, relationType int) string {
	base := fmt.Sprintf(_prefixFanLimit, fan)
	if relationType == model.RelationSpecial {
		return base
	}
	return fmt.Sprintf("%s_%d", base, relationType)
}
// GetFanLimitCache returns the fan's current push counter for the given
// relation type. A missing key is not an error: limit stays 0.
// Fix: real redis errors now also increment the Prometheus error counter,
// matching every sibling cache accessor in this file.
func (d *Dao) GetFanLimitCache(c context.Context, fan int64, relationType int) (limit int, err error) {
	key := fanLimitKey(fan, relationType)
	if limit, err = redis.Int(d.do(c, "GET", key)); err != nil {
		if err == redis.ErrNil {
			// key absent: the fan has not been pushed within the window
			err = nil
		} else {
			log.Error("GetFanLimitCache do(GET) error(%v)", err)
			PromError("redis:读取fan推送限制")
		}
	}
	return
}
// AddFanLimitCache stores the fan's push counter with the given TTL
// (expire, seconds), overwriting any previous value.
func (d *Dao) AddFanLimitCache(c context.Context, fan int64, relationType int, value int, expire int32) (err error) {
	key := fanLimitKey(fan, relationType)
	_, err = d.do(c, "SETEX", key, expire, value)
	if err != nil {
		log.Error("AddFanLimitCache do(SETEX) error(%v)", err)
		PromError("redis:添加fan推送限制")
	}
	return
}
// AddStatisticsCache serializes one push statistic and LPUSHes it onto the
// redis queue for asynchronous persistence (drained by GetStatisticsCache).
func (d *Dao) AddStatisticsCache(c context.Context, ps *model.PushStatistic) (err error) {
	var psByte []byte
	if psByte, err = json.Marshal(*ps); err != nil {
		log.Error("AddStatisticsCache json.Marshal error(%v), pushstatistic(%v)", err, ps)
		return
	}
	key := _statisticsKey
	_, err = d.do(c, "LPUSH", key, string(psByte))
	if err != nil {
		log.Error("AddStatisticsCache do(LPUSH, %s) error(%v) pushstatistic(%v)", key, err, ps)
		PromError("redis:添加统计数据")
	}
	return
}
// GetStatisticsCache RPOPs one serialized statistic from the redis queue and
// decodes it. An empty queue yields (nil, nil).
func (d *Dao) GetStatisticsCache(c context.Context) (ps *model.PushStatistic, err error) {
	key := _statisticsKey
	var psStr string
	psStr, err = redis.String(d.do(c, "RPOP", key))
	if err != nil {
		if err == redis.ErrNil {
			// queue drained: not an error
			err = nil
		} else {
			log.Error("GetStatisticsCache do(RPOP, %s) error(%v)", key, err)
		}
		return
	}
	err = json.Unmarshal([]byte(psStr), &ps)
	if err != nil {
		log.Error("GetStatisticsCache json.Unmarshal error(%v), ps(%s)", err, psStr)
	}
	return
}
// perUpperLimitKey builds the redis key counting pushes from one specific
// upper to one specific fan.
func perUpperLimitKey(fan int64, upper int64) string {
	key := fmt.Sprintf(_prefixPerUpperLimit, fan, upper)
	return key
}
// GetPerUpperLimitCache returns how many times this upper has already been
// pushed to this fan. A missing key is not an error: limit stays 0.
// Fix: real redis errors now also increment the Prometheus error counter,
// matching every sibling cache accessor in this file.
func (d *Dao) GetPerUpperLimitCache(c context.Context, fan int64, upper int64) (limit int, err error) {
	key := perUpperLimitKey(fan, upper)
	if limit, err = redis.Int(d.do(c, "GET", key)); err != nil {
		if err == redis.ErrNil {
			// key absent: no pushes from this upper within the window
			err = nil
		} else {
			log.Error("GetPerUpperLimitCache do(GET, %s) error(%v)", key, err)
			PromError("redis:读取perupper推送限制")
		}
	}
	return
}
// AddPerUpperLimitCache stores the fan's per-upper push counter with the
// given TTL (expire, seconds), overwriting any previous value.
func (d *Dao) AddPerUpperLimitCache(c context.Context, fan int64, upper int64, value int, expire int32) (err error) {
	key := perUpperLimitKey(fan, upper)
	_, err = d.do(c, "SETEX", key, expire, value)
	if err != nil {
		log.Error("AddPerUpperLimitCache do(SETEX, %s, %d, %d) error(%v)", key, expire, value, err)
		PromError("redis:添加perupper推送限制")
	}
	return
}

View File

@@ -0,0 +1,84 @@
package dao
import (
"context"
"encoding/json"
"testing"
"time"
"go-common/app/interface/main/push-archive/model"
"github.com/smartystreets/goconvey/convey"
)
// Test_upperlimit verifies the upper push-limit marker lifecycle against a
// live redis: absent at first, present after AddUpperLimitCache, and gone
// again after the 1s TTL elapses (hence the 2s sleep).
func Test_upperlimit(t *testing.T) {
	upper := int64(998)
	d.UpperLimitExpire = 1 // 1s
	exist, err := d.ExistUpperLimitCache(context.TODO(), upper)
	convey.Convey("upper主推送频率限制没存储过", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(exist, convey.ShouldEqual, false)
	})
	err = d.AddUpperLimitCache(context.TODO(), upper)
	convey.Convey("upper主推送频率限制,添加推送1次再次获取已存在, 失效后不存在", t, func() {
		convey.So(err, convey.ShouldBeNil)
		exist, err = d.ExistUpperLimitCache(context.TODO(), upper)
		convey.So(err, convey.ShouldBeNil)
		convey.So(exist, convey.ShouldEqual, true)
		time.Sleep(2 * time.Second)
		exist, err = d.ExistUpperLimitCache(context.TODO(), upper)
		convey.So(err, convey.ShouldBeNil)
		convey.So(exist, convey.ShouldEqual, false)
	})
}
// Test_statisticscache checks that popping from an empty statistics queue
// returns (nil, nil), then pushes one statistic carrying 1000 mids onto the
// redis queue. Requires a live (and initially empty) test redis.
func Test_statisticscache(t *testing.T) {
	ps, err := d.GetStatisticsCache(context.TODO())
	convey.Convey("从redis获取统计数据, 没有数据", t, func() {
		convey.So(err, convey.ShouldBeNil)
		convey.So(ps, convey.ShouldBeNil)
	})
	per := int64(1000)
	start := int64(1000000)
	var mids []int64
	for i := start; i < start+per; i++ {
		mids = append(mids, i)
	}
	midscount := len(mids)
	midsstr, _ := json.Marshal(mids)
	ps = &model.PushStatistic{
		Aid:         int64(121321),
		Group:       "ai:pushlist_offline_up",
		Type:        1,
		Mids:        string(midsstr),
		MidsCounter: midscount,
		CTime:       time.Now(),
	}
	err = d.AddStatisticsCache(context.TODO(), ps)
	convey.Convey("添加统计数据到redis", t, func() {
		convey.So(err, convey.ShouldBeNil)
	})
}
// Test_perupperlimit verifies the per-(fan, upper) counter lifecycle against
// a live redis: set with a 1s TTL, readable as 1, then back to 0 after the
// TTL elapses (hence the 2s sleep).
func Test_perupperlimit(t *testing.T) {
	upper := int64(10)
	fan := int64(20)
	err := d.AddPerUpperLimitCache(context.TODO(), fan, upper, 1, 1)
	convey.Convey("添加推送次数限制", t, func() {
		convey.So(err, convey.ShouldEqual, nil)
	})
	total, err := d.GetPerUpperLimitCache(context.TODO(), fan, upper)
	convey.Convey("获取推送次数限制, 失效后不存在", t, func() {
		convey.So(err, convey.ShouldEqual, nil)
		convey.So(total, convey.ShouldEqual, 1)
		time.Sleep(time.Second * 2)
		total, err = d.GetPerUpperLimitCache(context.TODO(), fan, upper)
		convey.So(err, convey.ShouldEqual, nil)
		convey.So(total, convey.ShouldEqual, 0)
	})
}

View File

@@ -0,0 +1,25 @@
package dao
import (
"context"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// TestDao_GetStatisticsIDRange checks that the id range of statistics rows
// created before a fixed deadline is well-formed (min <= max); both are 0
// when no rows qualify. Requires the test database.
func TestDao_GetStatisticsIDRange(t *testing.T) {
	Convey("GetStatisticsIDRange", t, func() {
		deadline, _ := time.Parse("2006-01-02 15:04:05", "2018-05-01 00:00:00")
		min, max, err := d.GetStatisticsIDRange(context.TODO(), deadline)
		So(err, ShouldBeNil)
		So(min, ShouldBeLessThanOrEqualTo, max)
	})
}
// TestDao_DelStatisticsByID checks that deleting statistics rows in the id
// range [1, 10] succeeds (deleting zero rows is not an error).
func TestDao_DelStatisticsByID(t *testing.T) {
	Convey("DelStatisticsByID", t, func() {
		_, err := d.DelStatisticsByID(context.TODO(), 1, 10)
		So(err, ShouldBeNil)
	})
}

View File

@@ -0,0 +1,49 @@
package dao
import (
"context"
"time"
"go-common/app/interface/main/push-archive/model"
"go-common/library/log"
)
// SQL for the push_statistics table. Backticks are required because
// `group` and `type` are MySQL reserved words.
const (
	// insert one statistics row
	_inStatisticsSQL = "INSERT INTO `push_statistics` (`aid`, `group`, `type`, `mids`, `mids_counter`, `ctime`, `mtime`) VALUES(?,?,?,?,?,?,?);"
	// id range of rows created before a deadline; COALESCE yields (0, 0) when empty
	_statisticsIDRangeSQL = "SELECT coalesce(min(id), 0), coalesce(max(id) , 0) FROM `push_statistics` WHERE `ctime` < ?"
	// delete rows with id in the inclusive [min, max] range
	_delStatisticsByIDSQL = "DELETE FROM `push_statistics` WHERE `id` >=? AND `id`<=?;"
)
// SetStatistics persists one push-statistics record and returns the number
// of affected rows (mtime is stamped with the current time).
func (d *Dao) SetStatistics(ctx context.Context, st *model.PushStatistic) (rows int64, err error) {
	res, err := d.setStatisticsStmt.Exec(ctx, st.Aid, st.Group, st.Type, st.Mids, st.MidsCounter, st.CTime, time.Now())
	if err != nil {
		log.Error("SetStatistics() d.setStatisticsStmt.Exec error(%v), pushstatistic(%v)", err, st)
		PromError("db:保存统计数据")
		return
	}
	return res.RowsAffected()
}
// GetStatisticsIDRange returns the smallest and largest statistics row ids
// created before deadline; the query COALESCEs both to 0 when no row matches.
func (d *Dao) GetStatisticsIDRange(ctx context.Context, deadline time.Time) (min int64, max int64, err error) {
	err = d.db.QueryRow(ctx, _statisticsIDRangeSQL, deadline).Scan(&min, &max)
	if err != nil {
		log.Error("GetStatisticsIDRange() error(%v), deadline(%v)", err, deadline)
		PromError("db:查询统计数据")
	}
	return
}
// DelStatisticsByID deletes statistics rows with id in the inclusive
// [min, max] range and returns the number of deleted rows.
// Fix: the error log previously reported the wrong function name
// ("DelStatistics()"), which would misdirect anyone grepping the logs.
func (d *Dao) DelStatisticsByID(ctx context.Context, min, max int64) (rows int64, err error) {
	res, err := d.db.Exec(ctx, _delStatisticsByIDSQL, min, max)
	if err != nil {
		log.Error("DelStatisticsByID() error(%v), min(%d) max(%d)", err, min, max)
		PromError("db:删除统计数据")
		return
	}
	rows, err = res.RowsAffected()
	return
}