Create & Init Project...
66  app/infra/discovery/dao/BUILD  Normal file
@@ -0,0 +1,66 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = [
        "guard_test.go",
        "node_test.go",
        "nodes_test.go",
        "registry_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/infra/discovery/conf:go_default_library",
        "//app/infra/discovery/model:go_default_library",
        "//library/ecode:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/net/netutil/breaker:go_default_library",
        "//library/time:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
        "//vendor/gopkg.in/h2non/gock.v1:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "guard.go",
        "node.go",
        "nodes.go",
        "registry.go",
    ],
    importpath = "go-common/app/infra/discovery/dao",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/infra/discovery/conf:go_default_library",
        "//app/infra/discovery/model:go_default_library",
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/net/http/blademaster:go_default_library",
        "//library/sync/errgroup:go_default_library",
        "//library/xstr:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
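Review note: with these targets in place, the package should presumably be buildable and testable through Bazel from the repository root, e.g. `bazel test //app/infra/discovery/dao:go_default_test` (a hypothetical invocation; the exact form depends on go-common's WORKSPACE setup, which is not part of this commit).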
60  app/infra/discovery/dao/guard.go  Normal file
@@ -0,0 +1,60 @@
package dao

import (
    "sync"
    "sync/atomic"

    "go-common/library/log"
)

const (
    _percentThreshold float64 = 0.85
)

// Guard counts the renews of all operations for self-protection.
type Guard struct {
    expPerMin    int64
    expThreshold int64
    facInMin     int64
    facLastMin   int64
    lock         sync.RWMutex
}

func (g *Guard) setExp(cnt int64) {
    g.lock.Lock()
    g.expPerMin = cnt * 2
    g.expThreshold = int64(float64(g.expPerMin) * _percentThreshold)
    g.lock.Unlock()
}

func (g *Guard) incrExp() {
    g.lock.Lock()
    g.expPerMin = g.expPerMin + 2
    g.expThreshold = int64(float64(g.expPerMin) * _percentThreshold)
    g.lock.Unlock()
}

func (g *Guard) updateFac() {
    atomic.StoreInt64(&g.facLastMin, atomic.SwapInt64(&g.facInMin, 0))
}

func (g *Guard) decrExp() {
    g.lock.Lock()
    if g.expPerMin > 0 {
        g.expPerMin = g.expPerMin - 2
        g.expThreshold = int64(float64(g.expPerMin) * _percentThreshold)
    }
    g.lock.Unlock()
}

func (g *Guard) incrFac() {
    atomic.AddInt64(&g.facInMin, 1)
}

func (g *Guard) ok() (is bool) {
    is = atomic.LoadInt64(&g.facLastMin) < atomic.LoadInt64(&g.expThreshold)
    if is {
        log.Warn("discovery is protected, actual renews(%d) less than expected renews(%d)", atomic.LoadInt64(&g.facLastMin), atomic.LoadInt64(&g.expThreshold))
    }
    return
}
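Review note: the self-protection math above is easiest to see with concrete numbers. A minimal standalone sketch (not part of the commit; it only re-uses the same 0.85 threshold value): every registered instance is expected to renew twice per minute, and eviction is suppressed while last minute's actual renews stay below 85% of that expectation.

package main

import "fmt"

// Illustrates the Guard arithmetic with 10 registered instances.
func main() {
    const percentThreshold = 0.85
    instances := int64(10)
    expPerMin := instances * 2                                    // 20 expected renews per minute
    expThreshold := int64(float64(expPerMin) * percentThreshold)  // 17
    actualLastMin := int64(12)                                    // e.g. a partition dropped some heartbeats
    protected := actualLastMin < expThreshold
    fmt.Println(expPerMin, expThreshold, protected) // 20 17 true -> evict() only removes long-dead instances
}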
66  app/infra/discovery/dao/guard_test.go  Normal file
@@ -0,0 +1,66 @@
package dao

import (
    "testing"

    . "github.com/smartystreets/goconvey/convey"
)

func TestIncrExp(t *testing.T) {
    Convey("test IncrExp", t, func() {
        re := new(Guard)
        re.incrExp()
        So(re.expPerMin, ShouldResemble, int64(2))
    })
}

func TestDecrExp(t *testing.T) {
    Convey("test DecrExp", t, func() {
        re := new(Guard)
        re.incrExp()
        re.decrExp()
        So(re.expPerMin, ShouldResemble, int64(0))
    })
}

func TestSetExp(t *testing.T) {
    Convey("test SetExp", t, func() {
        re := new(Guard)
        re.setExp(10)
        So(re.expPerMin, ShouldResemble, int64(20))
        So(re.expThreshold, ShouldResemble, int64(17))
    })
}

func TestUpdateFac(t *testing.T) {
    Convey("test UpdateFac", t, func() {
        re := new(Guard)
        re.incrFac()
        re.updateFac()
        So(re.facLastMin, ShouldResemble, int64(1))
    })
}

func TestIncrFac(t *testing.T) {
    Convey("test IncrFac", t, func() {
        re := new(Guard)
        re.incrFac()
        So(re.facInMin, ShouldResemble, int64(1))
    })
}

func TestIsProtected(t *testing.T) {
    Convey("test isProtected", t, func() {
        re := new(Guard)
        re.incrExp()
        re.incrExp()
        re.incrFac()
        re.updateFac()
        So(re.ok(), ShouldBeTrue)
        re = new(Guard)
        re.incrExp()
        re.incrFac()
        re.updateFac()
        So(re.ok(), ShouldBeFalse)
    })
}
179  app/infra/discovery/dao/node.go  Normal file
@@ -0,0 +1,179 @@
package dao

import (
    "context"
    "encoding/json"
    "fmt"
    "net/url"
    "strconv"
    "strings"

    "go-common/app/infra/discovery/conf"
    "go-common/app/infra/discovery/model"
    "go-common/library/ecode"
    "go-common/library/log"
    bm "go-common/library/net/http/blademaster"
    "go-common/library/xstr"
)

const (
    _registerURL = "/discovery/register"
    _cancelURL   = "/discovery/cancel"
    _renewURL    = "/discovery/renew"
    _setURL      = "/discovery/set"
)

// Node represents a peer node to which information should be shared from this node.
//
// This struct handles replicating all update operations like 'Register, Renew, Cancel, Expiration and Status Changes'
// to the <Discovery Server> node it represents.
type Node struct {
    c            *conf.Config
    client       *bm.Client
    pRegisterURL string
    registerURL  string
    cancelURL    string
    renewURL     string
    setURL       string
    addr         string
    status       model.NodeStatus
    zone         string
    otherZone    bool
}

// newNode returns a new node.
func newNode(c *conf.Config, addr string) (n *Node) {
    n = &Node{
        c:           c,
        addr:        addr,
        registerURL: fmt.Sprintf("http://%s%s", addr, _registerURL),
        cancelURL:   fmt.Sprintf("http://%s%s", addr, _cancelURL),
        renewURL:    fmt.Sprintf("http://%s%s", addr, _renewURL),
        setURL:      fmt.Sprintf("http://%s%s", addr, _setURL),
        client:      bm.NewClient(c.HTTPClient),
        status:      model.NodeStatusLost,
    }
    return
}

// Register sends the registration information of an Instance received by this node to the peer node it represents.
func (n *Node) Register(c context.Context, i *model.Instance) (err error) {
    err = n.call(c, model.Register, i, n.registerURL, nil)
    if err != nil {
        log.Warn("node be called(%s) register instance(%v) error(%v)", n.registerURL, i, err)
    }
    return
}

// Cancel sends the cancellation information of an Instance received by this node to the peer node it represents.
func (n *Node) Cancel(c context.Context, i *model.Instance) (err error) {
    err = n.call(c, model.Cancel, i, n.cancelURL, nil)
    if ec := ecode.Cause(err); ec.Code() == ecode.NothingFound.Code() {
        log.Warn("node be called(%s) instance(%v) already canceled", n.cancelURL, i)
    }
    return
}

// Renew sends the heartbeat information of an Instance received by this node to the peer node it represents.
// If the instance does not exist on the peer node, the registration information is sent to that node again.
func (n *Node) Renew(c context.Context, i *model.Instance) (err error) {
    var res *model.Instance
    err = n.call(c, model.Renew, i, n.renewURL, &res)
    ec := ecode.Cause(err)
    if ec.Code() == ecode.ServerErr.Code() {
        log.Warn("node be called(%s) instance(%v) error(%v)", n.renewURL, i, err)
        n.status = model.NodeStatusLost
        return
    }
    n.status = model.NodeStatusUP
    if ec.Code() == ecode.NothingFound.Code() {
        log.Warn("node be called(%s) instance(%v) error(%v)", n.renewURL, i, err)
        err = n.call(c, model.Register, i, n.registerURL, nil)
        return
    }
    // NOTE: register the response instance which is in conflict with the peer node
    if ec.Code() == ecode.Conflict.Code() && res != nil {
        err = n.call(c, model.Register, res, n.pRegisterURL, nil)
    }
    return
}

// Set sends the instance information held by this node to the peer node it represents.
func (n *Node) Set(c context.Context, arg *model.ArgSet) (err error) {
    err = n.setCall(c, arg, n.setURL)
    return
}

func (n *Node) call(c context.Context, action model.Action, i *model.Instance, uri string, data interface{}) (err error) {
    params := url.Values{}
    params.Set("region", i.Region)
    params.Set("zone", i.Zone)
    params.Set("env", i.Env)
    params.Set("treeid", strconv.FormatInt(i.Treeid, 10))
    params.Set("appid", i.Appid)
    params.Set("hostname", i.Hostname)
    if n.otherZone {
        params.Set("replication", "false")
    } else {
        params.Set("replication", "true")
    }
    switch action {
    case model.Register:
        params.Set("status", strconv.FormatUint(uint64(i.Status), 10))
        params.Set("version", i.Version)
        meta, _ := json.Marshal(i.Metadata)
        params.Set("metadata", string(meta))
        params.Set("addrs", strings.Join(i.Addrs, ","))
        params.Set("reg_timestamp", strconv.FormatInt(i.RegTimestamp, 10))
        params.Set("dirty_timestamp", strconv.FormatInt(i.DirtyTimestamp, 10))
        params.Set("latest_timestamp", strconv.FormatInt(i.LatestTimestamp, 10))
    case model.Renew:
        params.Set("dirty_timestamp", strconv.FormatInt(i.DirtyTimestamp, 10))
    case model.Cancel:
        params.Set("latest_timestamp", strconv.FormatInt(i.LatestTimestamp, 10))
    }
    var res struct {
        Code int             `json:"code"`
        Data json.RawMessage `json:"data"`
    }
    if err = n.client.Post(c, uri, "", params, &res); err != nil {
        log.Error("node be called(%s) instance(%v) error(%v)", uri, i, err)
        return
    }
    if res.Code != 0 {
        log.Error("node be called(%s) instance(%v) response code(%v)", uri, i, res.Code)
        if err = ecode.Int(res.Code); err == ecode.Conflict {
            json.Unmarshal([]byte(res.Data), data)
        }
        return
    }
    return
}

func (n *Node) setCall(c context.Context, arg *model.ArgSet, uri string) (err error) {
    params := url.Values{}
    params.Set("region", arg.Region)
    params.Set("zone", arg.Zone)
    params.Set("env", arg.Env)
    params.Set("appid", arg.Appid)
    params.Set("hostname", strings.Join(arg.Hostname, ","))
    params.Set("set_timestamp", strconv.FormatInt(arg.SetTimestamp, 10))
    params.Set("replication", "true")
    if len(arg.Status) != 0 {
        params.Set("status", xstr.JoinInts(arg.Status))
    }
    if len(arg.Metadata) != 0 {
        params.Set("metadata", strings.Join(arg.Metadata, ","))
    }
    var res struct {
        Code int `json:"code"`
    }
    if err = n.client.Post(c, uri, "", params, &res); err != nil {
        log.Error("node be setCalled(%s) appid(%s) env(%s) error(%v)", uri, arg.Appid, arg.Env, err)
        return
    }
    if res.Code != 0 {
        log.Error("node be setCalled(%s) appid(%s) env(%s) response code(%v)", uri, arg.Appid, arg.Env, res.Code)
    }
    return
}
111  app/infra/discovery/dao/node_test.go  Normal file
@@ -0,0 +1,111 @@
package dao

import (
    "context"
    "strings"
    "testing"
    "time"

    dc "go-common/app/infra/discovery/conf"
    "go-common/app/infra/discovery/model"
    "go-common/library/ecode"
    bm "go-common/library/net/http/blademaster"
    "go-common/library/net/netutil/breaker"
    xtime "go-common/library/time"

    . "github.com/smartystreets/goconvey/convey"
    gock "gopkg.in/h2non/gock.v1"
)

func TestCall(t *testing.T) {
    Convey("test call", t, func() {
        var res *model.Instance
        node := newNode(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"127.0.0.1:7171"}}, "api.bilibili.co")
        node.client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/register").Reply(200).JSON(`{"ts":1514341945,"code":-409,"data":{"region":"shsb","zone":"fuck","appid":"main.arch.account-service","env":"pre","hostname":"cs4sq","http":"","rpc":"0.0.0.0:18888","weight":2}}`)
        i := model.NewInstance(reg)
        err := node.call(context.TODO(), model.Register, i, "http://api.bilibili.co/discovery/register", &res)
        So(err, ShouldResemble, ecode.Conflict)
        So(res.Appid, ShouldResemble, "main.arch.account-service")
    })
}

func TestNodeCancel(t *testing.T) {
    Convey("test node cancel", t, func() {
        i := model.NewInstance(reg)
        node := newNode(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"127.0.0.1:7171"}}, "api.bilibili.co")
        node.pRegisterURL = "http://127.0.0.1:7171/discovery/register"
        node.client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/cancel").Reply(200).JSON(`{"code":0}`)
        err := node.Cancel(context.TODO(), i)
        So(err, ShouldBeNil)
    })
}

func TestNodeRenew(t *testing.T) {
    Convey("test node renew 409 error", t, func() {
        i := model.NewInstance(reg)
        node := newNode(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"127.0.0.1:7171"}}, "api.bilibili.co")
        node.pRegisterURL = "http://127.0.0.1:7171/discovery/register"
        node.client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/renew").Reply(200).JSON(`{"code":-409,"data":{"region":"shsb","zone":"fuck","appid":"main.arch.account-service","env":"pre","hostname":"cs4sq","http":"","rpc":"0.0.0.0:18888","weight":2}}`)
        httpMock("POST", "http://127.0.0.1:7171/discovery/register").Reply(200).JSON(`{"code":0}`)
        err := node.Renew(context.TODO(), i)
        So(err, ShouldBeNil)
    })
}

func TestNodeRenew2(t *testing.T) {
    Convey("test node renew 404 error", t, func() {
        i := model.NewInstance(reg)
        node := newNode(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"127.0.0.1:7171"}}, "api.bilibili.co")
        node.client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/renew").Reply(200).JSON(`{"code":-404}`)
        httpMock("POST", "http://api.bilibili.co/discovery/register").Reply(200).JSON(`{"code":0}`)
        err := node.Renew(context.TODO(), i)
        So(err, ShouldBeNil)
    })
}

func TestSet(t *testing.T) {
    Convey("test set", t, func() {
        node := newNode(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"127.0.0.1:7171"}}, "api.bilibili.co")
        node.client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/set").Reply(200).JSON(`{"ts":1514341945,"code":0}`)
        set := &model.ArgSet{
            Region:   "shsb",
            Env:      "pre",
            Appid:    "main.arch.account-service",
            Hostname: []string{"test1"},
            Status:   []int64{1},
        }
        err := node.Set(context.TODO(), set)
        So(err, ShouldBeNil)
    })
}

func httpMock(method, url string) *gock.Request {
    r := gock.New(url)
    r.Method = strings.ToUpper(method)
    return r
}
170  app/infra/discovery/dao/nodes.go  Normal file
@@ -0,0 +1,170 @@
package dao

import (
    "context"
    "fmt"
    "math/rand"

    "go-common/app/infra/discovery/conf"
    "go-common/app/infra/discovery/model"
    "go-common/library/sync/errgroup"
)

// Nodes is a helper to manage the lifecycle of a collection of Nodes.
type Nodes struct {
    nodes    []*Node
    zones    map[string][]*Node
    selfAddr string
}

// NewNodes creates the nodes and returns them.
func NewNodes(c *conf.Config) *Nodes {
    nodes := make([]*Node, 0, len(c.Nodes))
    for _, addr := range c.Nodes {
        n := newNode(c, addr)
        n.pRegisterURL = fmt.Sprintf("http://%s%s", c.BM.Inner.Addr, _registerURL)
        nodes = append(nodes, n)
    }
    zones := make(map[string][]*Node)
    for name, addrs := range c.Zones {
        var znodes []*Node
        for _, addr := range addrs {
            n := newNode(c, addr)
            n.otherZone = true
            n.zone = name
            n.pRegisterURL = fmt.Sprintf("http://%s%s", c.BM.Inner.Addr, _registerURL)
            znodes = append(znodes, n)
        }
        zones[name] = znodes
    }
    return &Nodes{
        nodes:    nodes,
        zones:    zones,
        selfAddr: c.BM.Inner.Addr,
    }
}

// Replicate replicates information to all nodes except this node.
func (ns *Nodes) Replicate(c context.Context, action model.Action, i *model.Instance, otherZone bool) (err error) {
    if len(ns.nodes) == 0 {
        return
    }
    eg, c := errgroup.WithContext(c)
    for _, n := range ns.nodes {
        if !ns.Myself(n.addr) {
            ns.action(c, eg, action, n, i)
        }
    }
    if !otherZone {
        for _, zns := range ns.zones {
            if n := len(zns); n > 0 {
                ns.action(c, eg, action, zns[rand.Intn(n)], i)
            }
        }
    }
    err = eg.Wait()
    return
}

func (ns *Nodes) action(c context.Context, eg *errgroup.Group, action model.Action, n *Node, i *model.Instance) {
    switch action {
    case model.Register:
        eg.Go(func() error {
            n.Register(c, i)
            return nil
        })
    case model.Renew:
        eg.Go(func() error {
            n.Renew(c, i)
            return nil
        })
    case model.Cancel:
        eg.Go(func() error {
            n.Cancel(c, i)
            return nil
        })
    }
}

// ReplicateSet replicates set information to all nodes except this node.
func (ns *Nodes) ReplicateSet(c context.Context, arg *model.ArgSet, otherZone bool) (err error) {
    if len(ns.nodes) == 0 {
        return
    }
    eg, c := errgroup.WithContext(c)
    for _, n := range ns.nodes {
        if !ns.Myself(n.addr) {
            n := n // capture the loop variable so each goroutine targets its own node
            eg.Go(func() error {
                return n.Set(c, arg)
            })
        }
    }
    if !otherZone {
        for _, zns := range ns.zones {
            if n := len(zns); n > 0 {
                node := zns[rand.Intn(n)]
                eg.Go(func() error {
                    return node.Set(c, arg)
                })
            }
        }
    }
    err = eg.Wait()
    return
}

// Nodes returns the nodes of the local zone.
func (ns *Nodes) Nodes() (nsi []*model.Node) {
    nsi = make([]*model.Node, 0, len(ns.nodes))
    for _, nd := range ns.nodes {
        if nd.otherZone {
            continue
        }
        node := &model.Node{
            Addr:   nd.addr,
            Status: nd.status,
            Zone:   nd.zone,
        }
        nsi = append(nsi, node)
    }
    return
}

// AllNodes returns all nodes, including other-zone nodes.
func (ns *Nodes) AllNodes() (nsi []*model.Node) {
    nsi = make([]*model.Node, 0, len(ns.nodes))
    for _, nd := range ns.nodes {
        node := &model.Node{
            Addr:   nd.addr,
            Status: nd.status,
            Zone:   nd.zone,
        }
        nsi = append(nsi, node)
    }
    for _, zns := range ns.zones {
        if n := len(zns); n > 0 {
            nd := zns[rand.Intn(n)]
            node := &model.Node{
                Addr:   nd.addr,
                Status: nd.status,
                Zone:   nd.zone,
            }
            nsi = append(nsi, node)
        }
    }
    return
}

// Myself reports whether addr is this node's own address.
func (ns *Nodes) Myself(addr string) bool {
    return ns.selfAddr == addr
}

// UP marks the status of this node as up.
func (ns *Nodes) UP() {
    for _, nd := range ns.nodes {
        if ns.Myself(nd.addr) {
            nd.status = model.NodeStatusUP
        }
    }
}
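Review note: the replication fan-out above is driven by whoever owns the Nodes set (presumably the service layer, which is not part of this commit). A minimal usage sketch, reusing the config shape the tests below construct; the function and package name are placeholders:

package discoveryexample

import (
    "context"

    "go-common/app/infra/discovery/conf"
    "go-common/app/infra/discovery/dao"
    "go-common/app/infra/discovery/model"
)

// replicateRegister sketches how a caller would fan a registration out to the peers.
func replicateRegister(c context.Context, cfg *conf.Config, ins *model.Instance) error {
    ns := dao.NewNodes(cfg) // peers come from cfg.Nodes and cfg.Zones
    ns.UP()                 // mark our own address (cfg.BM.Inner.Addr) as up
    // otherZone=false: besides local-zone peers, also push one copy to a random node of every other zone.
    return ns.Replicate(c, model.Register, ins, false)
}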
82  app/infra/discovery/dao/nodes_test.go  Normal file
@@ -0,0 +1,82 @@
package dao

import (
    "context"
    "testing"
    "time"

    dc "go-common/app/infra/discovery/conf"
    "go-common/app/infra/discovery/model"
    bm "go-common/library/net/http/blademaster"
    "go-common/library/net/netutil/breaker"
    xtime "go-common/library/time"

    . "github.com/smartystreets/goconvey/convey"
    gock "gopkg.in/h2non/gock.v1"
)

func TestReplicate(t *testing.T) {
    Convey("test replicate", t, func() {
        i := model.NewInstance(reg)
        nodes := NewNodes(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"api.bilibili.co", "uat-bilibili.co", "127.0.0.1:7171"}})
        nodes.nodes[0].client.SetTransport(gock.DefaultTransport)
        nodes.nodes[1].client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/register").Reply(200).JSON(`{"code":0}`)
        httpMock("POST", "http://uat-bilibili.co/discovery/register").Reply(200).JSON(`{"code":0}`)
        err := nodes.Replicate(context.TODO(), model.Register, i, false)
        So(err, ShouldBeNil)
    })
}

func TestReplicateSet(t *testing.T) {
    Convey("test replicate set", t, func() {
        nodes := NewNodes(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"api.bilibili.co"}})
        nodes.nodes[0].client.SetTransport(gock.DefaultTransport)
        httpMock("POST", "http://api.bilibili.co/discovery/set").Reply(200).JSON(`{"code":0}`)
        set := &model.ArgSet{
            Region:   "shsb",
            Env:      "pre",
            Appid:    "main.arch.account-service",
            Hostname: []string{"test1"},
            Status:   []int64{1},
        }
        err := nodes.ReplicateSet(context.TODO(), set, false)
        So(err, ShouldBeNil)
    })
}

func TestNodes(t *testing.T) {
    Convey("test nodes", t, func() {
        nodes := NewNodes(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"api.bilibili.co", "uat-bilibili.co", "127.0.0.1:7171"}})
        res := nodes.Nodes()
        So(len(res), ShouldResemble, 3)
    })
}

func TestUp(t *testing.T) {
    Convey("test up", t, func() {
        nodes := NewNodes(&dc.Config{HTTPClient: &bm.ClientConfig{Breaker: &breaker.Config{Window: xtime.Duration(time.Second),
            Sleep:   xtime.Duration(time.Millisecond * 100),
            Bucket:  10,
            Ratio:   0.5,
            Request: 100}, Timeout: xtime.Duration(time.Second), App: &bm.App{Key: "0c4b8fe3ff35a4b6", Secret: "b370880d1aca7d3a289b9b9a7f4d6812"}}, BM: &dc.HTTPServers{Inner: &bm.ServerConfig{Addr: "127.0.0.1:7171"}}, Nodes: []string{"api.bilibili.co", "uat-bilibili.co", "127.0.0.1:7171"}})
        nodes.UP()
        for _, nd := range nodes.nodes {
            if nd.addr == "127.0.0.1:7171" {
                So(nd.status, ShouldResemble, model.NodeStatusUP)
            }
        }
    })
}
387  app/infra/discovery/dao/registry.go  Normal file
@@ -0,0 +1,387 @@
package dao

import (
    "context"
    "fmt"
    "math/rand"
    "strconv"
    "sync"
    "time"

    "go-common/app/infra/discovery/model"
    "go-common/library/ecode"
    "go-common/library/log"
)

const (
    _evictThreshold = int64(90 * time.Second)
    _evictCeiling   = int64(3600 * time.Second)
)

// Registry handles replication of all operations to peer Discovery nodes to keep them all in sync.
type Registry struct {
    appm  map[string]*model.Apps // appid-env -> apps
    aLock sync.RWMutex

    conns map[string]map[string]*conn // zone.env.appid -> host
    cLock sync.RWMutex

    gd *Guard
}

// conn is the poll channel that holds a consumer.
type conn struct {
    ch         chan map[string]*model.InstanceInfo // TODO(felix): increase
    arg        *model.ArgPolls
    latestTime int64
    count      int
}

// newConn creates a new consumer channel.
func newConn(ch chan map[string]*model.InstanceInfo, latestTime int64, arg *model.ArgPolls) *conn {
    return &conn{ch: ch, latestTime: latestTime, arg: arg, count: 1}
}

// NewRegistry creates a new registry.
func NewRegistry() (r *Registry) {
    r = &Registry{
        appm:  make(map[string]*model.Apps),
        conns: make(map[string]map[string]*conn),
        gd:    new(Guard),
    }
    go r.proc()
    return
}

func (r *Registry) newapps(appid, env string) (a *model.Apps, ok bool) {
    key := appsKey(appid, env)
    r.aLock.Lock()
    if a, ok = r.appm[key]; !ok {
        a = model.NewApps()
        r.appm[key] = a
    }
    r.aLock.Unlock()
    return
}

func (r *Registry) apps(appid, env, zone string) (as []*model.App, a *model.Apps, ok bool) {
    key := appsKey(appid, env)
    r.aLock.RLock()
    a, ok = r.appm[key]
    r.aLock.RUnlock()
    if ok {
        as = a.App(zone)
    }
    return
}

func appsKey(appid, env string) string {
    // NOTE: discovery itself does not distinguish between specific environments
    if appid == model.AppID {
        return appid
    }
    return fmt.Sprintf("%s-%s", appid, env)
}

func (r *Registry) newApp(ins *model.Instance) (a *model.App) {
    as, _ := r.newapps(ins.Appid, ins.Env)
    a, _ = as.NewApp(ins.Zone, ins.Appid, ins.Treeid, ins.LatestTimestamp)
    return
}

// Register a new instance.
func (r *Registry) Register(ins *model.Instance, latestTime int64) (err error) {
    a := r.newApp(ins)
    i, ok := a.NewInstance(ins, latestTime)
    if ok {
        r.gd.incrExp()
    }
    // NOTE: make sure to free the poll before updating the appid latest timestamp.
    r.broadcast(i.Env, i.Appid, a)
    return
}

// Renew marks the given instance of the given app name as renewed, and also marks whether it originated from replication.
func (r *Registry) Renew(arg *model.ArgRenew) (i *model.Instance, ok bool) {
    a, _, _ := r.apps(arg.Appid, arg.Env, arg.Zone)
    if len(a) == 0 {
        return
    }
    if i, ok = a[0].Renew(arg.Hostname); !ok {
        return
    }
    r.gd.incrFac()
    return
}

// Cancel cancels the registration of an instance.
func (r *Registry) Cancel(arg *model.ArgCancel) (i *model.Instance, ok bool) {
    if i, ok = r.cancel(arg.Zone, arg.Env, arg.Appid, arg.Hostname, arg.LatestTimestamp); !ok {
        return
    }
    r.gd.decrExp()
    return
}

func (r *Registry) cancel(zone, env, appid, hostname string, latestTime int64) (i *model.Instance, ok bool) {
    var l int
    a, as, _ := r.apps(appid, env, zone)
    if len(a) == 0 {
        return
    }
    if i, l, ok = a[0].Cancel(hostname, latestTime); !ok {
        return
    }
    as.UpdateLatest(latestTime)
    if l == 0 {
        if a[0].Len() == 0 {
            as.Del(zone)
        }
    }
    if len(as.App("")) == 0 {
        r.aLock.Lock()
        delete(r.appm, appsKey(appid, env))
        r.aLock.Unlock()
    }
    r.broadcast(env, appid, a[0]) // NOTE: make sure to free the poll before updating the appid latest timestamp.
    return
}

// FetchAll fetches all instances of all the families.
func (r *Registry) FetchAll() (im map[string][]*model.Instance) {
    ass := r.allapp()
    im = make(map[string][]*model.Instance)
    for _, as := range ass {
        for _, a := range as.App("") {
            im[a.AppID] = append(im[a.AppID], a.Instances()...)
        }
    }
    return
}

// Fetch fetches all instances by appid.
func (r *Registry) Fetch(zone, env, appid string, latestTime int64, status uint32) (info *model.InstanceInfo, err error) {
    key := appsKey(appid, env)
    r.aLock.RLock()
    a, ok := r.appm[key]
    r.aLock.RUnlock()
    if !ok {
        err = ecode.NothingFound
        return
    }
    info, err = a.InstanceInfo(zone, latestTime, status)
    return
}

// Polls hangs the request and then writes instances when there are changes, or returns NotModified.
func (r *Registry) Polls(arg *model.ArgPolls) (ch chan map[string]*model.InstanceInfo, new bool, err error) {
    var (
        ins = make(map[string]*model.InstanceInfo, len(arg.Treeid))
        in  *model.InstanceInfo
    )
    if len(arg.Appid) != len(arg.LatestTimestamp) {
        arg.LatestTimestamp = make([]int64, len(arg.Appid))
    }
    for i := range arg.Appid {
        in, err = r.Fetch(arg.Zone, arg.Env, arg.Appid[i], arg.LatestTimestamp[i], model.InstanceStatusUP)
        if err == ecode.NothingFound {
            log.Error("Polls region(%s) zone(%s) env(%s) appid(%s) error(%v)", arg.Region, arg.Zone, arg.Env, arg.Appid[i], err)
            return
        }
        if err == nil {
            if len(arg.Treeid) != 0 {
                ins[strconv.FormatInt(arg.Treeid[i], 10)] = in
            } else {
                ins[arg.Appid[i]] = in
            }
            new = true
        }
    }
    if new {
        ch = make(chan map[string]*model.InstanceInfo, 1)
        ch <- ins
        return
    }
    r.cLock.Lock()
    for i := range arg.Appid {
        k := appsKey(arg.Appid[i], arg.Env)
        if _, ok := r.conns[k]; !ok {
            r.conns[k] = make(map[string]*conn, 1)
        }
        connection, ok := r.conns[k][arg.Hostname]
        if !ok {
            if ch == nil {
                ch = make(chan map[string]*model.InstanceInfo, 5) // NOTE: there may be more than one connection on the same hostname!!!
            }
            connection = newConn(ch, arg.LatestTimestamp[i], arg)
            log.Info("Polls from(%s) new connection(%d)", arg.Hostname, connection.count)
        } else {
            connection.count++ // NOTE: there may be more than one connection on the same hostname!!!
            if ch == nil {
                ch = connection.ch
            }
            log.Info("Polls from(%s) reuse connection(%d)", arg.Hostname, connection.count)
        }
        r.conns[k][arg.Hostname] = connection
    }
    r.cLock.Unlock()
    return
}

// Polling gets the polling clients.
func (r *Registry) Polling(arg *model.ArgPolling) (resp []string, err error) {
    r.cLock.RLock()
    conns, ok := r.conns[appsKey(arg.Appid, arg.Env)]
    if !ok {
        r.cLock.RUnlock()
        return
    }
    resp = make([]string, 0, len(conns))
    for host := range conns {
        resp = append(resp, host)
    }
    r.cLock.RUnlock()
    return
}

// broadcast to pollers by chan.
// NOTE: make sure to free the poll before updating the appid latest timestamp.
func (r *Registry) broadcast(env, appid string, a *model.App) {
    key := appsKey(appid, env)
    r.cLock.Lock()
    defer r.cLock.Unlock()
    conns, ok := r.conns[key]
    if !ok {
        return
    }
    delete(r.conns, key)
    for _, conn := range conns {
        ii, _ := r.Fetch(conn.arg.Zone, env, appid, 0, model.InstanceStatusUP) // TODO(felix): latesttime != 0 increase
        var key string
        if len(conn.arg.Treeid) != 0 {
            key = strconv.FormatInt(a.Treeid, 10)
        } else {
            key = a.AppID
        }
        for i := 0; i < conn.count; i++ { // NOTE: there may be more than one connection on the same hostname!!!
            select {
            case conn.ch <- map[string]*model.InstanceInfo{key: ii}: // NOTE: if the chan is full, it means there is no poller.
                log.Info("broadcast to(%s) success(%d)", conn.arg.Hostname, i+1)
            case <-time.After(time.Millisecond * 500):
                log.Info("broadcast to(%s) failed(%d) maybe chan full", conn.arg.Hostname, i+1)
            }
        }
    }
}

// Set sets the status of instances by hostname.
func (r *Registry) Set(c context.Context, arg *model.ArgSet) (ok bool) {
    a, _, _ := r.apps(arg.Appid, arg.Env, arg.Zone)
    if len(a) == 0 {
        return
    }
    if ok = a[0].Set(arg); !ok {
        return
    }
    r.broadcast(arg.Env, arg.Appid, a[0])
    return
}

func (r *Registry) allapp() (ass []*model.Apps) {
    r.aLock.RLock()
    ass = make([]*model.Apps, 0, len(r.appm))
    for _, as := range r.appm {
        ass = append(ass, as)
    }
    r.aLock.RUnlock()
    return
}

// resetExp resets the expected renews by counting the instances of all apps; each instance expects two renews per minute.
func (r *Registry) resetExp() {
    cnt := int64(0)
    for _, p := range r.allapp() {
        for _, a := range p.App("") {
            cnt += int64(a.Len())
        }
    }
    r.gd.setExp(cnt)
}

func (r *Registry) proc() {
    tk := time.Tick(1 * time.Minute)
    tk2 := time.Tick(15 * time.Minute)
    for {
        select {
        case <-tk:
            r.gd.updateFac()
            r.evict()
        case <-tk2:
            r.resetExp()
        }
    }
}

func (r *Registry) evict() {
    protect := r.gd.ok()
    // We collect all expired items first, to evict them in random order. For large eviction sets,
    // if we do not do that, we might wipe out whole apps before self-preservation kicks in. By randomizing it,
    // the impact should be evenly distributed across all applications.
    var eis []*model.Instance
    var registrySize int
    // all projects
    ass := r.allapp()
    for _, as := range ass {
        for _, a := range as.App("") {
            registrySize += a.Len()
            is := a.Instances()
            for _, i := range is {
                delta := time.Now().UnixNano() - i.RenewTimestamp
                if (!protect && delta > _evictThreshold) || delta > _evictCeiling {
                    eis = append(eis, i)
                }
            }
        }
    }
    // To compensate for GC pauses or drifting local time, we need to use the current registry size as a base for
    // triggering self-preservation. Without that we would wipe out the full registry.
    eCnt := len(eis)
    registrySizeThreshold := int(float64(registrySize) * _percentThreshold)
    evictionLimit := registrySize - registrySizeThreshold
    if eCnt > evictionLimit {
        eCnt = evictionLimit
    }
    if eCnt == 0 {
        return
    }
    for i := 0; i < eCnt; i++ {
        // Pick a random item (Knuth shuffle algorithm)
        next := i + rand.Intn(len(eis)-i)
        eis[i], eis[next] = eis[next], eis[i]
        ei := eis[i]
        r.cancel(ei.Zone, ei.Env, ei.Appid, ei.Hostname, time.Now().UnixNano())
    }
}

// DelConns deletes the conn of the host for the given appids.
func (r *Registry) DelConns(arg *model.ArgPolls) {
    r.cLock.Lock()
    for i := range arg.Appid {
        k := appsKey(arg.Appid[i], arg.Env)
        conns, ok := r.conns[k]
        if !ok {
            log.Warn("DelConn key(%s) not found", k)
            continue
        }
        if connection, ok := conns[arg.Hostname]; ok {
            if connection.count > 1 {
                log.Info("DelConns from(%s) count decr(%d)", arg.Hostname, connection.count)
                connection.count--
            } else {
                log.Info("DelConns from(%s) delete(%d)", arg.Hostname, connection.count)
                delete(conns, arg.Hostname)
            }
        }
    }
    r.cLock.Unlock()
}
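Review note: the Registry above implements a long-poll contract: Polls returns data immediately when the caller's latest_timestamp is stale, otherwise it parks the caller on a channel that broadcast() later fills after a register, cancel, or set. A minimal in-process sketch of that round trip, mirroring registry_test.go below; the package, function name, and hostname are placeholders, and a and b are assumed to be two instances of the same appid/env/zone:

package discoveryexample

import (
    "fmt"
    "time"

    "go-common/app/infra/discovery/dao"
    "go-common/app/infra/discovery/model"
)

// longPollOnce sketches the hang-and-wake cycle of Registry.Polls / broadcast.
func longPollOnce(r *dao.Registry, a, b *model.Instance) {
    r.Register(a, 0) // the appid exists before anyone polls

    arg := &model.ArgPolls{
        Zone: a.Zone, Env: a.Env,
        Appid:           []string{a.Appid},
        LatestTimestamp: []int64{time.Now().UnixNano()}, // already up to date -> Polls parks the caller
        Hostname:        "example-host",
    }
    ch, _, err := r.Polls(arg)
    fmt.Println("parked:", err) // expected to be ecode.NotModified; ch is the parked channel

    go r.Register(b, 0) // a later change to the same appid wakes the poller via broadcast

    update := <-ch
    fmt.Println("instances:", len(update[a.Appid].Instances)) // 2 once the broadcast lands
}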
454  app/infra/discovery/dao/registry_test.go  Normal file
@@ -0,0 +1,454 @@
package dao

import (
    "context"
    "encoding/json"
    "fmt"
    "sync"
    "testing"
    "time"

    "go-common/app/infra/discovery/model"
    "go-common/library/ecode"

    . "github.com/smartystreets/goconvey/convey"
)

var reg = &model.ArgRegister{Appid: "main.arch.test", Hostname: "reg", RPC: "127.0.0.1:8080", Region: "shsb", Zone: "sh0001", Env: "pre", Status: 1}
var regH1 = &model.ArgRegister{Appid: "main.arch.test", Hostname: "regH1", RPC: "127.0.0.1:8080", Region: "shsb", Zone: "sh0001", Env: "pre", Status: 1}

var reg2 = &model.ArgRegister{Appid: "main.arch.test2", Hostname: "reg2", RPC: "127.0.0.1:8080", Region: "shsb", Zone: "sh0001", Env: "pre", Status: 1}

var arg = &model.ArgRenew{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Hostname: "reg"}
var cancel = &model.ArgCancel{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Hostname: "reg"}
var cancel2 = &model.ArgCancel{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Hostname: "regH1"}

func TestReigster(t *testing.T) {
    i := model.NewInstance(reg)
    register(t, i)
}

func TestDiscovery(t *testing.T) {
    i1 := model.NewInstance(reg)
    i2 := model.NewInstance(regH1)
    fmt.Println(_evictThreshold)
    r := register(t, i1, i2)
    Convey("test discovery", t, func() {
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: []string{"main.arch.test"}, Hostname: "test"}
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
        info, err := r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldBeNil)
        So(len(info.Instances), ShouldEqual, 2)
        ch, _, err := r.Polls(pollArg)
        So(err, ShouldBeNil)
        apps := <-ch
        So(len(apps["main.arch.test"].Instances), ShouldEqual, 2)
        pollArg.LatestTimestamp[0] = apps["main.arch.test"].LatestTimestamp
        fmt.Println(apps["main.arch.test"])
        r.Cancel(cancel)
        ch, _, err = r.Polls(pollArg)
        So(err, ShouldBeNil)
        apps = <-ch
        So(len(apps["main.arch.test"].Instances), ShouldEqual, 1)
        pollArg.LatestTimestamp[0] = apps["main.arch.test"].LatestTimestamp
        r.Cancel(cancel2)
    })
}

func TestRenew(t *testing.T) {
    src := model.NewInstance(reg)
    r := register(t, src)
    Convey("test renew", t, func() {
        i, ok := r.Renew(arg)
        So(ok, ShouldBeTrue)
        So(i, ShouldResemble, src)
    })
}

func BenchmarkRenew(b *testing.B) {
    var (
        i  *model.Instance
        ok bool
    )
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            r, src := benchRegister(b)
            if i, ok = r.Renew(arg); !ok {
                b.Errorf("Renew(%v)", src.Appid)
            }
            benchCompareInstance(b, src, i)
        }
    })
}

func TestCancel(t *testing.T) {
    src := model.NewInstance(reg)
    r := register(t, src)
    Convey("test cancel", t, func() {
        i, ok := r.Cancel(cancel)
        So(ok, ShouldBeTrue)
        So(i, ShouldResemble, src)
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
        _, err := r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldResemble, ecode.NothingFound)
    })
}

func BenchmarkCancel(b *testing.B) {
    var (
        i   *model.Instance
        ok  bool
        err error
    )
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            r, src := benchRegister(b)
            if i, ok = r.Cancel(cancel); !ok {
                b.Errorf("Cancel(%v) error", src.Appid)
            }
            benchCompareInstance(b, src, i)
            fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
            if _, err = r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status); err != ecode.NothingFound {
                b.Errorf("Fetch(%v) error(%v)", src.Appid, err)
            }
        }
    })
}

func TestFetchAll(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    Convey("test fetch all", t, func() {
        am := r.FetchAll()
        So(len(am), ShouldResemble, 1)
    })
}

func BenchmarkFetchAll(b *testing.B) {
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            r, _ := benchRegister(b)
            if am := r.FetchAll(); len(am) != 1 {
                b.Errorf("FetchAll() error")
            }
        }
    })
}

func TestFetch(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    Convey("test fetch", t, func() {
        fetchArg2 := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 1}
        c, err := r.Fetch(fetchArg2.Zone, fetchArg2.Env, fetchArg2.Appid, 0, fetchArg2.Status)
        So(err, ShouldBeNil)
        So(len(c.Instances), ShouldResemble, 1)
    })
}

func BenchmarkFetch(b *testing.B) {
    var (
        err error
        c   *model.InstanceInfo
    )
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            r, _ := benchRegister(b)
            fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 1}
            if c, err = r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status); err != nil {
                b.Errorf("Fetch(%v) error(%v)", arg.Appid, err)
            }
            fetchArg2 := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 2}
            if c, err = r.Fetch(fetchArg2.Zone, fetchArg2.Env, fetchArg2.Appid, 0, fetchArg2.Status); err != nil {
                b.Errorf("Fetch(%v) error(%v)", arg.Appid, err)
            }
            _ = c
        }
    })
}

func TestPoll(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    Convey("test poll", t, func() {
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: []string{"main.arch.test"}, Hostname: "csq"}
        ch, _, err := r.Polls(pollArg)
        So(err, ShouldBeNil)
        c := <-ch
        So(len(c[pollArg.Appid[0]].Instances), ShouldEqual, 1)
    })
}

func TestPolls(t *testing.T) {
    i1 := model.NewInstance(reg)
    i2 := model.NewInstance(reg2)
    r := register(t, i1, i2)
    Convey("test polls", t, func() {
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", LatestTimestamp: []int64{0, 0}, Appid: []string{"main.arch.test", "main.arch.test2"}, Hostname: "csq"}
        ch, new, err := r.Polls(pollArg)
        So(err, ShouldBeNil)
        So(new, ShouldBeTrue)
        c := <-ch
        So(len(c), ShouldResemble, 2)
    })
}

func TestPollsParallel(t *testing.T) {

    i1 := model.NewInstance(reg)
    i2 := model.NewInstance(reg2)
    r := register(t, i1, i2)

    Convey("test polls parallel", t, func(c C) {
        var (
            wg       sync.WaitGroup
            ch1, ch2 chan map[string]*model.InstanceInfo
            new      bool
            err      error
        )
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", LatestTimestamp: []int64{time.Now().UnixNano(), time.Now().UnixNano()}, Appid: []string{"main.arch.test", "main.arch.test2"}, Hostname: "csq"}
        ch1, new, err = r.Polls(pollArg)
        c.So(err, ShouldEqual, ecode.NotModified)
        c.So(new, ShouldBeFalse)
        c.So(ch1, ShouldNotBeNil)
        ch2, new, err = r.Polls(pollArg)
        c.So(err, ShouldEqual, ecode.NotModified)
        c.So(new, ShouldBeFalse)
        c.So(ch2, ShouldNotBeNil)
        // wait group
        wg.Add(2)
        go func() {
            res := <-ch1
            c.So(len(res), ShouldResemble, 1)
            ress, _ := json.Marshal(res)
            fmt.Println("succeeded 1!!!", string(ress))
            wg.Done()
        }()
        go func() {
            res := <-ch2
            c.So(len(res), ShouldResemble, 1)
            ress, _ := json.Marshal(res)
            fmt.Println("succeeded 2!!!", string(ress))
            wg.Done()
        }()
        // re-register 1s later, to make sure latest_timestamp changed
        time.Sleep(time.Second)
        h1 := model.NewInstance(regH1)
        r.Register(h1, 0)
        // wait
        wg.Wait()
    })
}

func BenchmarkPoll(b *testing.B) {
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            var (
                err error
                ch  chan map[string]*model.InstanceInfo
                c   map[string]*model.InstanceInfo
            )
            r, _ := benchRegister(b)
            pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: []string{"main.arch.test"}, Hostname: "csq"}
            if ch, _, err = r.Polls(pollArg); err != nil {
                b.Errorf("Poll(%v) error(%v)", arg.Appid, err)
            }
            if c = <-ch; len(c[pollArg.Appid[0]].Instances) != 1 {
                b.Errorf("Poll(%v) length error", arg.Appid)
            }
        }
    })
}

func TestBroadcast(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    Convey("test poll push connection", t, func() {
        go func() {
            Convey("must poll ahead of time", t, func() {
                time.Sleep(time.Microsecond * 5)
                var arg2 = &model.ArgRegister{Appid: "main.arch.test", Hostname: "go", RPC: "127.0.0.1:8080", Region: "shsb", Zone: "sh0001", Env: "pre", Status: 1}
                m2 := model.NewInstance(arg2)
                err2 := r.Register(m2, 0)
                So(err2, ShouldBeNil)
            })
        }()
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: []string{"main.arch.test"}, LatestTimestamp: []int64{time.Now().UnixNano()}}
        ch, _, err := r.Polls(pollArg)
        So(err, ShouldResemble, ecode.NotModified)
        c := <-ch
        So(len(c[pollArg.Appid[0]].Instances), ShouldResemble, 2)
        So(c[pollArg.Appid[0]].ZoneInstances, ShouldNotBeNil)
        So(len(c[pollArg.Appid[0]].ZoneInstances["sh0001"]), ShouldResemble, 2)
    })
}

func BenchmarkBroadcast(b *testing.B) {
    for i := 0; i < b.N; i++ {
        var (
            err  error
            err2 error
            ch   chan map[string]*model.InstanceInfo
            c    map[string]*model.InstanceInfo
        )
        r, _ := benchRegister(b)
        go func() {
            time.Sleep(time.Millisecond * 1)
            var arg2 = &model.ArgRegister{Appid: "main.arch.test", Hostname: "go", RPC: "127.0.0.1:8080", Region: "shsb", Zone: "sh0001", Env: "pre", Status: 1}
            m2 := model.NewInstance(arg2)
            if err2 = r.Register(m2, 0); err2 != nil {
                b.Errorf("Register(%v) error(%v)", m2.Appid, err2)
            }
        }()
        pollArg := &model.ArgPolls{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: []string{"main.arch.test"}, LatestTimestamp: []int64{time.Now().UnixNano()}}
        if ch, _, err = r.Polls(pollArg); err != nil && err != ecode.NotModified {
            b.Errorf("Poll(%v) error(%v)", pollArg.Appid, err)
        }
        c = <-ch
        if len(c[pollArg.Appid[0]].Instances) != 2 {
            b.Errorf("Poll(%v) length error", pollArg.Appid)
        }
        if c[pollArg.Appid[0]].ZoneInstances == nil {
            b.Errorf("Poll(%v) zone instances nil error", pollArg.Appid)
        }
        if len(c[pollArg.Appid[0]].ZoneInstances["sh0001"]) != 2 {
            b.Errorf("Poll(%v) zone instances length error", pollArg.Appid)
        }
    }
}

func TestRegistrySet(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    changes := make(map[string]string)
    changes["reg"] = "1"
    Convey("test set weight to 1", t, func() {
        set := &model.ArgSet{
            Region:   "shsb",
            Env:      "pre",
            Appid:    "main.arch.test",
            Hostname: []string{"reg"},
            Metadata: []string{`{"weight":"1"}`},
        }
        ok := r.Set(context.TODO(), set)
        So(ok, ShouldBeTrue)
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
        c, err := r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldBeNil)
        So(c.Instances[0].Metadata["weight"], ShouldResemble, "1")
    })
}

func BenchmarkSet(b *testing.B) {
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            var (
                c   *model.InstanceInfo
                err error
                ok  bool
            )
            r, _ := benchRegister(b)
            set := &model.ArgSet{
                Region:   "shsb",
                Env:      "pre",
                Appid:    "main.arch.account-service",
                Hostname: []string{"test1"},
                Status:   []int64{1},
                Metadata: []string{`{"weight":"1"}`},
            }
            if ok = r.Set(context.TODO(), set); !ok {
                b.Errorf("SetWeight(%v) error", arg.Appid)
            }
            fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
            if c, err = r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status); err != nil {
                b.Errorf("Fetch(%v) error(%v)", fetchArg.Appid, err)
            }
            if c.Instances[0].Metadata["weight"] != "1" {
                b.Errorf("SetWeight(%v) change error", fetchArg.Appid)
            }
        }
    })
}

func TestResetExp(t *testing.T) {
    i := model.NewInstance(reg)
    r := register(t, i)
    Convey("test ResetExp", t, func() {
        r.resetExp()
        So(r.gd.expPerMin, ShouldResemble, int64(2))
    })
}

func benchCompareInstance(b *testing.B, src *model.Instance, i *model.Instance) {
    if src.Appid != i.Appid || src.Env != i.Env || src.Hostname != i.Hostname ||
        src.Region != i.Region {
        b.Errorf("instance compare error")
    }
}

func register(t *testing.T, is ...*model.Instance) (r *Registry) {
    Convey("test register", t, func() {
        r = NewRegistry()
        var num int
        for _, i := range is {
            err := r.Register(i, 0)
            So(err, ShouldBeNil)
            if i.Appid == "main.arch.test" {
                num++
            }
        }
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
        instancesInfo, err := r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldBeNil)
        So(len(instancesInfo.Instances), ShouldResemble, num)
    })
    return r
}

func benchRegister(b *testing.B) (r *Registry, i *model.Instance) {
    r = NewRegistry()
    i = model.NewInstance(reg)
    if err := r.Register(i, 0); err != nil {
        b.Errorf("Register(%v) error(%v)", i.Appid, err)
    }
    return r, i
}

func TestEvict(t *testing.T) {
    Convey("test evict for protect", t, func() {
        r := NewRegistry()
        m := model.NewInstance(reg)
        // make sure the renew time of the instance has expired
        m.RenewTimestamp -= 100
        err := r.Register(m, 0)
        So(err, ShouldBeNil)
        // move up the heartbeat statistics for evict
        r.gd.facLastMin = r.gd.facInMin
        r.evict()
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 3}
        c, err := r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldBeNil)
        // protect
        So(len(c.Instances), ShouldResemble, 1)
    })
}

func TestEvict2(t *testing.T) {
    Convey("test evict for cancel", t, func() {
        r := NewRegistry()
        m := model.NewInstance(reg)
        err := r.Register(m, 0)
        So(err, ShouldBeNil)
        _, ok := r.Renew(arg)
        So(ok, ShouldBeTrue)
        // make sure the renew time of the instance has expired
        m.RenewTimestamp -= int64(time.Second * 100)
        r.Register(m, 0)
        // move up the heartbeat statistics for evict
        r.gd.facLastMin = r.gd.facInMin
        r.evict()
        fetchArg := &model.ArgFetch{Region: "shsb", Zone: "sh0001", Env: "pre", Appid: "main.arch.test", Status: 1}
        _, err = r.Fetch(fetchArg.Zone, fetchArg.Env, fetchArg.Appid, 0, fetchArg.Status)
        So(err, ShouldResemble, ecode.NothingFound)
    })
}