Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,59 @@
# Bazel build definitions for the wrr balancer package.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated
# (e.g. by gazelle) — confirm before editing by hand.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests compiled together with the library sources.
go_test(
    name = "go_default_test",
    srcs = ["wrr_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//library/net/metadata:go_default_library",
        "//library/net/rpc/warden/metadata:go_default_library",
        "@org_golang_google_grpc//balancer:go_default_library",
        "@org_golang_google_grpc//resolver:go_default_library",
    ],
)

# The wrr balancer library itself.
go_library(
    name = "go_default_library",
    srcs = ["wrr.go"],
    importpath = "go-common/library/net/rpc/warden/balancer/wrr",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/log:go_default_library",
        "//library/net/metadata:go_default_library",
        "//library/net/rpc/warden/metadata:go_default_library",
        "//library/stat/summary:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//balancer:go_default_library",
        "@org_golang_google_grpc//balancer/base:go_default_library",
        "@org_golang_google_grpc//codes:go_default_library",
        "@org_golang_google_grpc//metadata:go_default_library",
        "@org_golang_google_grpc//resolver:go_default_library",
        "@org_golang_google_grpc//status:go_default_library",
    ],
)

# Source filegroups used by the repository-wide srcs aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/net/rpc/warden/balancer/wrr/test:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,14 @@
### business/warden/balancer/wrr
##### Version 1.2.1
1. 删除了 netflix ribbon 的权重算法,改成了平方根算法
##### Version 1.2.0
1. 实现了动态计算的调度轮询算法(使用了服务端的成功率数据,替换基于本地计算的成功率数据)
##### Version 1.1.0
1. 实现了动态计算的调度轮询算法
##### Version 1.0.0
1. 实现了带权重可以识别Color的轮询算法

View File

@@ -0,0 +1,8 @@
# Owner
caoguoliang
# Author
caoguoliang
# Reviewer
maojian

View File

@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- caoguoliang
reviewers:
- caoguoliang
- maojian

View File

@@ -0,0 +1,13 @@
#### business/warden/balancer/wrr
##### 项目简介
warden 的 weighted round robin负载均衡模块主要用于为每个RPC请求返回一个Server节点以供调用
##### 编译环境
- **请只用 Golang v1.9.x 以上版本编译执行**
##### 依赖包
- [grpc](https://google.golang.org/grpc)

View File

@@ -0,0 +1,51 @@
# Bazel build definitions for the wrr balancer integration-test package.
# NOTE(review): "automanaged" tags suggest tool generation — confirm before
# hand-editing.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# End-to-end balancer tests (spin up real warden servers/clients).
go_test(
    name = "go_default_test",
    srcs = ["base_test.go"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//library/conf/env:go_default_library",
        "//library/naming:go_default_library",
        "//library/net/netutil/breaker:go_default_library",
        "//library/net/rpc/warden:go_default_library",
        "//library/net/rpc/warden/balancer/wrr:go_default_library",
        "//library/net/rpc/warden/proto/testproto:go_default_library",
        "//library/net/rpc/warden/resolver:go_default_library",
        "//library/time:go_default_library",
        "@org_golang_google_grpc//:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/net/rpc/warden/balancer/wrr/test/client:all-srcs",
        "//library/net/rpc/warden/balancer/wrr/test/server:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_library(
    name = "go_default_library",
    srcs = ["base.go"],
    importpath = "go-common/library/net/rpc/warden/balancer/wrr/test",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1 @@
package test

View File

@@ -0,0 +1,298 @@
package test
import (
"context"
"io"
"log"
"os"
"sync"
"testing"
"time"
"go-common/library/conf/env"
"go-common/library/naming"
"go-common/library/net/netutil/breaker"
"go-common/library/net/rpc/warden"
"go-common/library/net/rpc/warden/balancer/wrr"
pb "go-common/library/net/rpc/warden/proto/testproto"
"go-common/library/net/rpc/warden/resolver"
xtime "go-common/library/time"
"google.golang.org/grpc"
)
// testBuilder is a naming.Builder stub whose instance list can be swapped at
// runtime by the tests.
type testBuilder struct {
	addrs []*naming.Instance
}

// testDiscovery is the naming.Resolver produced by testBuilder; it exposes a
// watch channel so tests can push address updates via set().
type testDiscovery struct {
	mu sync.Mutex
	b  *testBuilder
	id string
	ch chan struct{}
}

// Build returns a resolver bound to the given app id that shares the
// builder's mutable address list.
func (b *testBuilder) Build(id string) naming.Resolver {
	return &testDiscovery{id: id, b: b}
}

// Scheme reports the scheme name this builder registers under.
func (b *testBuilder) Scheme() string {
	return "testbuilder"
}
// Fetch returns the current instance snapshot keyed by zone, reporting false
// when no instances have been registered yet.
func (d *testDiscovery) Fetch(ctx context.Context) (map[string][]*naming.Instance, bool) {
	d.mu.Lock()
	insts := d.b.addrs
	d.mu.Unlock()
	if len(insts) == 0 {
		return nil, false
	}
	return map[string][]*naming.Instance{env.Zone: insts}, true
}
// Watch lazily creates and returns the notification channel; set() signals it
// whenever the instance list changes. The buffer of 1 lets a signal be queued
// even when nobody is currently receiving.
func (d *testDiscovery) Watch() <-chan struct{} {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.ch == nil {
		d.ch = make(chan struct{}, 1)
	}
	return d.ch
}

// Close is a no-op for the test resolver.
func (d *testDiscovery) Close() error {
	return nil
}

// Scheme reports the resolver scheme used in the dial target.
func (d *testDiscovery) Scheme() string {
	return "discovery"
}
// set swaps the builder's instance list and performs a best-effort,
// non-blocking notification of any watcher.
func (d *testDiscovery) set(insts []*naming.Instance) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.b.addrs = insts
	select {
	case d.ch <- struct{}{}:
		// watcher notified
	default:
		// no watcher yet (nil channel) or a signal is already pending: drop it
	}
}
// TestMain boots three local warden servers, registers the fake discovery
// resolver, publishes the initial instance list, builds the shared client,
// runs the suite, then shuts everything down.
func TestMain(m *testing.M) {
	s1 := runServer(":18080")
	s2 := runServer(":18081")
	s3 := runServer(":18082")
	b = &testBuilder{}
	resolver.Register(b)
	dis = b.Build("test_app").(*testDiscovery)
	// Publish the instance list shortly after the client starts resolving:
	// 18080 carries weight "100", 18081 is colored "red", 18082 has no metadata.
	go func() {
		time.Sleep(time.Millisecond * 10)
		dis.set([]*naming.Instance{{
			Addrs:    []string{"grpc://127.0.0.1:18080"},
			AppID:    "test_app",
			Metadata: map[string]string{"weight": "100"},
		}, {
			Addrs:    []string{"grpc://127.0.0.1:18081"},
			AppID:    "test_app",
			Metadata: map[string]string{"color": "red"},
		}, {
			Addrs: []string{"grpc://127.0.0.1:18082"},
			AppID: "test_app",
		}})
	}()
	c = newClient()
	// give the resolver/balancer time to observe the initial instances
	time.Sleep(time.Millisecond * 30)
	ret := m.Run()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*1)
	defer cancel()
	s1.Shutdown(ctx)
	s2.Shutdown(ctx)
	s3.Shutdown(ctx)
	os.Exit(ret)
}
// helloServer echoes its listen address so tests can tell which backend
// served a request.
type helloServer struct {
	addr string
}

// SayHello replies with the server's listen address as the message body.
func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	return &pb.HelloReply{Message: s.addr}, nil
}
// StreamHello echoes up to three greetings back on the stream, stopping early
// on EOF or any transport error.
func (s *helloServer) StreamHello(ss pb.Greeter_StreamHelloServer) error {
	for count := 0; count < 3; count++ {
		req, err := ss.Recv()
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
		if err := ss.Send(&pb.HelloReply{Message: "Hello " + req.Name, Success: true}); err != nil {
			return err
		}
	}
	return nil
}
// runServer starts a warden server on addr in a background goroutine and
// returns it so TestMain can shut it down; a failed Run panics the process.
func runServer(addr string) *warden.Server {
	server := warden.NewServer(&warden.ServerConfig{Timeout: xtime.Duration(time.Second)})
	pb.RegisterGreeterServer(server.Server(), &helloServer{addr: addr})
	go func() {
		err := server.Run(addr)
		if err != nil {
			panic("run server failed!" + err.Error())
		}
	}()
	return server
}
// newClient dials the fake discovery target with the wrr balancer configured
// and returns a Greeter client backed by that connection. The process exits
// via log.Fatalf when the dial fails.
func newClient() (client pb.GreeterClient) {
	c := warden.NewClient(&warden.ClientConfig{
		Dial:    xtime.Duration(time.Second * 10),
		Timeout: xtime.Duration(time.Second * 10),
		Breaker: &breaker.Config{
			Window:  xtime.Duration(3 * time.Second),
			Sleep:   xtime.Duration(3 * time.Second),
			Bucket:  10,
			Ratio:   0.3,
			Request: 20,
		},
	},
		grpc.WithBalancerName(wrr.Name),
	)
	conn, err := c.Dial(context.Background(), "discovery://authority/111")
	if err != nil {
		log.Fatalf("can't not connect: %v", err)
	}
	client = pb.NewGreeterClient(conn)
	return
}
// Shared fixtures initialized in TestMain and used by all sub-tests.
var b *testBuilder
var dis *testDiscovery
var c pb.GreeterClient
// TestBalancer runs the sub-scenarios sequentially; they mutate the shared
// discovery state, so ordering matters.
func TestBalancer(t *testing.T) {
	testBalancerBasic(t)
	testBalancerFailover(t)
	testBalancerUpdateColor(t)
	testBalancerUpdateScore(t)
}
// testBalancerBasic expects 18080 and 18082 to each serve 3 of 6 uncolored
// requests, with the "red" instance 18081 receiving none — TODO(review):
// confirm why weights 100 vs default yield an even split here.
func testBalancerBasic(t *testing.T) {
	time.Sleep(time.Millisecond * 10)
	var idx8080 int
	var idx8082 int
	for i := 0; i < 6; i++ {
		resp, err := c.SayHello(context.Background(), &pb.HelloRequest{Age: 123, Name: "asdasd"})
		if err != nil {
			t.Fatalf("testBalancerBasic: say hello failed!err:=%v", err)
		}
		// servers echo their listen address in Message
		if resp.Message == ":18082" {
			idx8082++
		} else if resp.Message == ":18080" {
			idx8080++
		}
	}
	if idx8080 != 3 {
		t.Fatalf("testBalancerBasic: server 18080 response times should be 3")
	}
	if idx8082 != 3 {
		t.Fatalf("testBalancerBasic: server 18082 response times should be 3")
	}
}
// testBalancerFailover removes 18082 from discovery and expects all four
// uncolored requests to land on 18080 (18081 is excluded by its "red" color).
func testBalancerFailover(t *testing.T) {
	dis.set([]*naming.Instance{{
		Addrs:    []string{"grpc://127.0.0.1:18080"},
		AppID:    "test_app",
		Metadata: map[string]string{"weight": "100"},
	}, {
		Addrs:    []string{"grpc://127.0.0.1:18081"},
		AppID:    "test_app",
		Metadata: map[string]string{"color": "red"},
	}})
	// wait for the balancer to apply the update
	time.Sleep(time.Millisecond * 20)
	var idx8080 int
	var idx8082 int
	for i := 0; i < 4; i++ {
		resp, err := c.SayHello(context.Background(), &pb.HelloRequest{Age: 123, Name: "asdasd"})
		if err != nil {
			t.Fatalf("testBalancerFailover: say hello failed!err:=%v", err)
		}
		if resp.Message == ":18082" {
			idx8082++
		} else if resp.Message == ":18080" {
			idx8080++
		}
	}
	if idx8080 != 4 {
		t.Fatalf("testBalancerFailover: server 8080 response should be 4")
	}
}
// testBalancerUpdateColor clears 18081's color so it rejoins the default
// picker, expecting a 2/2 split of four requests between 18080 and 18081.
func testBalancerUpdateColor(t *testing.T) {
	dis.set([]*naming.Instance{{
		Addrs:    []string{"grpc://127.0.0.1:18080"},
		AppID:    "test_app",
		Metadata: map[string]string{"weight": "100"},
	}, {
		Addrs: []string{"grpc://127.0.0.1:18081"},
		AppID: "test_app",
	}})
	// wait for the balancer to apply the update
	time.Sleep(time.Millisecond * 30)
	var idx8080 int
	var idx8081 int
	for i := 0; i < 4; i++ {
		resp, err := c.SayHello(context.Background(), &pb.HelloRequest{Age: 123, Name: "asdasd"})
		if err != nil {
			t.Fatalf("testBalancerUpdateColor: say hello failed!err:=%v", err)
		}
		if resp.Message == ":18081" {
			idx8081++
		} else if resp.Message == ":18080" {
			idx8080++
		}
	}
	if idx8080 != 2 {
		t.Fatalf("testBalancerUpdateColor: server 8080 response should be 2")
	}
	if idx8081 != 2 {
		t.Fatalf("testBalancerUpdateColor: server 8081 response should be 2")
	}
}
// testBalancerUpdateScore raises 18081's weight to 300 (vs 100 on 18080) and
// expects the four requests to split 1/3 accordingly.
//
// Fix: the failure messages previously said "should be 2" while the
// assertions actually check for 1 and 3, which made failures misleading.
func testBalancerUpdateScore(t *testing.T) {
	dis.set([]*naming.Instance{{
		Addrs:    []string{"grpc://127.0.0.1:18080"},
		AppID:    "test_app",
		Metadata: map[string]string{"weight": "100"},
	}, {
		Addrs:    []string{"grpc://127.0.0.1:18081"},
		AppID:    "test_app",
		Metadata: map[string]string{"weight": "300"},
	}})
	// wait for the balancer to apply the update
	time.Sleep(time.Millisecond * 10)
	var idx8080 int
	var idx8081 int
	for i := 0; i < 4; i++ {
		resp, err := c.SayHello(context.Background(), &pb.HelloRequest{Age: 123, Name: "asdasd"})
		if err != nil {
			t.Fatalf("testBalancerUpdateScore: say hello failed!err:=%v", err)
		}
		if resp.Message == ":18081" {
			idx8081++
		} else if resp.Message == ":18080" {
			idx8080++
		}
	}
	if idx8080 != 1 {
		t.Fatalf("testBalancerUpdateScore: server 8080 response should be 1")
	}
	if idx8081 != 3 {
		t.Fatalf("testBalancerUpdateScore: server 8081 response should be 3")
	}
}

View File

@@ -0,0 +1,43 @@
# Bazel build definitions for the wrr stress-test client binary.
# NOTE(review): "automanaged" tags suggest tool generation — confirm before
# hand-editing.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "client",
    embed = [":go_default_library"],
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["client.go"],
    importpath = "go-common/library/net/rpc/warden/balancer/wrr/test/client",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/exp/feature:go_default_library",
        "//library/log:go_default_library",
        "//library/net/rpc/warden:go_default_library",
        "//library/net/rpc/warden/proto/testproto:go_default_library",
        "//library/net/rpc/warden/resolver:go_default_library",
        "//library/net/rpc/warden/resolver/direct:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,73 @@
package main
import (
"context"
"flag"
"fmt"
"sync/atomic"
"time"
"go-common/library/exp/feature"
"go-common/library/log"
"go-common/library/net/rpc/warden"
pb "go-common/library/net/rpc/warden/proto/testproto"
"go-common/library/net/rpc/warden/resolver"
"go-common/library/net/rpc/warden/resolver/direct"
)
var addrs string
var cli pb.GreeterClient
var concurrency int
var name string

// req and qps are accessed atomically by the worker goroutines and calcuQPS.
var req int64
var qps int64

// init wires up logging and the command-line flags.
func init() {
	log.Init(&log.Config{Stdout: false})
	flag.StringVar(&addrs, "addr", "127.0.0.1:8000,127.0.0.1:8001", "-addr 127.0.0.1:8080,127.0.0.1:8081")
	flag.IntVar(&concurrency, "c", 3, "-c 5")
	flag.StringVar(&name, "name", "test", "-name test")
}
// main parses flags, enables the "dwrr" feature gate, dials the -addr
// backends through the direct resolver, and spawns `concurrency` workers that
// issue a SayHello every 5ms; QPS is reported by calcuQPS in the background.
func main() {
	go calcuQPS()
	feature.DefaultGate.AddFlag(flag.CommandLine)
	flag.Parse()
	feature.DefaultGate.SetFromMap(map[string]bool{"dwrr": true})
	resolver.Register(direct.New())
	c := warden.NewClient(nil)
	conn, err := c.Dial(context.Background(), fmt.Sprintf("direct://d/%s", addrs))
	if err != nil {
		panic(err)
	}
	cli = pb.NewGreeterClient(conn)
	for i := 0; i < concurrency; i++ {
		go func() {
			for {
				say()
				time.Sleep(time.Millisecond * 5)
			}
		}()
	}
	// keep the process alive while the workers run
	time.Sleep(time.Hour)
}
// calcuQPS samples the global request counter every 5 seconds, stores the
// derived requests-per-second into qps, and prints it. It never returns; run
// it in its own goroutine.
func calcuQPS() {
	var prev int64
	for {
		time.Sleep(time.Second * 5)
		cur := atomic.LoadInt64(&req)
		atomic.StoreInt64(&qps, (cur-prev)/5)
		prev = cur
		// Fix: the traffic here is gRPC; the old "HTTP QPS" label was misleading.
		fmt.Println("gRPC QPS: ", atomic.LoadInt64(&qps))
	}
}
// say issues one SayHello RPC with a 1s deadline and counts it as a request
// only when the call succeeds with Success set.
func say() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	reply, err := cli.SayHello(ctx, &pb.HelloRequest{Name: name, Age: 10})
	if err == nil && reply.Success {
		atomic.AddInt64(&req, 1)
	}
}

View File

@@ -0,0 +1,41 @@
# Bazel build definitions for the wrr stress-test server binary.
# NOTE(review): "automanaged" tags suggest tool generation — confirm before
# hand-editing.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "server",
    embed = [":go_default_library"],
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["server.go"],
    importpath = "go-common/library/net/rpc/warden/balancer/wrr/test/server",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/ecode:go_default_library",
        "//library/log:go_default_library",
        "//library/net/rpc/warden:go_default_library",
        "//library/net/rpc/warden/proto/testproto:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,91 @@
package main
import (
"context"
"flag"
"fmt"
"hash/crc32"
"io"
"math/rand"
"sync/atomic"
"time"
"go-common/library/ecode"
"go-common/library/log"
"go-common/library/net/rpc/warden"
pb "go-common/library/net/rpc/warden/proto/testproto"
)
var (
	// req and qps are updated atomically across handler goroutines and calcuQPS.
	req int64
	qps int64
	// knobs for the synthetic workload, set via flags in init
	cpu     int
	errRate int
	sleep   time.Duration
)

// init wires up logging and the workload flags.
func init() {
	log.Init(&log.Config{Stdout: false})
	flag.IntVar(&cpu, "cpu", 3000, "cpu time")
	flag.IntVar(&errRate, "err", 0, "error rate")
	flag.DurationVar(&sleep, "sleep", 0, "sleep time")
}
// calcuQPS samples the global request counter every 5 seconds, stores the
// derived requests-per-second into qps, and prints it. It never returns and
// is used by main to block forever.
func calcuQPS() {
	var prev int64
	for {
		time.Sleep(time.Second * 5)
		cur := atomic.LoadInt64(&req)
		atomic.StoreInt64(&qps, (cur-prev)/5)
		prev = cur
		// Fix: this server handles gRPC; the old "HTTP QPS" label was misleading.
		fmt.Println("gRPC QPS: ", atomic.LoadInt64(&qps))
	}
}
// main starts the warden gRPC server on its default address and then blocks
// forever in calcuQPS, printing the request rate.
func main() {
	flag.Parse()
	server := warden.NewServer(nil)
	pb.RegisterGreeterServer(server.Server(), &helloServer{})
	_, err := server.Start()
	if err != nil {
		panic(err)
	}
	calcuQPS()
}
// helloServer implements pb.GreeterServer with a configurable synthetic
// workload for load testing.
type helloServer struct {
}

// SayHello counts the request, optionally injects errors (when Name is "err",
// with probability errRate%), burns configurable sleep and CPU time, then
// echoes a greeting.
func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
	atomic.AddInt64(&req, 1)
	if in.Name == "err" {
		if rand.Intn(100) < errRate {
			return nil, ecode.ServiceUnavailable
		}
	}
	// simulated latency: Age is interpreted as milliseconds
	time.Sleep(time.Millisecond * time.Duration(in.Age))
	time.Sleep(sleep)
	// burn CPU: between cpu and 2*cpu checksum iterations over a fixed payload
	for i := 0; i < cpu+rand.Intn(cpu); i++ {
		crc32.Checksum([]byte(`testasdwfwfsddsfgwddcscschttp://git.bilibili.co/platform/go-common/merge_requests/new?merge_request%5Bsource_branch%5D=stress%2Fcodel`), crc32.IEEETable)
	}
	return &pb.HelloReply{Message: "Hello " + in.Name, Success: true}, nil
}
// StreamHello echoes up to three greetings back on the stream, stopping early
// on EOF or any transport error.
func (s *helloServer) StreamHello(ss pb.Greeter_StreamHelloServer) error {
	for i := 0; i < 3; i++ {
		in, err := ss.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		ret := &pb.HelloReply{Message: "Hello " + in.Name, Success: true}
		err = ss.Send(ret)
		if err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -0,0 +1,276 @@
package wrr
import (
"context"
"math"
"strconv"
"sync"
"sync/atomic"
"time"
"go-common/library/log"
nmd "go-common/library/net/metadata"
wmeta "go-common/library/net/rpc/warden/metadata"
"go-common/library/stat/summary"
"google.golang.org/grpc"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
// compile-time interface conformance checks
var _ base.PickerBuilder = &wrrPickerBuilder{}
var _ balancer.Picker = &wrrPicker{}

// var dwrrFeature feature.Feature = "dwrr"

// Name is the name of round_robin balancer.
const Name = "wrr"

// newBuilder creates a new weighted-roundrobin balancer builder.
func newBuilder() balancer.Builder {
	return base.NewBalancerBuilder(Name, &wrrPickerBuilder{})
}

// init registers the wrr balancer with grpc at package load time.
func init() {
	//feature.DefaultGate.Add(map[feature.Feature]feature.Spec{
	//	dwrrFeature: {Default: false},
	//})
	balancer.Register(newBuilder())
}
// serverInfo holds per-backend load data reported by the server via gRPC
// trailers; both fields are accessed atomically.
type serverInfo struct {
	cpu     int64
	success uint64 // float64 bits
}

// subConn decorates a balancer.SubConn with weighted-round-robin state.
type subConn struct {
	conn balancer.SubConn
	addr resolver.Address
	meta wmeta.MD

	// rolling windows of client-side observations
	err      summary.Summary // error events per request
	lantency summary.Summary // latency samples; NOTE: field name is a typo for "latency"
	si       serverInfo
	// effective weight
	ewt int64
	// current weight
	cwt int64
	// last score
	score float64
}

// statistics is info for log
type statistics struct {
	addr     string
	ewt      int64
	cs       float64
	ss       float64
	lantency float64
	cpu      float64
	req      int64
}
// Stats is grpc Interceptor for client to collect server stats
// (cpu usage, request/error counts) from response trailers, storing them on
// the subConn that the wrr picker selected for this call.
func Stats() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) (err error) {
		var (
			trailer metadata.MD
			md      nmd.MD
			ok      bool
		)
		// work on a copy of the request metadata so the picker can stash the
		// chosen subConn under the "conn" key without mutating the caller's MD
		if md, ok = nmd.FromContext(ctx); !ok {
			md = nmd.MD{}
		} else {
			md = md.Copy()
		}
		ctx = nmd.NewContext(ctx, md)
		opts = append(opts, grpc.Trailer(&trailer))
		err = invoker(ctx, method, req, reply, cc, opts...)
		// the picker (wrrPicker.pick) records the selected subConn here
		conn, ok := md["conn"].(*subConn)
		if !ok {
			return
		}
		if strs, ok := trailer[nmd.CPUUsage]; ok {
			if cpu, err2 := strconv.ParseInt(strs[0], 10, 64); err2 == nil && cpu > 0 {
				atomic.StoreInt64(&conn.si.cpu, cpu)
			}
		}
		var reqs, errs int64
		if strs, ok := trailer[nmd.Requests]; ok {
			reqs, _ = strconv.ParseInt(strs[0], 10, 64)
		}
		if strs, ok := trailer[nmd.Errors]; ok {
			errs, _ = strconv.ParseInt(strs[0], 10, 64)
		}
		if reqs > 0 && reqs >= errs {
			// server-side success ratio, floored at 0.1 so a fully failing
			// backend keeps a non-zero score
			success := float64(reqs-errs) / float64(reqs)
			if success == 0 {
				success = 0.1
			}
			atomic.StoreUint64(&conn.si.success, math.Float64bits(success))
		}
		return
	}
}
type wrrPickerBuilder struct{}

// Build constructs a wrrPicker from the ready SubConns: uncolored instances
// go into the default picker, colored ones into per-color sub-pickers.
func (*wrrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	p := &wrrPicker{
		colors: make(map[string]*wrrPicker),
	}
	for addr, sc := range readySCs {
		meta, ok := addr.Metadata.(wmeta.MD)
		if !ok {
			// no warden metadata attached: fall back to a default weight of 10
			meta = wmeta.MD{
				Weight: 10,
			}
		}
		subc := &subConn{
			conn: sc,
			addr: addr,
			meta: meta,
			ewt:  meta.Weight,
			// score -1 marks "no statistics collected yet"
			score: -1,
			err:      summary.New(time.Second, 10),
			lantency: summary.New(time.Second, 10),
			// optimistic defaults until real trailer stats arrive
			si: serverInfo{cpu: 500, success: math.Float64bits(1)},
		}
		if meta.Color == "" {
			p.subConns = append(p.subConns, subc)
			continue
		}
		// if color not empty, use color picker
		cp, ok := p.colors[meta.Color]
		if !ok {
			cp = &wrrPicker{}
			p.colors[meta.Color] = cp
		}
		cp.subConns = append(cp.subConns, subc)
	}
	return p
}
type wrrPicker struct {
	// subConns is the snapshot of the weighted-roundrobin balancer when this picker was
	// created. The slice is immutable. Each Get() will do a round robin
	// selection from it and return the selected SubConn.
	subConns []*subConn
	// colors maps a color label to its dedicated sub-picker
	colors map[string]*wrrPicker

	// updateAt is the UnixNano timestamp of the last weight recalculation
	// (accessed atomically)
	updateAt int64

	// mu guards cwt/ewt mutation during selection and weight updates
	mu sync.Mutex
}
// Pick routes color-tagged requests to the matching per-color sub-picker and
// falls back to the default picker when no color matches.
func (p *wrrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	color := nmd.String(ctx, nmd.Color)
	if color == "" {
		return p.pick(ctx, opts)
	}
	sub, ok := p.colors[color]
	if !ok {
		return p.pick(ctx, opts)
	}
	return sub.pick(ctx, opts)
}
// pick implements nginx-style smooth weighted round-robin selection and
// returns a done callback that feeds client-side latency/error observations,
// plus server-reported stats, into a periodic (at most once per second)
// recalculation of every subConn's effective weight.
func (p *wrrPicker) pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	var (
		conn        *subConn
		totalWeight int64
	)
	if len(p.subConns) <= 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	p.mu.Lock()
	// nginx wrr load balancing algorithm: http://blog.csdn.net/zhangskd/article/details/50194069
	for _, sc := range p.subConns {
		totalWeight += sc.ewt
		sc.cwt += sc.ewt
		if conn == nil || conn.cwt < sc.cwt {
			conn = sc
		}
	}
	conn.cwt -= totalWeight
	p.mu.Unlock()
	start := time.Now()
	// expose the chosen subConn to the Stats interceptor via request metadata
	if cmd, ok := nmd.FromContext(ctx); ok {
		cmd["conn"] = conn
	}
	//if !feature.DefaultGate.Enabled(dwrrFeature) {
	//	return conn.conn, nil, nil
	//}
	return conn.conn, func(di balancer.DoneInfo) {
		ev := int64(0) // error value ,if error set 1
		if di.Err != nil {
			if st, ok := status.FromError(di.Err); ok {
				// only counter the local grpc error, ignore any business error
				if st.Code() != codes.Unknown && st.Code() != codes.OK {
					ev = 1
				}
			}
		}
		conn.err.Add(ev)
		now := time.Now()
		// latency is recorded in units of 0.1ms (ns / 1e5)
		conn.lantency.Add(now.Sub(start).Nanoseconds() / 1e5)
		// throttle recalculation to once per second; the CAS elects a single
		// winner among concurrently finishing RPCs
		u := atomic.LoadInt64(&p.updateAt)
		if now.UnixNano()-u < int64(time.Second) {
			return
		}
		if !atomic.CompareAndSwapInt64(&p.updateAt, u, now.UnixNano()) {
			return
		}
		var (
			stats = make([]statistics, len(p.subConns))
			count int
			total float64
		)
		for i, conn := range p.subConns {
			cpu := float64(atomic.LoadInt64(&conn.si.cpu))
			ss := math.Float64frombits(atomic.LoadUint64(&conn.si.success))
			errc, req := conn.err.Value()
			lagv, lagc := conn.lantency.Value()
			if req > 0 && lagc > 0 && lagv > 0 {
				// client-side success ratio
				cs := 1 - (float64(errc) / float64(req))
				if cs <= 0 {
					cs = 0.1
				} else if cs <= 0.2 && req <= 5 {
					cs = 0.2
				}
				lag := float64(lagv) / float64(lagc)
				// score grows with success ratio and shrinks with latency and
				// cpu load; sqrt dampens the spread between backends
				conn.score = math.Sqrt((cs * ss * ss * 1e9) / (lag * cpu))
				stats[i] = statistics{cs: cs, ss: ss, lantency: lag, cpu: cpu, req: req}
			}
			stats[i].addr = conn.addr.Addr
			if conn.score > 0 {
				total += conn.score
				count++
			}
		}
		// count must be greater than 1,otherwise will lead ewt to 0
		if count < 2 {
			return
		}
		avgscore := total / float64(count)
		p.mu.Lock()
		for i, conn := range p.subConns {
			if conn.score <= 0 {
				// no observations yet: assume average health
				conn.score = avgscore
			}
			conn.ewt = int64(conn.score * float64(conn.meta.Weight))
			stats[i].ewt = conn.ewt
		}
		p.mu.Unlock()
		log.Info("warden wrr(%s): %+v", conn.addr.ServerName, stats)
	}, nil
}

View File

@@ -0,0 +1,95 @@
package wrr
import (
"context"
"fmt"
"testing"
nmd "go-common/library/net/metadata"
wmeta "go-common/library/net/rpc/warden/metadata"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/resolver"
)
// testSubConn is a minimal balancer.SubConn stub that only carries an address.
type testSubConn struct {
	addr resolver.Address
}

// UpdateAddresses is a no-op for the stub.
func (s *testSubConn) UpdateAddresses([]resolver.Address) {
}

// Connect starts the connecting for this SubConn.
func (s *testSubConn) Connect() {
	fmt.Println(s.addr.Addr)
}
// TestBalancerPick builds a picker over one uncolored SubConn (test1) and two
// "red" ones (test2/test3), then verifies: uncolored requests always hit
// test1; "red" requests round-robin test2/test3 by weight 4:2; an unknown
// color ("black") falls back to the default picker.
func TestBalancerPick(t *testing.T) {
	scs := map[resolver.Address]balancer.SubConn{}
	sc1 := &testSubConn{
		addr: resolver.Address{
			Addr: "test1",
			Metadata: wmeta.MD{
				Weight: 8,
			},
		},
	}
	sc2 := &testSubConn{
		addr: resolver.Address{
			Addr: "test2",
			Metadata: wmeta.MD{
				Weight: 4,
				Color:  "red",
			},
		},
	}
	sc3 := &testSubConn{
		addr: resolver.Address{
			Addr: "test3",
			Metadata: wmeta.MD{
				Weight: 2,
				Color:  "red",
			},
		},
	}
	scs[sc1.addr] = sc1
	scs[sc2.addr] = sc2
	scs[sc3.addr] = sc3
	b := &wrrPickerBuilder{}
	picker := b.Build(scs)
	// uncolored requests: test1 is the only default-picker member
	res := []string{"test1", "test1", "test1", "test1"}
	for i := 0; i < 3; i++ {
		conn, _, err := picker.Pick(context.Background(), balancer.PickOptions{})
		if err != nil {
			t.Fatalf("picker.Pick failed!idx:=%d", i)
		}
		sc := conn.(*testSubConn)
		if sc.addr.Addr != res[i] {
			t.Fatalf("the subconn picked(%s),but expected(%s)", sc.addr.Addr, res[i])
		}
	}
	// "red" requests: smooth wrr over weights 4 (test2) and 2 (test3)
	res2 := []string{"test2", "test3", "test2", "test2", "test3", "test2"}
	ctx := nmd.NewContext(context.Background(), nmd.New(map[string]interface{}{"color": "red"}))
	for i := 0; i < 6; i++ {
		conn, _, err := picker.Pick(ctx, balancer.PickOptions{})
		if err != nil {
			t.Fatalf("picker.Pick failed!idx:=%d", i)
		}
		sc := conn.(*testSubConn)
		if sc.addr.Addr != res2[i] {
			t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res2[i])
		}
	}
	// unknown color falls back to the default picker
	ctx = nmd.NewContext(context.Background(), nmd.New(map[string]interface{}{"color": "black"}))
	for i := 0; i < 4; i++ {
		conn, _, err := picker.Pick(ctx, balancer.PickOptions{})
		if err != nil {
			t.Fatalf("picker.Pick failed!idx:=%d", i)
		}
		sc := conn.(*testSubConn)
		if sc.addr.Addr != res[i] {
			t.Fatalf("the (%d) subconn picked(%s),but expected(%s)", i, sc.addr.Addr, res[i])
		}
	}
}