Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,50 @@
# Bazel build rules for the dapper dao package. The "automanaged" tag
# means these rules are regenerated by tooling — edit with care.
package(default_visibility = ["//visibility:public"])
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)
# Unit tests for the dao package (dao_test.go), compiled into the
# library package via embed.
go_test(
    name = "go_default_test",
    srcs = ["dao_test.go"],
    embed = [":go_default_library"],
    tags = ["automanaged"],
    deps = [
        "//app/service/main/dapper/conf:go_default_library",
        "//app/service/main/dapper/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)
# The dao library: HBase + InfluxDB storage layer for the dapper service.
go_library(
    name = "go_default_library",
    srcs = ["dao.go"],
    importpath = "go-common/app/service/main/dapper/dao",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/main/dapper/conf:go_default_library",
        "//app/service/main/dapper/model:go_default_library",
        "//library/log:go_default_library",
        "//vendor/github.com/dgryski/go-farm:go_default_library",
        "//vendor/github.com/influxdata/influxdb/client/v2:go_default_library",
        "//vendor/github.com/tsuna/gohbase:go_default_library",
        "//vendor/github.com/tsuna/gohbase/hrpc:go_default_library",
    ],
)
# All files under this package, consumed by the aggregation target below.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)
# Aggregates this package's sources for the repository-wide srcs tree.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,220 @@
package dao
import (
"context"
"fmt"
"strconv"
"strings"
"time"
"github.com/dgryski/go-farm"
influxdb "github.com/influxdata/influxdb/client/v2"
"github.com/tsuna/gohbase"
"github.com/tsuna/gohbase/hrpc"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
"go-common/library/log"
)
const (
	// Fallback HBase namespace / InfluxDB database, used by New when the
	// corresponding config values are empty.
	defaultHbaseNameSpace = "ugc"
	defaultInfluxDatabase = "dapper"
	// HBase table + column family holding raw trace bytes.
	hbaseRawTraceTable  = "DapperRawtrace"
	hbaseRawTraceFamily = "pb"
	// HBase table + column family for the span sample (list) index.
	hbaseListIdxTable  = "DapperListidx"
	hbaseListIdxFamily = "kind"
	// InfluxDB tag keys attached to each span point.
	serviceNameTag   = "service_name"
	operationNameTag = "operation_name"
	peerServiceTag   = "peer.service"
	spanKindTag      = "span.kind"
	// InfluxDB field keys for the duration aggregates.
	maxDurationField = "max_duration"
	minDurationField = "min_duration"
	avgDurationField = "avg_duration"
	// Measurement all span points are written to / queried from, and the
	// field key holding the error count.
	spanpointMeasurement = "span_point"
	errorsField          = "errors"
)
// Dao is the storage interface of the dapper service: raw traces and the
// sample index go to HBase, span-point time series go to InfluxDB.
type Dao interface {
	// WriteRawTrace writes one raw trace row (column family "pb") to HBase.
	WriteRawTrace(ctx context.Context, rowKey string, values map[string][]byte) error
	// BatchWriteSpanPoint writes span points to both the HBase sample
	// index and the InfluxDB span_point measurement.
	BatchWriteSpanPoint(ctx context.Context, spanPoints []*model.SpanPoint) error
	// FetchServiceName returns the distinct service names recorded in InfluxDB.
	FetchServiceName(ctx context.Context) ([]string, error)
	// FetchOperationName returns the distinct operation names recorded for
	// the given service.
	FetchOperationName(ctx context.Context, serviceName string) ([]string, error)
}
// New builds the production Dao backed by an HBase client (raw traces and
// the sample index) and an InfluxDB HTTP client (span-point time series).
// Empty namespace/database settings fall back to the package defaults.
func New(cfg *conf.Config) (Dao, error) {
	// RpcQueueSize(0) disables client-side RPC queueing.
	hbaseClient := gohbase.NewClient(cfg.HBase.Addrs, gohbase.RpcQueueSize(0))
	namespace := cfg.HBase.Namespace
	if namespace == "" {
		namespace = defaultHbaseNameSpace
	}
	influxdbClient, err := influxdb.NewHTTPClient(influxdb.HTTPConfig{
		Addr:     cfg.InfluxDB.Addr,
		Username: cfg.InfluxDB.Username,
		Password: cfg.InfluxDB.Password,
	})
	if err != nil {
		return nil, err
	}
	database := cfg.InfluxDB.Database
	if database == "" {
		database = defaultInfluxDatabase
	}
	return &dao{
		hbaseNameSpace: namespace,
		hbaseClient:    hbaseClient,
		influxDatabase: database,
		influxdbClient: influxdbClient,
	}, nil
}
// Compile-time check that *dao satisfies the Dao interface.
var _ Dao = &dao{}

// dao is the production Dao implementation holding one gohbase client and
// one InfluxDB HTTP client.
type dao struct {
	hbaseNameSpace string // namespace prefix used to build HBase table names
	hbaseClient    gohbase.Client
	influxDatabase string // InfluxDB database used for writes and queries
	influxdbClient influxdb.Client
}
// WriteRawTrace stores one raw trace row in "<namespace>:DapperRawtrace".
// Every entry of values becomes a qualifier/value cell in the "pb" family
// of the row identified by rowKey.
func (d *dao) WriteRawTrace(ctx context.Context, rowKey string, values map[string][]byte) error {
	fullTable := d.hbaseNameSpace + ":" + hbaseRawTraceTable
	mutation := map[string]map[string][]byte{hbaseRawTraceFamily: values}
	put, err := hrpc.NewPutStr(ctx, fullTable, rowKey, mutation)
	if err != nil {
		return err
	}
	if _, err = d.hbaseClient.Put(put); err != nil {
		return err
	}
	return nil
}
// BatchWriteSpanPoint persists a batch of span points: each point is
// indexed in HBase (writeSamplePoint) and collected into a single
// InfluxDB batch that is flushed once at the end. Failures are
// accumulated so one bad point does not abort the rest; the combined
// error is returned when anything failed.
func (d *dao) BatchWriteSpanPoint(ctx context.Context, spanPoints []*model.SpanPoint) error {
	// NOTE(review): the influxdb client usually takes a bare unit such as
	// "s" for Precision; "1s" is preserved here — confirm upstream.
	batch, err := influxdb.NewBatchPoints(influxdb.BatchPointsConfig{Database: d.influxDatabase, Precision: "1s"})
	if err != nil {
		return err
	}
	var failures []string
	for _, sp := range spanPoints {
		if werr := d.writeSamplePoint(ctx, sp); werr != nil {
			failures = append(failures, werr.Error())
		}
		point, perr := toInfluxDBPoint(sp)
		if perr != nil {
			failures = append(failures, perr.Error())
			continue
		}
		batch.AddPoint(point)
	}
	if werr := d.influxdbClient.Write(batch); werr != nil {
		failures = append(failures, werr.Error())
	}
	if len(failures) > 0 {
		return fmt.Errorf("%s", strings.Join(failures, "\n"))
	}
	return nil
}
// FetchServiceName lists every distinct service_name tag value recorded
// in the span_point measurement. An empty (non-nil) slice is returned
// when nothing has been recorded yet.
func (d *dao) FetchServiceName(ctx context.Context) ([]string, error) {
	command := fmt.Sprintf("SHOW TAG VALUES FROM span_point WITH KEY = %s", serviceNameTag)
	log.V(10).Info("query command %s", command)
	resp, err := d.influxdbClient.Query(influxdb.NewQuery(command, d.influxDatabase, "1s"))
	if err != nil {
		return nil, err
	}
	if len(resp.Results) == 0 || len(resp.Results[0].Series) == 0 {
		return make([]string, 0), nil
	}
	series := resp.Results[0].Series[0]
	names := make([]string, 0, len(series.Values))
	// Each row is a (key, value) pair; keep only well-formed string values.
	for _, pair := range series.Values {
		if len(pair) != 2 {
			continue
		}
		name, ok := pair[1].(string)
		if !ok {
			continue
		}
		names = append(names, name)
	}
	return names, nil
}
// FetchOperationName lists the distinct operation_name tag values seen
// for the given service, restricted to server-kind spans. An empty
// (non-nil) slice is returned when nothing matches.
//
// NOTE(review): serviceName is spliced into the InfluxQL string without
// escaping — confirm callers only pass trusted values.
func (d *dao) FetchOperationName(ctx context.Context, serviceName string) ([]string, error) {
	command := fmt.Sprintf("SHOW TAG VALUES FROM %s WITH KEY = %s WHERE %s = '%s' AND %s = '%s'",
		spanpointMeasurement, operationNameTag, serviceNameTag, serviceName, spanKindTag, "server")
	log.V(10).Info("query command %s", command)
	resp, err := d.influxdbClient.Query(influxdb.NewQuery(command, d.influxDatabase, "1s"))
	if err != nil {
		return nil, err
	}
	if len(resp.Results) == 0 || len(resp.Results[0].Series) == 0 {
		return make([]string, 0), nil
	}
	series := resp.Results[0].Series[0]
	names := make([]string, 0, len(series.Values))
	// Each row is a (key, value) pair; keep only well-formed string values.
	for _, pair := range series.Values {
		if len(pair) != 2 {
			continue
		}
		name, ok := pair[1].(string)
		if !ok {
			continue
		}
		names = append(names, name)
	}
	return names, nil
}
// writeSamplePoint indexes one span point in HBase so sampled traces can
// be looked up later: duration samples land under "d:<value>" qualifiers
// and error samples under "e:<value>" qualifiers, in the "kind" family of
// the row keyed by listIdxKey.
func (d *dao) writeSamplePoint(ctx context.Context, spanPoint *model.SpanPoint) error {
	fullTable := d.hbaseNameSpace + ":" + hbaseListIdxTable
	cells := make(map[string][]byte)
	cells = fuelDurationSamplePoint(cells, spanPoint.MaxDuration, spanPoint.AvgDuration, spanPoint.MinDuration)
	cells = fuelErrrorSamplePoint(cells, spanPoint.Errors...)
	mutation := map[string]map[string][]byte{hbaseListIdxFamily: cells}
	put, err := hrpc.NewPutStr(ctx, fullTable, listIdxKey(spanPoint), mutation)
	if err != nil {
		return err
	}
	_, err = d.hbaseClient.Put(put)
	return err
}
// listIdxKey builds the HBase row key for a span point's list index:
// farm32(serviceName) + farm32(operationName) + unix timestamp.
//
// The two hashes are rendered as fixed-width zero-padded hex (%08x).
// The previous "%x" form was variable width, so distinct hash pairs
// could collide (e.g. (0x1, 0x23) and (0x12, 0x3) both render "123")
// and prefix scans over a service/operation pair were ambiguous.
func listIdxKey(spanPoint *model.SpanPoint) string {
	serviceNameHash := farm.Hash32([]byte(spanPoint.ServiceName))
	operationNameHash := farm.Hash32([]byte(spanPoint.OperationName))
	return fmt.Sprintf("%08x%08x%d", serviceNameHash, operationNameHash, spanPoint.Timestamp)
}
// fuelDurationSamplePoint records duration samples into values: each
// point becomes a "d:<duration>" qualifier whose cell holds
// "<traceID hex>:<spanID hex>". The mutated map is returned.
func fuelDurationSamplePoint(values map[string][]byte, points ...model.SamplePoint) map[string][]byte {
	for _, p := range points {
		qualifier := "d:" + strconv.FormatInt(p.Value, 10)
		values[qualifier] = []byte(fmt.Sprintf("%x:%x", p.TraceID, p.SpanID))
	}
	return values
}
// fuelErrrorSamplePoint records error samples into values: each point
// becomes an "e:<value>" qualifier whose cell holds
// "<traceID hex>:<spanID hex>". The mutated map is returned.
// NOTE(review): the name carries a typo ("Errror"); kept to avoid
// touching callers.
func fuelErrrorSamplePoint(values map[string][]byte, points ...model.SamplePoint) map[string][]byte {
	for _, p := range points {
		qualifier := "e:" + strconv.FormatInt(p.Value, 10)
		values[qualifier] = []byte(fmt.Sprintf("%x:%x", p.TraceID, p.SpanID))
	}
	return values
}
// toInfluxDBPoint converts a span point into an InfluxDB point on the
// span_point measurement: service/operation/kind/peer become tags, the
// duration extremes and error count become fields, and the point is
// stamped with the span's unix timestamp.
func toInfluxDBPoint(spanPoint *model.SpanPoint) (*influxdb.Point, error) {
	tags := make(map[string]string, 4)
	tags[serviceNameTag] = spanPoint.ServiceName
	tags[operationNameTag] = spanPoint.OperationName
	tags[spanKindTag] = spanPoint.SpanKind
	tags[peerServiceTag] = spanPoint.PeerService
	fields := make(map[string]interface{}, 4)
	fields[maxDurationField] = spanPoint.MaxDuration.Value
	fields[minDurationField] = spanPoint.MinDuration.Value
	fields[avgDurationField] = spanPoint.AvgDuration.Value
	fields[errorsField] = len(spanPoint.Errors)
	return influxdb.NewPoint(spanpointMeasurement, tags, fields, time.Unix(spanPoint.Timestamp, 0))
}

View File

@@ -0,0 +1,144 @@
package dao
import (
"context"
"flag"
"log"
"math/rand"
"os"
"strconv"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
)
// init seeds math/rand so the randomized row keys and sample values used
// by the tests below differ between runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// cfg is the configuration loaded in TestMain; it stays nil when loading
// fails, in which case TestDao skips itself.
var cfg *conf.Config

// flagMap carries the command-line flags the remote config SDK expects.
// NOTE(review): a conf_token value is committed here in plain text —
// confirm it is a throwaway UAT credential and rotate it if not.
var flagMap = map[string]string{
	"app_id":       "main.common-arch.dapper-service",
	"conf_token":   "528dd7e00bb411e894c14a552f48fef8",
	"tree_id":      "5172",
	"conf_version": "server-1",
	"deploy_env":   "uat",
	"conf_host":    "config.bilibili.co",
	"conf_path":    os.TempDir(),
	"region":       "sh",
	"zone":         "sh001",
}
// TestMain loads the service configuration before running the tests:
// flags required by the remote config SDK are injected from flagMap, then
// the HBase namespace / InfluxDB database are redirected to dedicated
// unit-test targets. The TEST_HBASE_ADDRS / TEST_INFLUXDB_ADDR
// environment variables override the remote configuration entirely.
func TestMain(m *testing.M) {
	for key, val := range flagMap {
		flag.Set(key, val) // best-effort: errors for unknown flags are ignored
	}
	flag.Parse()
	if err := conf.Init(); err != nil {
		// Remote config may be unreachable outside the UAT network; tests
		// that need it skip themselves when cfg stays nil.
		log.Printf("init config from remote error: %s", err)
	}
	cfg = conf.Conf
	// Guard against a nil cfg when conf.Init failed: the previous code
	// dereferenced cfg unconditionally and panicked here.
	if cfg != nil {
		if cfg.InfluxDB != nil {
			cfg.InfluxDB.Database = "dapper_ut"
		}
		if cfg.HBase != nil {
			cfg.HBase.Namespace = "dapperut"
		}
	}
	if hbaseAddrs := os.Getenv("TEST_HBASE_ADDRS"); hbaseAddrs != "" {
		if cfg == nil {
			// Environment override works even without remote config.
			cfg = &conf.Config{}
		}
		cfg.HBase = &conf.HBaseConfig{Addrs: hbaseAddrs, Namespace: "dapperut"}
		// NOTE(review): TEST_INFLUXDB_ADDR is only honored together with
		// TEST_HBASE_ADDRS; nesting kept as-is — confirm it is intended.
		if influxdbAddr := os.Getenv("TEST_INFLUXDB_ADDR"); influxdbAddr != "" {
			cfg.InfluxDB = &conf.InfluxDBConfig{Addr: influxdbAddr, Database: "dapper_ut"}
		}
	}
	os.Exit(m.Run())
}
// TestDao exercises the dao end to end against the HBase/InfluxDB
// instances named in the config; it skips when TestMain could not load
// any configuration.
func TestDao(t *testing.T) {
	if cfg == nil {
		t.Skipf("no config provide skipped")
	}
	d, err := New(cfg)
	if err != nil {
		t.Fatalf("new dao error: %s", err)
	}
	ctx := context.Background()
	Convey("test fetch serviceName and operationName", t, func() {
		serviceNames, err := d.FetchServiceName(ctx)
		So(err, ShouldBeNil)
		So(serviceNames, ShouldNotBeEmpty)
		for _, serviceName := range serviceNames {
			operationNames, err := d.FetchOperationName(ctx, serviceName)
			So(err, ShouldBeNil)
			t.Logf("%s operationNames: %v", serviceName, operationNames)
		}
	})
	Convey("test write rawtrace", t, func() {
		rowKey := strconv.FormatUint(rand.Uint64(), 16)
		cells := map[string][]byte{strconv.FormatUint(rand.Uint64(), 16): []byte("hello world")}
		if err := d.WriteRawTrace(context.Background(), rowKey, cells); err != nil {
			t.Error(err)
		}
	})
	Convey("test batchwrite span point", t, func() {
		// Helpers producing randomized samples for the fully-populated point.
		randomSample := func() model.SamplePoint {
			return model.SamplePoint{
				SpanID:  rand.Uint64(),
				TraceID: rand.Uint64(),
				Value:   rand.Int63n(1024),
			}
		}
		errorSample := func() model.SamplePoint {
			return model.SamplePoint{
				SpanID:  rand.Uint64(),
				TraceID: rand.Uint64(),
				Value:   1,
			}
		}
		points := []*model.SpanPoint{
			{
				ServiceName:   "service_a",
				OperationName: "opt1",
				PeerService:   "peer_service_a",
				SpanKind:      "client",
				Timestamp:     time.Now().Unix() - rand.Int63n(3600),
				MaxDuration:   randomSample(),
				MinDuration:   randomSample(),
				AvgDuration:   randomSample(),
				Errors:        []model.SamplePoint{errorSample(), errorSample()},
			},
			{
				ServiceName:   "service_b",
				OperationName: "opt2",
				PeerService:   "peer_service_b",
				SpanKind:      "server",
				Timestamp:     time.Now().Unix() - rand.Int63n(3600),
			},
			{
				ServiceName:   "service_c",
				OperationName: "opt3",
				PeerService:   "peer_service_c",
				SpanKind:      "client",
				Timestamp:     time.Now().Unix() - rand.Int63n(3600),
			},
		}
		if err := d.BatchWriteSpanPoint(context.Background(), points); err != nil {
			t.Error(err)
		}
	})
}

View File

@@ -0,0 +1,4 @@
#!/bin/bash
# Test environment for the dao tests: TestMain reads these variables and
# routes writes to the UAT HBase/InfluxDB instances below.
# NOTE(review): these are internal UAT hosts; when unreachable the tests
# either skip (nil config) or fail at connect time — confirm intent.
export TEST_INFLUXDB_ADDR=http://172.22.33.146:8086
export TEST_HBASE_ADDRS=172.18.33.131,172.18.33.168,172.18.33.169