Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
# Bazel build definitions for the dapper batchwrite package.
# NOTE(review): the "automanaged" tags suggest this file is maintained by a
# BUILD-file generator (gazelle-style); manual edits may be overwritten — TODO confirm.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

# Unit tests, compiled into the same package as the library under test.
go_test(
    name = "go_default_test",
    srcs = ["batchwrite_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//app/service/main/dapper/model:go_default_library"],
)

# The batchwrite library itself.
go_library(
    name = "go_default_library",
    srcs = ["batchwrite.go"],
    importpath = "go-common/app/service/main/dapper/pkg/batchwrite",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/main/dapper/model:go_default_library",
        "//library/log:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

# Source filegroups consumed by repo-wide aggregation targets.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,174 @@
package batchwrite
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"go-common/app/service/main/dapper/model"
"go-common/library/log"
)
var (
	// _writeTimeout bounds how long flushBufMap waits for space on the
	// worker channel before giving up on the remaining bundles.
	_writeTimeout = time.Second
	// ErrClosed is returned by WriteSpan once Close has been called.
	ErrClosed = errors.New("batchwriter already closed")
)
// BatchWriter buffers individual span writes and flushes them to a
// backing store in batches.
type BatchWriter interface {
	// WriteSpan buffers one span for asynchronous batched writing.
	WriteSpan(span *model.Span) error
	// Close flushes buffered data and stops the background workers.
	Close() error
	// QueueLen reports the internal queue length.
	QueueLen() int
}
// rawBundle is one unit of work handed to a worker goroutine: all
// buffered span payloads belonging to a single trace.
type rawBundle struct {
	key  string            // trace ID
	data map[string][]byte // span key ("<spanID>_s" / "<spanID>_c") -> marshaled span
}
// NewRawDataBatchWriter builds a BatchWriter that accumulates raw span
// data up to bufSize bytes and hands batches to writeFunc from a pool of
// worker goroutines. Non-positive workers defaults to 1; a non-positive
// interval defaults to a 5s periodic flush.
func NewRawDataBatchWriter(writeFunc func(context.Context, string, map[string][]byte) error, bufSize, chanSize, workers int, interval time.Duration) BatchWriter {
	// clamp invalid knobs to sane defaults
	if workers <= 0 {
		workers = 1
	}
	if interval <= 0 {
		interval = 5 * time.Second
	}
	w := &rawDataBatchWrite{
		maxBufSize: bufSize,
		ch:         make(chan *rawBundle, chanSize),
		bufMap:     make(map[string]map[string][]byte),
		timeout:    10 * time.Second,
		writeFunc:  writeFunc,
	}
	// start the write workers before any data can arrive
	w.wg.Add(workers)
	for n := workers; n > 0; n-- {
		go w.worker()
	}
	// periodic flush so small batches do not sit in the buffer forever
	w.flushTicker = time.NewTicker(interval)
	go w.daemonFlush()
	return w
}
// rawDataBatchWrite implements BatchWriter: spans accumulate in bufMap
// under mx until the size threshold or flush ticker fires, then the map
// is swapped out and pushed onto ch for the worker goroutines to write.
type rawDataBatchWrite struct {
	mx          sync.Mutex
	closed      bool // set by Close; WriteSpan then fails with ErrClosed
	maxBufSize  int  // byte threshold that triggers an early flush in WriteSpan
	sizeCount   int  // bytes currently buffered in bufMap
	bufMap      map[string]map[string][]byte // traceID -> span key -> payload
	ch          chan *rawBundle              // work queue drained by worker goroutines
	timeout     time.Duration                // per-write context timeout (see write)
	writeFunc   func(context.Context, string, map[string][]byte) error
	wg          sync.WaitGroup // tracks workers so Close can wait for drain
	flushTicker *time.Ticker   // drives daemonFlush
}
// WriteSpan marshals span and buffers it under its trace ID, keyed by
// span ID plus a "_s" (server) or "_c" (client) suffix. When the buffered
// byte count exceeds maxBufSize, the current buffer is swapped out and
// pushed to the worker queue. Returns ErrClosed after Close.
func (r *rawDataBatchWrite) WriteSpan(span *model.Span) error {
	data, err := span.Marshal()
	if err != nil {
		return err
	}
	traceID := span.TraceIDStr()
	spanID := span.SpanIDStr()
	// server and client halves of the same span are stored side by side
	kind := "_s"
	if !span.IsServer() {
		kind = "_c"
	}
	key := spanID + kind
	var bufMap map[string]map[string][]byte
	r.mx.Lock()
	// Reject writes after Close BEFORE buffering: previously the span was
	// buffered (and a full buffer flushed) first, so post-Close data sat in
	// bufMap forever and a flush could race against the closed channel.
	if r.closed {
		r.mx.Unlock()
		return ErrClosed
	}
	if r.sizeCount > r.maxBufSize {
		// buffer is full: take ownership of it and start a fresh one
		bufMap = r.bufMap
		r.bufMap = make(map[string]map[string][]byte)
		r.sizeCount = 0
	}
	r.sizeCount += len(data)
	if _, ok := r.bufMap[traceID]; !ok {
		r.bufMap[traceID] = make(map[string][]byte)
	}
	r.bufMap[traceID][key] = data
	r.mx.Unlock()
	// flush outside the lock so writers are not blocked on channel sends
	if bufMap != nil {
		return r.flushBufMap(bufMap)
	}
	return nil
}
// QueueLen reports how many bundles are waiting in the worker channel.
func (r *rawDataBatchWrite) QueueLen() int {
	return len(r.ch)
}
// daemonFlush flushes the buffer on every flush-ticker tick, logging
// (not propagating) flush errors.
// NOTE(review): Ticker.Stop (called by Close) does not close the ticker
// channel, so this range never terminates and the goroutine leaks after
// Close — a quit channel would fix it. TODO confirm.
func (r *rawDataBatchWrite) daemonFlush() {
	for range r.flushTicker.C {
		if err := r.flush(); err != nil {
			log.Error("flush raw data error: %s", err)
		}
	}
}
// flush hands the current buffer, if non-empty, to the worker queue and
// installs a fresh empty buffer in its place.
func (r *rawDataBatchWrite) flush() error {
	r.mx.Lock()
	pending := r.bufMap
	empty := r.sizeCount == 0
	if !empty {
		// swap in a fresh buffer; pending now belongs to this call
		r.bufMap = make(map[string]map[string][]byte)
		r.sizeCount = 0
	}
	r.mx.Unlock()
	if empty {
		return nil
	}
	return r.flushBufMap(pending)
}
// flushBufMap pushes one bundle per trace onto the worker channel.
// A single _writeTimeout deadline covers the whole map: if the channel
// stays full for that long, the remaining bundles are dropped and an
// error is returned.
func (r *rawDataBatchWrite) flushBufMap(bufMap map[string]map[string][]byte) error {
	timer := time.NewTimer(_writeTimeout)
	// Stop the timer on return so it does not linger until expiry
	// (previously it was leaked on every call).
	defer timer.Stop()
	for traceID, data := range bufMap {
		select {
		case <-timer.C:
			return errors.New("write span timeout, raw data buffer channel is full")
		case r.ch <- &rawBundle{
			key:  traceID,
			data: data,
		}:
		}
	}
	return nil
}
// Close flushes the remaining buffer, closes the work channel, and waits
// for every worker to drain it. Subsequent WriteSpan calls return
// ErrClosed. Close is idempotent: a second call is a no-op rather than
// panicking on the already-closed channel (the previous behavior).
func (r *rawDataBatchWrite) Close() error {
	r.mx.Lock()
	defer r.mx.Unlock()
	if r.closed {
		// already shut down: closing r.ch twice would panic
		return nil
	}
	r.closed = true
	r.flushTicker.Stop()
	// NOTE(review): Stop does not terminate daemonFlush's range loop, so
	// that goroutine leaks — consider a quit channel. TODO confirm.
	bufMap := r.bufMap
	r.bufMap = make(map[string]map[string][]byte)
	r.sizeCount = 0
	// best-effort final flush; surface the error instead of dropping it
	if err := r.flushBufMap(bufMap); err != nil {
		log.Error("flush raw data on close error: %s", err)
	}
	close(r.ch)
	r.wg.Wait()
	return nil
}
// worker drains the bundle queue until the channel is closed, writing
// each bundle and logging (not propagating) any write failure.
func (r *rawDataBatchWrite) worker() {
	defer r.wg.Done()
	for b := range r.ch {
		err := r.write(b)
		if err == nil {
			continue
		}
		log.Error("batch write raw data error: %s", err)
	}
}
// write performs one batched write via writeFunc under a fresh context
// bounded by r.timeout.
func (r *rawDataBatchWrite) write(bundle *rawBundle) error {
	ctx, cancel := context.WithTimeout(context.Background(), r.timeout)
	defer cancel()
	return r.writeFunc(ctx, bundle.key, bundle.data)
}

View File

@@ -0,0 +1,89 @@
package batchwrite
import (
	"context"
	"math/rand"
	"sync"
	"testing"

	"go-common/app/service/main/dapper/model"
)
var (
	// emptyspan is a zero-value span used to probe writer behavior.
	emptyspan = &model.Span{}
)
// TestRawDataBatchWriter verifies that spans written through the batch
// writer end up in storage grouped by trace ID once Close has drained
// the workers.
func TestRawDataBatchWriter(t *testing.T) {
	// writeFunc is invoked from 2 concurrent workers; the unsynchronized
	// map writes were a data race under `go test -race` — guard them.
	var mu sync.Mutex
	storage := make(map[string]map[string][]byte)
	writeFunc := func(ctx context.Context, traceID string, data map[string][]byte) error {
		mu.Lock()
		defer mu.Unlock()
		if _, ok := storage[traceID]; !ok {
			storage[traceID] = make(map[string][]byte)
		}
		for k, v := range data {
			storage[traceID][k] = v
		}
		return nil
	}
	rbw := NewRawDataBatchWriter(writeFunc, 16, 2, 2, 0)
	spans := []*model.Span{
		{TraceID: 1, SpanID: 11},
		{TraceID: 1, SpanID: 12},
		{TraceID: 2, SpanID: 21},
		{TraceID: 2, SpanID: 22},
	}
	for _, span := range spans {
		if err := rbw.WriteSpan(span); err != nil {
			t.Error(err)
		}
	}
	// Close waits for the workers, so storage is quiescent afterwards;
	// the error was previously ignored.
	if err := rbw.Close(); err != nil {
		t.Errorf("close error: %v", err)
	}
	mu.Lock()
	defer mu.Unlock()
	if len(storage) != 2 {
		t.Errorf("expect get 2 trace data, get %v", storage)
	}
	if len(storage["1"]) != 2 {
		t.Errorf("expect get 2 span data, get %v", storage["1"])
	}
	t.Logf("%v\n", storage)
}
// TestBatchWriterClosed checks that a closed writer rejects new spans
// with ErrClosed.
func TestBatchWriterClosed(t *testing.T) {
	noop := func(ctx context.Context, traceID string, data map[string][]byte) error {
		return nil
	}
	rbw := NewRawDataBatchWriter(noop, 1024*1024, 2, 2, 0)
	rbw.Close()
	err := rbw.WriteSpan(emptyspan)
	if err != ErrClosed {
		t.Errorf("expect err == ErrClosed get: %v", err)
	}
}
// randSpan fabricates a span with a bounded random trace/span ID pair.
func randSpan() *model.Span {
	tid := rand.Uint64() % 128
	sid := rand.Uint64() % 16
	return &model.Span{TraceID: tid, SpanID: sid}
}
// BenchmarkRawDataWriter measures WriteSpan throughput against a no-op sink.
func BenchmarkRawDataWriter(b *testing.B) {
	sink := func(ctx context.Context, traceID string, data map[string][]byte) error {
		return nil
	}
	rbw := NewRawDataBatchWriter(sink, 1024*1024, 2, 2, 0)
	for n := 0; n < b.N; n++ {
		err := rbw.WriteSpan(randSpan())
		if err != nil {
			b.Error(err)
		}
	}
	rbw.Close()
}