Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["batchwrite_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = ["//app/service/main/dapper/model:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["batchwrite.go"],
importpath = "go-common/app/service/main/dapper/pkg/batchwrite",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//library/log:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,174 @@
package batchwrite
import (
"context"
"sync"
"time"
"github.com/pkg/errors"
"go-common/app/service/main/dapper/model"
"go-common/library/log"
)
var (
_writeTimeout = time.Second
// ErrClosed is returned when writing to an already-closed BatchWriter.
ErrClosed = errors.New("batchwriter already closed")
)
// BatchWriter buffers spans and writes them out in batches.
type BatchWriter interface {
WriteSpan(span *model.Span) error
Close() error
// QueueLen reports the internal queue length
QueueLen() int
}
type rawBundle struct {
key string
data map[string][]byte
}
// NewRawDataBatchWriter creates a BatchWriter that buffers raw span data per trace and flushes it via writeFunc.
func NewRawDataBatchWriter(writeFunc func(context.Context, string, map[string][]byte) error, bufSize, chanSize, workers int, interval time.Duration) BatchWriter {
if workers <= 0 {
workers = 1
}
if interval <= 0 {
interval = 5 * time.Second
}
rbw := &rawDataBatchWrite{
maxBufSize: bufSize,
ch: make(chan *rawBundle, chanSize),
bufMap: make(map[string]map[string][]byte),
timeout: 10 * time.Second,
writeFunc: writeFunc,
}
rbw.wg.Add(workers)
for i := 0; i < workers; i++ {
go rbw.worker()
}
rbw.flushTicker = time.NewTicker(interval)
go rbw.daemonFlush()
return rbw
}
type rawDataBatchWrite struct {
mx sync.Mutex
closed bool
maxBufSize int
sizeCount int
bufMap map[string]map[string][]byte
ch chan *rawBundle
timeout time.Duration
writeFunc func(context.Context, string, map[string][]byte) error
wg sync.WaitGroup
flushTicker *time.Ticker
}
func (r *rawDataBatchWrite) WriteSpan(span *model.Span) error {
data, err := span.Marshal()
if err != nil {
return err
}
traceID := span.TraceIDStr()
spanID := span.SpanIDStr()
kind := "_s"
if !span.IsServer() {
kind = "_c"
}
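// rows within a trace are keyed "<spanID>_s" (server span) or "<spanID>_c" (client span)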
key := spanID + kind
var bufMap map[string]map[string][]byte
r.mx.Lock()
if r.closed {
r.mx.Unlock()
return ErrClosed
}
// when the buffer exceeds maxBufSize, swap it out under the lock and
// flush it after unlocking
if r.sizeCount > r.maxBufSize {
bufMap = r.bufMap
r.bufMap = make(map[string]map[string][]byte)
r.sizeCount = 0
}
r.sizeCount += len(data)
if _, ok := r.bufMap[traceID]; !ok {
r.bufMap[traceID] = make(map[string][]byte)
}
r.bufMap[traceID][key] = data
r.mx.Unlock()
if bufMap != nil {
return r.flushBufMap(bufMap)
}
return nil
}
func (r *rawDataBatchWrite) QueueLen() int {
return len(r.ch)
}
func (r *rawDataBatchWrite) daemonFlush() {
for range r.flushTicker.C {
if err := r.flush(); err != nil {
log.Error("flush raw data error: %s", err)
}
}
}
func (r *rawDataBatchWrite) flush() error {
var bufMap map[string]map[string][]byte
r.mx.Lock()
if r.sizeCount != 0 {
bufMap = r.bufMap
r.bufMap = make(map[string]map[string][]byte)
r.sizeCount = 0
}
r.mx.Unlock()
if bufMap != nil {
return r.flushBufMap(bufMap)
}
return nil
}
func (r *rawDataBatchWrite) flushBufMap(bufMap map[string]map[string][]byte) error {
// one timer for the whole map: _writeTimeout bounds the total flush, not each send
timer := time.NewTimer(_writeTimeout)
defer timer.Stop()
for traceID, data := range bufMap {
select {
case <-timer.C:
return errors.New("write span timeout, raw data buffer channel is full")
case r.ch <- &rawBundle{
key: traceID,
data: data,
}:
}
}
return nil
}
func (r *rawDataBatchWrite) Close() error {
r.mx.Lock()
defer r.mx.Unlock()
r.closed = true
r.flushTicker.Stop()
bufMap := r.bufMap
r.bufMap = make(map[string]map[string][]byte)
r.sizeCount = 0
if err := r.flushBufMap(bufMap); err != nil {
log.Error("flush remaining raw data on close error: %s", err)
}
close(r.ch)
r.wg.Wait()
return nil
}
func (r *rawDataBatchWrite) worker() {
for bundle := range r.ch {
if err := r.write(bundle); err != nil {
log.Error("batch write raw data error: %s", err)
}
}
r.wg.Done()
}
func (r *rawDataBatchWrite) write(bundle *rawBundle) error {
ctx, cancel := context.WithTimeout(context.Background(), r.timeout)
defer cancel()
return r.writeFunc(ctx, bundle.key, bundle.data)
}
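In short, the write path is: WriteSpan marshals each span and accumulates it per trace; once the buffer grows past bufSize, or the daemon ticker fires, the whole map is swapped out and pushed onto the bundle channel one trace per rawBundle (with a total budget of _writeTimeout); the worker goroutines drain the channel and call writeFunc with a 10-second timeout per batch.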

View File

@@ -0,0 +1,89 @@
package batchwrite
import (
"context"
"math/rand"
"testing"
"go-common/app/service/main/dapper/model"
)
var (
emptyspan = &model.Span{}
)
func TestRawDataBatchWriter(t *testing.T) {
storage := make(map[string]map[string][]byte)
writeFunc := func(ctx context.Context, traceID string, data map[string][]byte) error {
if _, ok := storage[traceID]; !ok {
storage[traceID] = make(map[string][]byte)
}
for k, v := range data {
storage[traceID][k] = v
}
return nil
}
rbw := NewRawDataBatchWriter(writeFunc, 16, 2, 2, 0)
spans := []*model.Span{
&model.Span{
TraceID: 1,
SpanID: 11,
},
&model.Span{
TraceID: 1,
SpanID: 12,
},
&model.Span{
TraceID: 2,
SpanID: 21,
},
&model.Span{
TraceID: 2,
SpanID: 22,
},
}
for _, span := range spans {
if err := rbw.WriteSpan(span); err != nil {
t.Error(err)
}
}
rbw.Close()
if len(storage) != 2 {
t.Errorf("expect get 2 trace data, get %v", storage)
}
if len(storage["1"]) != 2 {
t.Errorf("expect get 2 span data, get %v", storage["1"])
}
t.Logf("%v\n", storage)
}
func TestBatchWriterClosed(t *testing.T) {
writeFunc2 := func(ctx context.Context, traceID string, data map[string][]byte) error {
return nil
}
rbw := NewRawDataBatchWriter(writeFunc2, 1024*1024, 2, 2, 0)
rbw.Close()
if err := rbw.WriteSpan(emptyspan); err != ErrClosed {
t.Errorf("expect err == ErrClosed get: %v", err)
}
}
func randSpan() *model.Span {
return &model.Span{
TraceID: rand.Uint64() % 128,
SpanID: rand.Uint64() % 16,
}
}
func BenchmarkRawDataWriter(b *testing.B) {
writeFunc := func(ctx context.Context, traceID string, data map[string][]byte) error {
return nil
}
rbw := NewRawDataBatchWriter(writeFunc, 1024*1024, 2, 2, 0)
for i := 0; i < b.N; i++ {
if err := rbw.WriteSpan(randSpan()); err != nil {
b.Error(err)
}
}
rbw.Close()
}

View File

@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["collect.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["//app/service/main/dapper/pkg/process:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/service/main/dapper/pkg/collect/kafkacollect:all-srcs",
"//app/service/main/dapper/pkg/collect/tcpcollect:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,12 @@
package collect
import (
"go-common/app/service/main/dapper/pkg/process"
)
// Collecter collects spans from different sources
type Collecter interface {
Start() error
RegisterProcess(p process.Processer)
Close() error
}
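A minimal sketch of wiring a Collecter to a processor; the run helper below is illustrative, while kafkacollect and tcpcollect further down are the real implementations:
package example
import (
"context"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/collect"
"go-common/app/service/main/dapper/pkg/process"
)
func run(clt collect.Collecter) error {
// every collected span flows through each registered processor in order
clt.RegisterProcess(process.MockProcess(func(ctx context.Context, s *model.ProtoSpan) error {
// handle one span here
return nil
}))
return clt.Start() // call Close() on shutdown
}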

View File

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["collect_test.go"],
embed = [":go_default_library"],
tags = ["automanaged"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["collect.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect/kafkacollect",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/collect:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
"//library/stat/prom:go_default_library",
"//vendor/github.com/Shopify/sarama:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,173 @@
package kafkacollect
import (
"context"
"encoding/json"
"fmt"
"sync"
"github.com/Shopify/sarama"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/collect"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
"go-common/library/stat/prom"
)
var (
collectCount = prom.New().WithCounter("dapper_kafka_collect_count", []string{"name"})
collectErrCount = prom.New().WithCounter("dapper_kafka_collect_err_count", []string{"name"})
)
// Option sets an optional parameter on the kafka collector
type Option func(*option)
type option struct {
group string
topic string
addrs []string
}
func (o option) saramaConfig() *sarama.Config {
// returning nil makes sarama.NewClient fall back to its default config
return nil
}
var defaultOption = option{
group: "default",
}
// New creates a kafka collector that consumes spans from the given topic
func New(topic string, addrs []string, options ...Option) (collect.Collecter, error) {
log.V(10).Info("new kafkacollect topic %s addrs: %v", topic, addrs)
if len(addrs) == 0 {
return nil, fmt.Errorf("kafka addrs required")
}
opt := defaultOption
for _, fn := range options {
fn(&opt)
}
opt.addrs = addrs
opt.topic = topic
clt := &kafkaCollect{opt: opt}
return clt, nil
}
type kafkaCollect struct {
wg sync.WaitGroup
opt option
ps []process.Processer
consumers []*consumer
client sarama.Client
offsetManager sarama.OffsetManager
baseConsumer sarama.Consumer
}
func (k *kafkaCollect) RegisterProcess(p process.Processer) {
k.ps = append(k.ps, p)
}
func (k *kafkaCollect) Start() error {
var err error
if k.client, err = sarama.NewClient(k.opt.addrs, k.opt.saramaConfig()); err != nil {
return fmt.Errorf("new kafka client error: %s", err)
}
if k.offsetManager, err = sarama.NewOffsetManagerFromClient(k.opt.group, k.client); err != nil {
return fmt.Errorf("new offset manager error: %s", err)
}
if k.baseConsumer, err = sarama.NewConsumerFromClient(k.client); err != nil {
return fmt.Errorf("new kafka consumer error: %s", err)
}
log.Info("kafkacollect consumer from topic: %s addrs: %s", k.opt.topic, k.opt.topic)
return k.start()
}
func (k *kafkaCollect) handler(protoSpan *model.ProtoSpan) {
var err error
for _, p := range k.ps {
if err = p.Process(context.Background(), protoSpan); err != nil {
log.Error("process span error: %s, discard", err)
}
}
}
func (k *kafkaCollect) start() error {
ps, err := k.client.Partitions(k.opt.topic)
if err != nil {
return fmt.Errorf("get partitions error: %s", err)
}
for _, p := range ps {
var pom sarama.PartitionOffsetManager
if pom, err = k.offsetManager.ManagePartition(k.opt.topic, p); err != nil {
return fmt.Errorf("new manage partition error: %s", err)
}
offset, _ := pom.NextOffset()
if offset == -1 {
offset = sarama.OffsetOldest
}
var c sarama.PartitionConsumer
log.V(10).Info("partitions %d start offset %d", p, offset)
if c, err = k.baseConsumer.ConsumePartition(k.opt.topic, p, offset); err != nil {
return fmt.Errorf("new consume partition error: %s", err)
}
log.V(10).Info("start partition consumer partition: %d, offset: %d", p, offset)
consumer := newConsumer(k, c, pom)
k.consumers = append(k.consumers, consumer)
k.wg.Add(1)
go consumer.start()
}
return nil
}
func (k *kafkaCollect) Close() error {
for _, c := range k.consumers {
if err := c.close(); err != nil {
log.Warn("close consumer error: %s", err)
}
}
k.wg.Wait()
return nil
}
func newConsumer(k *kafkaCollect, c sarama.PartitionConsumer, pom sarama.PartitionOffsetManager) *consumer {
return &consumer{kafkaCollect: k, consumer: c, pom: pom, closeCh: make(chan struct{}, 1)}
}
type consumer struct {
*kafkaCollect
pom sarama.PartitionOffsetManager
consumer sarama.PartitionConsumer
closeCh chan struct{}
}
func (c *consumer) close() error {
c.closeCh <- struct{}{}
c.pom.Close()
return c.consumer.Close()
}
func (c *consumer) start() {
defer c.wg.Done()
var err error
for {
select {
case msg := <-c.consumer.Messages():
collectCount.Incr("count")
c.pom.MarkOffset(msg.Offset+1, "")
log.V(10).Info("receive message from kafka topic: %s key: %s content: %s", msg.Key, msg.Topic, msg.Value)
protoSpan := new(model.ProtoSpan)
if err = json.Unmarshal(msg.Value, protoSpan); err != nil {
collectErrCount.Incr("count_error")
log.Error("unmarshal span from kafka error: %s, value: %v", err, value)
continue
}
c.handler(protoSpan)
case <-c.closeCh:
log.V(10).Info("receive closed return")
return
}
}
}

View File

@@ -0,0 +1,32 @@
package kafkacollect
import (
"context"
"flag"
"fmt"
"testing"
"time"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
)
func TestKafkaCollect(t *testing.T) {
flag.Parse()
log.Init(nil)
clt, err := New("lancer_main_dapper_collector", []string{"172.18.33.163:9092", "172.18.33.164:9092", "172.18.33.165:9092"})
if err != nil {
t.Fatal(err)
}
m := process.MockProcess(func(ctx context.Context, protoSpan *model.ProtoSpan) error {
fmt.Printf("%v\n", protoSpan)
return nil
})
clt.RegisterProcess(m)
if err := clt.Start(); err != nil {
t.Fatal(err)
}
defer clt.Close()
time.Sleep(time.Minute)
}

View File

@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["server_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/service/main/dapper/conf:go_default_library",
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["server.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect/tcpcollect",
tags = ["manual"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/conf:go_default_library",
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
"//library/net/trace/proto:go_default_library",
"//library/stat/counter:go_default_library",
"//library/stat/prom:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,256 @@
package tcpcollect
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"net"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/timestamp"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
protogen "go-common/library/net/trace/proto"
"go-common/library/stat/counter"
"go-common/library/stat/prom"
)
var (
collectCount = prom.New().WithCounter("dapper_collect_count", []string{"remote_host"})
collectErrCount = prom.New().WithCounter("dapper_collect_err_count", []string{"remote_host"})
)
const (
_magicSize = 2
_headerSize = 6
)
var (
_magicBuf = []byte{0xAC, 0xBE}
_separator = []byte("\001")
)
// ClientStatus agent client status
type ClientStatus struct {
Addr string
Counter counter.Counter
ErrorCounter counter.Counter
UpTime int64
}
func (c *ClientStatus) incr(iserr bool) {
if iserr {
collectErrCount.Incr(c.ClientHost())
}
collectCount.Incr(c.ClientHost())
c.Counter.Add(1)
}
// ClientHost extracts the host part of the client address
func (c *ClientStatus) ClientHost() string {
host, _, _ := net.SplitHostPort(c.Addr)
return host
}
// TCPCollect tcp server.
type TCPCollect struct {
cfg *conf.Collect
lis net.Listener
clientMap map[string]*ClientStatus
rmx sync.RWMutex
ps []process.Processer
}
// New tcp server.
func New(cfg *conf.Collect) *TCPCollect {
svr := &TCPCollect{
cfg: cfg,
clientMap: make(map[string]*ClientStatus),
}
return svr
}
// RegisterProcess registers a span processor with the collector
func (s *TCPCollect) RegisterProcess(p process.Processer) {
s.ps = append(s.ps, p)
}
func (s *TCPCollect) addClient(cs *ClientStatus) {
s.rmx.Lock()
defer s.rmx.Unlock()
s.clientMap[cs.Addr] = cs
}
func (s *TCPCollect) removeClient(cs *ClientStatus) {
s.rmx.Lock()
defer s.rmx.Unlock()
delete(s.clientMap, cs.Addr)
}
// ClientStatus returns a snapshot of all connected client statuses
func (s *TCPCollect) ClientStatus() []*ClientStatus {
s.rmx.RLock()
defer s.rmx.RUnlock()
css := make([]*ClientStatus, 0, len(s.clientMap))
for _, cs := range s.clientMap {
css = append(css, cs)
}
return css
}
// Start tcp server.
func (s *TCPCollect) Start() error {
var err error
if s.lis, err = net.Listen(s.cfg.Network, s.cfg.Addr); err != nil {
return err
}
go func() {
for {
conn, err := s.lis.Accept()
if err != nil {
if netE, ok := err.(net.Error); ok && netE.Temporary() {
log.Error("l.Accept() error(%v)", err)
time.Sleep(time.Second)
continue
}
return
}
go s.serveConn(conn)
}
}()
log.Info("tcp server start addr:%s@%s", s.cfg.Network, s.cfg.Addr)
return nil
}
// Close tcp server.
func (s *TCPCollect) Close() error {
return s.lis.Close()
}
func (s *TCPCollect) serveConn(conn net.Conn) {
log.Info("serverConn remoteIP:%s", conn.RemoteAddr().String())
cs := &ClientStatus{
Addr: conn.RemoteAddr().String(),
Counter: counter.NewRolling(time.Second, 100),
ErrorCounter: counter.NewGauge(),
UpTime: time.Now().Unix(),
}
s.addClient(cs)
defer conn.Close()
defer s.removeClient(cs)
rd := bufio.NewReaderSize(conn, 65536)
for {
buf, err := s.tailPacket(rd)
if err != nil {
log.Error("s.tailPacket() remoteIP:%s error(%v)", conn.RemoteAddr().String(), err)
cs.incr(true)
return
}
if len(buf) == 0 {
log.Error("s.tailPacket() is empty")
cs.incr(true)
continue
}
data := buf
fields := bytes.Split(buf, _separator)
if len(fields) >= 16 {
if data, err = s.legacySpan(fields[2:]); err != nil {
log.Error("convert legacy span error: %s", err)
continue
}
}
protoSpan := new(protogen.Span)
if err = proto.Unmarshal(data, protoSpan); err != nil {
log.Error("unmarshal data %s error: %s", err, data)
continue
}
for _, p := range s.ps {
if pe := p.Process(context.Background(), (*model.ProtoSpan)(protoSpan)); pe != nil {
log.Error("process span %s error: %s", protoSpan, err)
}
}
cs.incr(err != nil)
}
}
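// tailPacket reads one frame off the wire. Frame layout: 2-byte magic 0xACBE,
// 4-byte big-endian length, 2 reserved bytes, then the payload; the length
// field counts the reserved bytes plus the payload, so the payload starts at
// offset _headerSize + _magicSize.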
func (s *TCPCollect) tailPacket(rr *bufio.Reader) (res []byte, err error) {
var buf []byte
// peek magic
for {
if buf, err = rr.Peek(_magicSize); err != nil {
return
}
if bytes.Equal(buf, _magicBuf) {
break
}
rr.Discard(1)
}
// peek length
if buf, err = rr.Peek(_headerSize); err != nil {
return
}
// peek body
packetLen := int(binary.BigEndian.Uint32(buf[_magicSize:_headerSize]))
if buf, err = rr.Peek(_headerSize + packetLen); err != nil {
return
}
res = buf[_headerSize+_magicSize:]
rr.Discard(packetLen + _headerSize)
return
}
// startTime/endTime/traceID/spanID/parentID/event/level/class/sample/address/family/title/comment/caller/error
func (s *TCPCollect) legacySpan(fields [][]byte) ([]byte, error) {
startAt, _ := strconv.ParseInt(string(fields[0]), 10, 64)
finishAt, _ := strconv.ParseInt(string(fields[1]), 10, 64)
traceID, _ := strconv.ParseUint(string(fields[2]), 10, 64)
spanID, _ := strconv.ParseUint(string(fields[3]), 10, 64)
parentID, _ := strconv.ParseUint(string(fields[4]), 10, 64)
event, _ := strconv.Atoi(string(fields[5]))
start := 8
if len(fields) == 14 {
start = 7
}
address := string(fields[start+1])
family := string(fields[start+2])
title := string(fields[start+3])
comment := string(fields[start+4])
caller := string(fields[start+5])
errMsg := string(fields[start+6])
span := &protogen.Span{Version: 2}
span.ServiceName = family
span.OperationName = title
span.Caller = caller
span.TraceId = traceID
span.SpanId = spanID
span.ParentId = parentID
span.StartTime = &timestamp.Timestamp{
Seconds: startAt / int64(time.Second),
Nanos: int32(startAt % int64(time.Second)),
}
d := finishAt - startAt
span.Duration = &duration.Duration{
Seconds: d / int64(time.Second),
Nanos: int32(d % int64(time.Second)),
}
if event == 0 {
span.Tags = append(span.Tags, &protogen.Tag{Key: "span.kind", Kind: protogen.Tag_STRING, Value: []byte("client")})
} else {
span.Tags = append(span.Tags, &protogen.Tag{Key: "span.kind", Kind: protogen.Tag_STRING, Value: []byte("server")})
}
span.Tags = append(span.Tags, &protogen.Tag{Key: "legacy.address", Kind: protogen.Tag_STRING, Value: []byte(address)})
span.Tags = append(span.Tags, &protogen.Tag{Key: "legacy.comment", Kind: protogen.Tag_STRING, Value: []byte(comment)})
if errMsg != "" {
span.Logs = append(span.Logs, &protogen.Log{Key: "legacy.error", Fields: []*protogen.Field{&protogen.Field{Key: "error", Value: []byte(errMsg)}}})
}
return proto.Marshal(span)
}

View File

@@ -0,0 +1,45 @@
package tcpcollect
import (
"context"
"io"
"net"
"os"
"testing"
"time"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
)
func TestCollect(t *testing.T) {
count := 0
collect := New(&conf.Collect{Network: "tcp", Addr: "127.0.0.1:6190"})
collect.RegisterProcess(process.MockProcess(func(context.Context, *model.ProtoSpan) error {
count++
return nil
}))
if err := collect.Start(); err != nil {
t.Fatal(err)
}
fp, err := os.Open("testdata/data.bin")
if err != nil {
t.Fatal(err)
}
defer fp.Close()
conn, err := net.Dial("tcp", "127.0.0.1:6190")
if err != nil {
t.Fatal(err)
}
defer conn.Close()
_, err = io.Copy(conn, fp)
if err != nil {
t.Error(err)
}
time.Sleep(time.Second)
if count <= 0 {
t.Errorf("expect more than one span write")
}
}

Binary file not shown.

View File

@@ -0,0 +1,38 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["deliver_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["deliver.go"],
importpath = "go-common/app/service/main/dapper/pkg/deliver",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["//library/log:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,155 @@
package deliver
import (
"encoding/binary"
"fmt"
"math/rand"
"net"
"sync"
"time"
"go-common/library/log"
)
var (
_magicBuf = []byte{0xAC, 0xBE}
_bufpool sync.Pool
)
func init() {
rand.Seed(time.Now().UnixNano())
_bufpool = sync.Pool{New: func() interface{} {
return make([]byte, 0, 4096)
}}
}
func freeBuf(buf []byte) {
buf = buf[:0]
_bufpool.Put(buf)
}
func getBuf() []byte {
return _bufpool.Get().([]byte)
}
// Deliver delivers spans to dapper-service over tcp
type Deliver struct {
servers []string
readFn func() ([]byte, error)
conn *net.TCPConn
dataCh chan []byte
closeCh chan struct{}
closed bool
}
// New creates a Deliver that pulls frames via readFn and ships them to a randomly chosen server
func New(servers []string, readFn func() ([]byte, error)) (*Deliver, error) {
if len(servers) == 0 {
return nil, fmt.Errorf("no server provide")
}
d := &Deliver{
servers: servers,
readFn: readFn,
closeCh: make(chan struct{}, 1),
dataCh: make(chan []byte),
}
return d, d.start()
}
func (d *Deliver) start() error {
if err := d.dial(); err != nil {
return err
}
go d.fetch()
go d.loop()
return nil
}
func (d *Deliver) fetch() {
for {
if d.closed {
return
}
data, err := d.readFn()
if err != nil {
log.Error("deliver read data error: %s", err)
continue
}
d.dataCh <- data
}
}
func (d *Deliver) loop() {
for {
select {
case <-d.closeCh:
return
case data := <-d.dataCh:
data = wrapData(data)
send:
// on write failure, re-dial and resend the same frame
_, err := d.conn.Write(data)
if err == nil {
freeBuf(data)
continue
}
d.reDial()
goto send
}
}
}
// Close deliver
func (d *Deliver) Close() error {
if d.closed {
return fmt.Errorf("already closed")
}
d.closed = true
d.closeCh <- struct{}{}
timer := time.NewTimer(50 * time.Millisecond)
select {
case data := <-d.dataCh:
// write the last pending frame before returning
if _, err := d.conn.Write(data); err != nil {
return fmt.Errorf("write last data error: %s", err)
}
return nil
case <-timer.C:
return nil
}
}
func (d *Deliver) reDial() {
if d.conn != nil {
d.conn.Close()
}
for {
if err := d.dial(); err != nil {
log.Error("redial error: %s, retry after one second", err)
time.Sleep(time.Second)
continue
}
break
}
}
func (d *Deliver) dial() error {
server := chooseServer(d.servers)
conn, err := net.Dial("tcp", server)
if err != nil {
return fmt.Errorf("dial tcp://%s error: %s", server, err)
}
d.conn = conn.(*net.TCPConn)
d.conn.SetKeepAlive(true)
return nil
}
func chooseServer(servers []string) string {
return servers[rand.Intn(len(servers))]
}
// wrapData frames a payload as: 2-byte magic 0xACBE, 4-byte big-endian length,
// 2 reserved bytes, then the payload; the length field counts the reserved
// bytes plus the payload (hence len(data)+2), matching tcpcollect.tailPacket.
func wrapData(data []byte) []byte {
buf := getBuf()
buf = append(buf, _magicBuf...)
buf = append(buf, []byte{0, 0, 0, 0, 0, 0}...)
binary.BigEndian.PutUint32(buf[2:6], uint32(len(data)+2))
buf = append(buf, data...)
return buf
}
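One plausible way to supply readFn is to pair Deliver with the diskqueue package from this commit; the spool path, server address, and the pairing itself are illustrative, not something this commit wires up:
package main
import (
"io"
"time"
"go-common/app/service/main/dapper/pkg/deliver"
"go-common/app/service/main/dapper/pkg/diskqueue"
)
func main() {
q, err := diskqueue.New("/tmp/dapper-queue") // spool directory is illustrative
if err != nil {
panic(err)
}
readFn := func() ([]byte, error) {
for {
data, err := q.Pop()
if err == io.EOF {
// queue empty: back off instead of hot-looping in fetch
time.Sleep(50 * time.Millisecond)
continue
}
return data, err
}
}
d, err := deliver.New([]string{"127.0.0.1:6190"}, readFn) // address is illustrative
if err != nil {
panic(err)
}
time.Sleep(time.Second) // let the loop drain briefly in this sketch
d.Close()
}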

View File

@@ -0,0 +1,45 @@
package deliver
import (
"bytes"
"encoding/binary"
"io"
"net"
"testing"
"time"
)
func TestDeliver(t *testing.T) {
buf := &bytes.Buffer{}
lis, err := net.Listen("tcp", "127.0.0.1:12233")
if err != nil {
t.Fatal(err)
}
go func() {
conn, err := lis.Accept()
if err != nil {
t.Error(err) // t.Fatal must not be called from a non-test goroutine
return
}
io.Copy(buf, conn)
}()
data := []byte("hello world")
readed := make(chan bool, 1)
d, err := New([]string{"127.0.0.1:12233"}, func() ([]byte, error) {
readed <- true
return data, nil
})
if err != nil {
t.Fatal(err)
}
time.Sleep(500 * time.Millisecond)
if !bytes.Equal(buf.Bytes()[0:2], _magicBuf) {
t.Error("invalid data, wrong magic header")
}
if int(binary.BigEndian.Uint32(buf.Bytes()[2:6])) != len(data)+2 {
t.Error("wrong data length")
}
// payload starts after the 8-byte header (magic + length + 2 reserved bytes)
if !bytes.Equal(buf.Bytes()[8:], data) {
t.Errorf("invalid content %s", buf.Bytes()[8:])
}
d.Close()
}

View File

@@ -0,0 +1,43 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = [
"bucket_test.go",
"diskqueue_test.go",
],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = [
"bucket.go",
"diskqueue.go",
],
importpath = "go-common/app/service/main/dapper/pkg/diskqueue",
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,200 @@
package diskqueue
import (
"bufio"
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"os"
"sync"
)
const (
_blockByte int32 = 512
_lenByte int32 = 2
_dataByte = _blockByte - _lenByte
)
var errBucketFull = errors.New("bucket is full or not enough")
// the 2-byte block header is either the payload length (< _dataByte) or a
// sentinel: fullHeader (0x01FE == _dataByte) marks a record ending exactly on
// a block boundary, nextHeader (0x01FF > _dataByte) marks a continuation block
var fullHeader = []byte{1, 254}
var nextHeader = []byte{1, 255}
var magicHeader = []byte{'D', 'Q'}
type memBucketPool struct {
cap int32
pool sync.Pool
}
func newMemBucketPool(bucketByte int32) *memBucketPool {
return &memBucketPool{
pool: sync.Pool{New: func() interface{} {
return make([]byte, bucketByte)
}},
cap: bucketByte / _blockByte,
}
}
func (m *memBucketPool) new() *memBucket {
data := m.pool.Get().([]byte)
return &memBucket{data: data, cap: m.cap}
}
func (m *memBucketPool) free(bucket *memBucket) {
m.pool.Put(bucket.data)
}
type memBucket struct {
sync.Mutex
cap int32
readAt int32
writeAt int32
data []byte
}
func (m *memBucket) push(p []byte) error {
m.Lock()
defer m.Unlock()
length := int32(len(p))
if length > _dataByte*(m.cap-m.writeAt) {
return errBucketFull
}
// short record: fits in a single block, write the length header and payload directly
if length < _dataByte {
ds := m.writeAt * _blockByte
binary.BigEndian.PutUint16(m.data[ds:], uint16(length))
copy(m.data[ds+_lenByte:], p)
m.writeAt++
return nil
}
// loop write block
blocks := length / _dataByte
re := length % _dataByte
var i int32
for i = 0; i < blocks-1; i++ {
ds := m.writeAt * _blockByte
copy(m.data[ds:], nextHeader)
ps := i * _dataByte
copy(m.data[ds+_lenByte:], p[ps:ps+_dataByte])
m.writeAt++
}
var nh []byte
if re == 0 {
nh = fullHeader
} else {
nh = nextHeader
}
ds := m.writeAt * _blockByte
copy(m.data[ds:], nh)
ps := (blocks - 1) * _dataByte
copy(m.data[ds+_lenByte:], p[ps:ps+_dataByte])
m.writeAt++
if re != 0 {
ds := m.writeAt * _blockByte
binary.BigEndian.PutUint16(m.data[ds:], uint16(re))
copy(m.data[ds+_lenByte:], p[blocks*_dataByte:])
m.writeAt++
}
return nil
}
func (m *memBucket) pop() ([]byte, error) {
m.Lock()
defer m.Unlock()
if m.readAt >= m.writeAt {
return nil, io.EOF
}
ret := make([]byte, 0, _blockByte)
for m.readAt < m.writeAt {
ds := m.readAt * _blockByte
m.readAt++
l := int32(binary.BigEndian.Uint16(m.data[ds : ds+_lenByte]))
if l <= _dataByte {
ret = append(ret, m.data[ds+_lenByte:ds+_lenByte+l]...)
break
}
ret = append(ret, m.data[ds+_lenByte:ds+_blockByte]...)
}
return ret, nil
}
func (m *memBucket) dump(w io.Writer) (int, error) {
header := make([]byte, 10)
copy(header, magicHeader)
binary.BigEndian.PutUint32(header[2:6], uint32(m.readAt))
binary.BigEndian.PutUint32(header[6:10], uint32(m.writeAt))
n1, err := w.Write(header)
if err != nil {
return n1, err
}
n2, err := w.Write(m.data[:m.writeAt*_blockByte])
return n1 + n2, err
}
func newFileBucket(fpath string) (*fileBucket, error) {
fp, err := os.Open(fpath)
if err != nil {
return nil, err
}
header := make([]byte, 10)
n, err := fp.Read(header)
if err != nil {
return nil, err
}
if n != 10 {
return nil, fmt.Errorf("expect read 10 byte header get: %d", n)
}
if !bytes.Equal(header[:2], magicHeader) {
return nil, fmt.Errorf("invalid magic %s", header[:2])
}
readAt := int32(binary.BigEndian.Uint32(header[2:6]))
writeAt := int32(binary.BigEndian.Uint32(header[6:10]))
if _, err = fp.Seek(int64(readAt*_blockByte), os.SEEK_CUR); err != nil {
return nil, err
}
return &fileBucket{
fp: fp,
readAt: readAt,
writeAt: writeAt,
bufRd: bufio.NewReader(fp),
}, nil
}
type fileBucket struct {
sync.Mutex
fp *os.File
readAt int32
writeAt int32
bufRd *bufio.Reader
}
func (f *fileBucket) pop() ([]byte, error) {
f.Lock()
defer f.Unlock()
if f.readAt >= f.writeAt {
return nil, io.EOF
}
ret := make([]byte, 0, _blockByte)
block := make([]byte, _blockByte)
for f.readAt < f.writeAt {
n, err := f.bufRd.Read(block)
if err != nil {
return nil, err
}
if int32(n) != _blockByte {
return nil, fmt.Errorf("expect read %d byte data get %d", _blockByte, n)
}
l := int32(binary.BigEndian.Uint16(block[:2]))
if l <= _dataByte {
ret = append(ret, block[2:2+l]...)
break
}
ret = append(ret, block[2:_blockByte]...)
}
return ret, nil
}
func (f *fileBucket) close() error {
return f.fp.Close()
}
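For intuition on the block layout: with _blockByte = 512 and _dataByte = 510, the 1890-byte record used in the tests occupies 1890 = 3*510 + 360 bytes, that is, three continuation blocks flagged with nextHeader followed by a final block whose 2-byte header stores the remaining length 360, four 512-byte blocks in total.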

View File

@@ -0,0 +1,105 @@
package diskqueue
import (
"crypto/rand"
"io"
"os"
"reflect"
"testing"
)
func Test_membucket(t *testing.T) {
cap := int32(16)
data := make([]byte, _blockByte*cap)
mb := &memBucket{
cap: cap,
data: data,
}
t.Run("test push & pop small data", func(t *testing.T) {
p := []byte("hello world")
err := mb.push(p)
if err != nil {
t.Error(err)
}
ret, err := mb.pop()
if err != nil {
t.Error(err)
} else {
if !reflect.DeepEqual(ret, p) {
t.Errorf("%s not equal %s", ret, p)
}
}
})
t.Run("test push & pop big data", func(t *testing.T) {
p := make([]byte, 1890)
rand.Read(p)
err := mb.push(p)
if err != nil {
t.Error(err)
}
ret, err := mb.pop()
if err != nil {
t.Error(err)
} else {
if !reflect.DeepEqual(ret, p) {
t.Logf("buf: %v", mb.data)
t.Errorf("%v not equal %v", ret, p)
}
}
})
t.Run("push big data", func(t *testing.T) {
p := make([]byte, _blockByte*cap*2)
err := mb.push(p)
if err != errBucketFull {
t.Errorf("expect err == errBucketFull get: %v", err)
}
})
t.Run("pop io.EOF", func(t *testing.T) {
_, err := mb.pop()
if err != io.EOF {
t.Errorf("expect err == io.EOF get: %v", err)
}
})
}
func Test_fileBucket(t *testing.T) {
fpath := "bucket.bin"
defer os.RemoveAll(fpath)
cap := int32(16)
data := make([]byte, _blockByte*cap)
mb := &memBucket{
cap: cap,
data: data,
}
d1 := []byte("hello world")
for i := 0; i < 10; i++ {
mb.push(d1)
}
fp, err := os.OpenFile(fpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
if err != nil {
t.Fatal(err)
}
mb.dump(fp)
fp.Close()
fb, err := newFileBucket(fpath)
if err != nil {
t.Fatal(err)
}
count := 0
for {
ret, err := fb.pop()
if err != nil {
if err != io.EOF {
t.Error(err)
}
break
}
count++
if !reflect.DeepEqual(ret, d1) {
t.Errorf("%v not equal %v", ret, d1)
}
}
if count != 10 {
t.Errorf("expect 10 data get %d", count)
}
}

View File

@@ -0,0 +1,460 @@
package diskqueue
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
)
const (
// max memory use equals BucketByte * (MemBucket + DynamicMemBucket)
_defaultBucketByte = _blockByte * 2 * 1024 * 16 // 16MB
_defaultMemBucket = 1
_defaultDynamicMemBucket = 3
_filePrefix = "disk_queue_"
)
// node status
const (
_inmem int8 = iota
_indisk
_freed
)
var _globalID int64
// ErrQueueFull is returned when no more memory buckets can be allocated.
var ErrQueueFull = errors.New("error queue is full, can't create new membucket")
func nextNodeID() int64 {
return atomic.AddInt64(&_globalID, 1)
}
// DiskQueue is a FIFO byte queue that spills to disk under memory pressure
type DiskQueue interface {
Push(p []byte) error
Pop() ([]byte, error)
Close() error
}
// Option configures the disk queue
type Option func(opt *option)
// SetBucketByte sets the bucket size in bytes, rounded down to a multiple of _blockByte
func SetBucketByte(n int) Option {
return func(opt *option) {
opt.bucketByte = (int32(n) / _blockByte) * _blockByte
}
}
// SetMemBucket sets the number of resident memory buckets
func SetMemBucket(n int) Option {
return func(opt *option) {
opt.memBucket = int32(n)
}
}
// SetDynamicMemBucket sets the number of extra memory buckets that may be allocated under load
func SetDynamicMemBucket(n int) Option {
return func(opt *option) {
opt.dynamicMemBucket = int32(n)
}
}
// SetMaxBucket sets the maximum number of buckets; 0 means unlimited
func SetMaxBucket(n int) Option {
return func(opt *option) {
opt.maxBucket = int32(n)
}
}
type option struct {
bucketByte int32
memBucket int32
maxBucket int32
dynamicMemBucket int32
fpath string
}
func (o option) validate() error {
if o.bucketByte <= 0 {
return fmt.Errorf("bucket byte must > 0")
}
if o.memBucket <= 0 {
return fmt.Errorf("mem bucket must > 0")
}
if o.dynamicMemBucket <= 0 {
return fmt.Errorf("dynamic mem bucket must > 0")
}
return nil
}
var _defaultOpt = option{
bucketByte: _defaultBucketByte,
memBucket: _defaultMemBucket,
dynamicMemBucket: _defaultDynamicMemBucket,
}
// New creates a DiskQueue backed by directory fpath
func New(fpath string, options ...Option) (DiskQueue, error) {
info, err := os.Stat(fpath)
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("stat %s error: %s", fpath, err)
}
if err = os.MkdirAll(fpath, 0755); err != nil {
return nil, fmt.Errorf("fpath %s not exists try create directry error: %s", fpath, err)
}
} else if !info.IsDir() {
return nil, fmt.Errorf("fpath: %s already exists and not a directory", fpath)
}
// TODO: check permission
opt := _defaultOpt
opt.fpath = fpath
for _, fn := range options {
fn(&opt)
}
if err = opt.validate(); err != nil {
return nil, err
}
b := &base{
opt: opt,
}
if opt.maxBucket == 0 {
return &queue{base: b}, b.init()
}
// TODO: the bounded (maxBucket > 0) variant is not implemented yet
return nil, fmt.Errorf("maxBucket > 0 is not supported yet")
}
type node struct {
id int64
mx sync.Mutex
flushing bool
bucket *memBucket
next *node
fpath string
fbucket *fileBucket
kind int8
}
func (n *node) setFlushing(flushing bool) {
n.mx.Lock()
n.flushing = flushing
n.mx.Unlock()
}
func (n *node) pop() ([]byte, error) {
n.mx.Lock()
defer n.mx.Unlock()
if n.bucket != nil {
return n.bucket.pop()
}
var err error
if n.fbucket == nil {
if n.fbucket, err = newFileBucket(n.fpath); err != nil {
return nil, err
}
}
return n.fbucket.pop()
}
type base struct {
opt option
head *node
tail *node
pool *memBucketPool
length int32
memBucket int32
}
func (b *base) init() error {
b.pool = newMemBucketPool(b.opt.bucketByte)
if loaded, err := b.loadFromFile(); err != nil || loaded {
return err
}
current := &node{
id: nextNodeID(),
bucket: b.pool.new(),
}
b.head = current
b.tail = current
return nil
}
func (b *base) loadFromFile() (bool, error) {
infos, err := ioutil.ReadDir(b.opt.fpath)
if err != nil {
return false, fmt.Errorf("readdir %s error: %s", b.opt.fpath, err)
}
var files []string
for _, info := range infos {
if info.IsDir() || !strings.HasPrefix(info.Name(), _filePrefix) {
continue
}
files = append(files, path.Join(b.opt.fpath, info.Name()))
}
if len(files) == 0 {
return false, nil
}
nodeID := func(name string) int64 {
id, err := strconv.ParseInt(path.Base(name)[len(_filePrefix):], 10, 64)
if err != nil {
panic(fmt.Errorf("invalid file name: %s error: %s", name, err))
}
return id
}
sort.Slice(files, func(i int, j int) bool {
return nodeID(files[i]) < nodeID(files[j])
})
_globalID = nodeID(files[len(files)-1])
current := &node{
id: nodeID(files[0]),
fpath: files[0],
kind: _indisk,
}
b.head = current
for _, file := range files[1:] {
next := &node{
id: nodeID(file),
fpath: file,
kind: _indisk,
}
current.next = next
current = next
}
b.memBucket = 1
next := &node{
id: nextNodeID(),
bucket: b.pool.new(),
}
current.next = next
current = next
b.tail = current
return true, nil
}
type queue struct {
*base
mx sync.Mutex
closed bool
lastID int64
wg sync.WaitGroup
}
func (q *queue) Push(p []byte) (err error) {
if len(p) >= int(q.opt.bucketByte) {
return fmt.Errorf("data too large")
}
if q.closed {
return fmt.Errorf("queue already closed")
}
for {
err = q.tail.bucket.push(p)
if err == nil {
atomic.AddInt32(&q.length, 1)
return
}
if err == errBucketFull {
if err = q.moveTail(); err != nil {
return err
}
continue
}
return
}
}
func (q *queue) moveTail() error {
bucket := atomic.LoadInt32(&q.memBucket)
if bucket >= q.opt.memBucket+q.opt.dynamicMemBucket {
return fmt.Errorf("can't assign memory bucket any more")
}
if bucket >= q.opt.maxBucket {
q.notifyStore()
}
// take tail snapshot
p := q.tail
// lock queue
q.mx.Lock()
defer q.mx.Unlock()
// tail already changed, another goroutine moved it first
if p != q.tail {
return nil
}
atomic.AddInt32(&q.memBucket, 1)
n := &node{
id: nextNodeID(),
bucket: q.pool.new(),
kind: _inmem,
}
// move to new tail
q.tail.next = n
q.tail = n
return nil
}
func (q *queue) notifyStore() {
n := q.head
for n.next != nil {
read := q.head
if n.id > q.lastID && n.kind != _indisk && n != read {
q.lastID = n.id
q.wg.Add(1) // register before the goroutine starts so Close's Wait sees it
go q.storeNode(n)
return
}
n = n.next
}
}
func (q *queue) Pop() (data []byte, err error) {
defer func() {
// decrement the length only when a record was actually returned
if err == nil {
atomic.AddInt32(&q.length, -1)
}
}()
if q.closed {
return nil, fmt.Errorf("queue already closed")
}
data, err = q.head.pop()
if err != nil {
if err == io.EOF {
if err = q.moveHead(); err != nil {
return nil, err
}
return q.head.pop()
}
return nil, err
}
return data, nil
}
func (q *queue) moveHead() error {
tail := q.tail
if q.head == tail {
return io.EOF
}
// move head to next
q.mx.Lock()
head := q.head
q.head = q.head.next
q.mx.Unlock()
// release the node that was just unlinked from the head
q.freeNode(head)
return nil
}
func (q *queue) freeNode(n *node) {
n.mx.Lock()
defer n.mx.Unlock()
if n.flushing {
n.kind = _freed
return
}
if n.bucket != nil {
q.freeBucket(n.bucket)
n.bucket = nil
}
if n.fbucket != nil {
n.fbucket.close()
}
if n.fpath != "" {
// best-effort cleanup; a leftover file is reloaded on the next start
os.Remove(n.fpath)
}
}
func (q *queue) storeNode(n *node) (err error) {
fpath := storePath(q.opt.fpath, n)
defer q.wg.Done()
n.setFlushing(true)
// if the bucket was already released there is nothing to store
if n.bucket == nil {
return
}
// if the node was freed before we ran, just release the membucket
if n.kind == _freed {
q.freeBucket(n.bucket)
return
}
// store bucket to disk; on failure keep the node in memory
if err = store(fpath, n); err != nil {
fmt.Fprintf(os.Stderr, "[ERROR] diskqueue: store node error: %s", err)
n.setFlushing(false)
return
}
n.fpath = fpath
n.setFlushing(false)
if n.kind == _freed {
q.freeBucket(n.bucket)
n.bucket = nil
// best-effort cleanup of the just-written file
os.Remove(fpath)
return
}
n.kind = _indisk
q.mx.Lock()
if q.head != n {
q.freeBucket(n.bucket)
n.bucket = nil
}
q.mx.Unlock()
return
}
func (q *queue) freeBucket(bucket *memBucket) {
q.pool.free(bucket)
atomic.AddInt32(&q.memBucket, -1)
}
func (q *queue) Close() error {
// set closed
q.closed = true
// wait for all in-flight store goroutines to finish
q.wg.Wait()
var messages []string
// persist all remaining in-memory nodes
current := q.head
for current != nil {
if current.kind == _inmem && current.bucket != nil {
fpath := storePath(q.opt.fpath, current)
if err := store(fpath, current); err != nil {
messages = append(messages, err.Error())
}
}
current = current.next
}
if len(messages) == 0 {
return nil
}
return fmt.Errorf("close queue error: %s", strings.Join(messages, "; "))
}
func store(fpath string, n *node) (err error) {
// ignore empty bucket
if n.bucket.writeAt == n.bucket.readAt {
return nil
}
var fp *os.File
fp, err = os.OpenFile(fpath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
return fmt.Errorf("open file %s error: %s", fpath, err)
}
_, err = n.bucket.dump(fp)
if err != nil {
return fmt.Errorf("dump data to file %s error: %s", fpath, err)
}
return
}
func storePath(base string, n *node) string {
return path.Join(base, _filePrefix+strconv.FormatInt(n.id, 10))
}
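Taken together: the queue is a linked list of nodes. Push appends to the in-memory tail bucket; when buckets pile up, notifyStore flushes older ones to disk_queue_<id> files and returns their memory to the pool. Pop drains from the head, lazily reopening spilled files as fileBuckets, and loadFromFile rebuilds the list in id order on restart so unread data survives a process restart.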

View File

@@ -0,0 +1,206 @@
package diskqueue
import (
"bytes"
"crypto/rand"
"io"
mrand "math/rand"
"os"
"os/exec"
"sync"
"testing"
"time"
)
func init() {
mrand.Seed(time.Now().UnixNano())
}
func TestDiskQueuePushPopMem(t *testing.T) {
dirname := "testdata/d1"
defer os.RemoveAll(dirname)
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
N := 10
p := []byte("hello world")
for i := 0; i < N; i++ {
if err := queue.Push(p); err != nil {
t.Error(err)
}
}
count := 0
for {
data, err := queue.Pop()
if err == io.EOF {
break
}
if err != nil {
t.Error(err)
}
if !bytes.Equal(data, p) {
t.Errorf("invalid data: %s", data)
}
count++
}
if count != N {
t.Errorf("wrong count %d", count)
}
}
func TestDiskQueueDisk(t *testing.T) {
data := make([]byte, 2233)
rand.Read(data)
count := 1024 * 256
dirname := "testdata/d2"
defer os.RemoveAll(dirname)
t.Run("test write disk", func(t *testing.T) {
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
for i := 0; i < count; i++ {
if err := queue.Push(data); err != nil {
time.Sleep(time.Second)
if err := queue.Push(data); err != nil {
t.Error(err)
}
}
}
queue.Close()
})
t.Run("test read disk", func(t *testing.T) {
n := 0
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
for {
ret, err := queue.Pop()
if err == io.EOF {
break
}
if !bytes.Equal(data, ret) {
t.Errorf("invalid data unequal")
}
n++
}
if n != count {
t.Errorf("want %d get %d", count, n)
}
})
}
func TestDiskQueueTrans(t *testing.T) {
dirname := "testdata/d3"
defer os.RemoveAll(dirname)
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
data := make([]byte, 1890)
rand.Read(data)
cycles := 512
var wg sync.WaitGroup
wg.Add(2)
done := false
writed := 0
readed := 0
go func() {
defer wg.Done()
for i := 0; i < cycles; i++ {
ms := mrand.Intn(40) + 10
time.Sleep(time.Duration(ms) * time.Millisecond)
for i := 0; i < 128; i++ {
if err := queue.Push(data); err != nil {
t.Error(err)
} else {
writed++
}
}
}
done = true
}()
go func() {
defer wg.Done()
for {
ret, err := queue.Pop()
if err == io.EOF && done {
break
}
if err == io.EOF {
ms := mrand.Intn(10)
time.Sleep(time.Duration(ms) * time.Millisecond)
continue
}
if !bytes.Equal(ret, data) {
t.Fatalf("invalid data, data length: %d, want: %d, data: %v, want: %v", len(ret), len(data), ret, data)
}
readed++
}
}()
wg.Wait()
os.RemoveAll(dirname)
if writed != readed {
t.Errorf("readed: %d != writed: %d", readed, writed)
}
}
func TestEmpty(t *testing.T) {
dirname := "testdata/d4"
defer os.RemoveAll(dirname)
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 5; i++ {
_, err := queue.Pop()
if err != io.EOF {
t.Errorf("expect err == io.EOF, get %v", err)
}
}
}
func TestEmptyCache(t *testing.T) {
datadir := "testdata/emptycache"
dirname := "testdata/de"
if err := exec.Command("cp", "-r", datadir, dirname).Run(); err != nil {
t.Error(err)
}
defer os.RemoveAll(dirname)
queue, err := New(dirname)
if err != nil {
t.Fatal(err)
}
for i := 0; i < 5; i++ {
_, err := queue.Pop()
if err != io.EOF {
t.Errorf("expect err == io.EOF, get %v", err)
}
}
}
func BenchmarkDiskQueue(b *testing.B) {
queue, err := New("testdata/d5")
if err != nil {
b.Fatal(err)
}
done := make(chan bool, 1)
go func() {
for {
if _, err := queue.Pop(); err != nil {
if err == io.EOF {
break
}
}
}
done <- true
}()
data := make([]byte, 768)
rand.Read(data)
for i := 0; i < b.N; i++ {
queue.Push(data)
}
<-done
}

View File

@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["pointwrite_test.go"],
embed = [":go_default_library"],
tags = ["automanaged"],
deps = ["//app/service/main/dapper/model:go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["pointwrite.go"],
importpath = "go-common/app/service/main/dapper/pkg/pointwrite",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//library/log:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,139 @@
package pointwrite
import (
"context"
"fmt"
"strings"
"sync"
"time"
"go-common/app/service/main/dapper/model"
"go-common/library/log"
)
// WriteFn persists a batch of span points.
type WriteFn func(ctx context.Context, points []*model.SpanPoint) error
// PointWriter aggregates spans into sample points and writes them out
type PointWriter interface {
WriteSpan(span *model.Span) error
Close() error
}
// New creates a PointWriter that buckets spans into precision-second windows and flushes them with the given timeout
func New(fn WriteFn, precision int64, timeout time.Duration) PointWriter {
pw := &pointwriter{
precision: precision,
current: make(map[string]*model.SpanPoint),
timeout: timeout,
// TODO make it configurable
tk: time.NewTicker(time.Second * 30),
fn: fn,
}
go pw.start()
return pw
}
type pointwriter struct {
closed bool
rmx sync.RWMutex
precision int64
timeout time.Duration
current map[string]*model.SpanPoint
fn WriteFn
tk *time.Ticker
}
func (p *pointwriter) start() {
for range p.tk.C {
err := p.flush()
if err != nil {
log.Error("flush pointwriter error: %s", err)
}
}
}
func (p *pointwriter) flush() error {
p.rmx.Lock()
current := p.current
p.current = make(map[string]*model.SpanPoint)
p.rmx.Unlock()
points := make([]*model.SpanPoint, 0, len(current))
for _, point := range current {
points = append(points, point)
}
if len(points) == 0 {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), p.timeout)
defer cancel()
return p.fn(ctx, points)
}
// WriteSpan aggregates one span into the current point set
func (p *pointwriter) WriteSpan(span *model.Span) error {
if p.closed {
return fmt.Errorf("pointwriter already closed")
}
kind := "client"
if span.IsServer() {
kind = "server"
}
// NOTE: ignore the sample point if this is a legacy span, DELETE in the future
if kind == "client" && !strings.Contains(span.ServiceName, ".") {
return nil
}
peerService, ok := span.Tags["peer.service"].(string)
if !ok {
peerService = "unknown"
}
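// aggregate into precision-second buckets keyed by
// timestamp/service/operation/peer/kind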
timestamp := span.StartTime.Unix() - (span.StartTime.Unix() % p.precision)
key := fmt.Sprintf("%d_%s_%s_%s_%s",
timestamp,
span.ServiceName,
span.OperationName,
peerService,
kind,
)
p.rmx.Lock()
defer p.rmx.Unlock()
point, ok := p.current[key]
if !ok {
point = &model.SpanPoint{
Timestamp: timestamp,
ServiceName: span.ServiceName,
OperationName: span.OperationName,
PeerService: peerService,
SpanKind: kind,
AvgDuration: model.SamplePoint{TraceID: span.TraceID, SpanID: span.SpanID, Value: int64(span.Duration)},
}
p.current[key] = point
}
duration := int64(span.Duration)
if duration > point.MaxDuration.Value {
point.MaxDuration.TraceID = span.TraceID
point.MaxDuration.SpanID = span.SpanID
point.MaxDuration.Value = duration
}
if point.MinDuration.Value == 0 || duration < point.MinDuration.Value {
point.MinDuration.TraceID = span.TraceID
point.MinDuration.SpanID = span.SpanID
point.MinDuration.Value = duration
}
if span.IsError() {
point.Errors = append(point.Errors, model.SamplePoint{
TraceID: span.TraceID,
SpanID: span.SpanID,
Value: duration,
})
}
return nil
}
// Close pointwriter
func (p *pointwriter) Close() error {
p.closed = true
p.tk.Stop()
return p.flush()
}

View File

@@ -0,0 +1,76 @@
package pointwrite
import (
"context"
"testing"
"time"
"go-common/app/service/main/dapper/model"
)
func TestPointWrite(t *testing.T) {
var data []*model.SpanPoint
mockFn := func(ctx context.Context, points []*model.SpanPoint) error {
data = append(data, points...)
return nil
}
pw := &pointwriter{
fn: mockFn,
current: make(map[string]*model.SpanPoint),
precision: 5,
timeout: time.Second,
tk: time.NewTicker(time.Second * time.Duration(5)),
}
spans := []*model.Span{
&model.Span{
ServiceName: "test1",
StartTime: time.Unix(100, 0),
},
&model.Span{
ServiceName: "test1",
StartTime: time.Unix(110, 0),
},
}
for _, span := range spans {
if err := pw.WriteSpan(span); err != nil {
t.Error(err)
}
}
if len(pw.current) != 2 {
t.Errorf("expect 2 point get %d", len(pw.current))
}
pw.flush()
if len(data) != 2 {
t.Errorf("expect 2 point get %d", len(data))
}
}
func TestPointWriteFlush(t *testing.T) {
var data []*model.SpanPoint
wait := make(chan bool, 1)
mockFn := func(ctx context.Context, points []*model.SpanPoint) error {
data = append(data, points...)
wait <- true
return nil
}
pw := New(mockFn, 1, time.Second)
spans := []*model.Span{
&model.Span{
ServiceName: "test1",
StartTime: time.Unix(100, 0),
},
&model.Span{
ServiceName: "test1",
StartTime: time.Unix(110, 0),
},
}
for _, span := range spans {
if err := pw.WriteSpan(span); err != nil {
t.Error(err)
}
}
<-wait
if len(data) != 2 {
t.Errorf("expect 2 point get %d", len(data))
}
}

View File

@@ -0,0 +1,29 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["process.go"],
importpath = "go-common/app/service/main/dapper/pkg/process",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["//app/service/main/dapper/model:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,20 @@
package process
import (
"context"
"go-common/app/service/main/dapper/model"
)
// Processer processes collected spans.
type Processer interface {
Process(ctx context.Context, protoSpan *model.ProtoSpan) error
}
// MockProcess adapts a function to the Processer interface
type MockProcess func(ctx context.Context, protoSpan *model.ProtoSpan) error
// Process implement Processer
func (m MockProcess) Process(ctx context.Context, protoSpan *model.ProtoSpan) error {
return m(ctx, protoSpan)
}

View File

@@ -0,0 +1,28 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["handlersignal.go"],
importpath = "go-common/app/service/main/dapper/pkg/util",
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,26 @@
package util
import (
"os"
"os/signal"
"syscall"
)
// HandlerExit blocks until SIGQUIT/SIGTERM/SIGINT, then calls exitFn and exits with its return value
func HandlerExit(exitFn func(s os.Signal) int) {
sch := make(chan os.Signal, 1)
signal.Notify(sch, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT)
s := <-sch
os.Exit(exitFn(s))
}
// HandlerReload invokes reload on each SIGHUP
func HandlerReload(reload func(s os.Signal)) {
go func() {
sch := make(chan os.Signal, 1)
signal.Notify(sch, syscall.SIGHUP)
for s := range sch {
reload(s)
}
}()
}
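A minimal sketch of how these helpers are typically wired from a main function; the cleanup bodies are illustrative:
package main
import (
"os"
"go-common/app/service/main/dapper/pkg/util"
)
func main() {
// reload configuration on SIGHUP without restarting
util.HandlerReload(func(s os.Signal) {
// re-read config here
})
// block until SIGQUIT/SIGTERM/SIGINT, run cleanup, then exit with the returned code
util.HandlerExit(func(s os.Signal) int {
// flush and close resources here
return 0
})
}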