Create & Init Project...

commit fc4fa37393
2019-04-22 18:49:16 +08:00
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,33 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["collect.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = ["//app/service/main/dapper/pkg/process:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//app/service/main/dapper/pkg/collect/kafkacollect:all-srcs",
"//app/service/main/dapper/pkg/collect/tcpcollect:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,12 @@
package collect
import (
"go-common/app/service/main/dapper/pkg/process"
)
// Collecter collects spans from different sources
type Collecter interface {
Start() error
RegisterProcess(p process.Processer)
Close() error
}
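
As a reading aid (not part of the commit): a minimal sketch of a Collecter implementation fed from an in-memory channel, assuming process.Processer exposes Process(ctx, *model.ProtoSpan) error, as the kafka and tcp collectors below rely on:

package collect

import (
"context"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
)

// chanCollect is an illustrative Collecter that reads spans from a channel.
type chanCollect struct {
spans  chan *model.ProtoSpan
ps     []process.Processer
closed chan struct{}
}

func newChanCollect(buf int) *chanCollect {
return &chanCollect{spans: make(chan *model.ProtoSpan, buf), closed: make(chan struct{})}
}

func (c *chanCollect) RegisterProcess(p process.Processer) { c.ps = append(c.ps, p) }

func (c *chanCollect) Start() error {
go func() {
for {
select {
case span := <-c.spans:
for _, p := range c.ps {
p.Process(context.Background(), span) // a real collector would log and drop on error
}
case <-c.closed:
return
}
}
}()
return nil
}

func (c *chanCollect) Close() error {
close(c.closed)
return nil
}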

View File

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["collect_test.go"],
embed = [":go_default_library"],
tags = ["automanaged"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["collect.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect/kafkacollect",
tags = ["automanaged"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/collect:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
"//library/stat/prom:go_default_library",
"//vendor/github.com/Shopify/sarama:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,173 @@
package kafkacollect
import (
"context"
"encoding/json"
"fmt"
"sync"
"github.com/Shopify/sarama"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/collect"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
"go-common/library/stat/prom"
)
var (
collectCount = prom.New().WithCounter("dapper_kafka_collect_count", []string{"name"})
collectErrCount = prom.New().WithCounter("dapper_kafka_collect_err_count", []string{"name"})
)
// Option sets an optional parameter of the kafka collector
type Option func(*option)
type option struct {
group string
topic string
addrs []string
}
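// saramaConfig returns nil; sarama.NewClient falls back to its default configuration when given a nil config.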
func (o option) saramaConfig() *sarama.Config {
return nil
}
var defaultOption = option{
group: "default",
}
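// No Option constructors are defined in this commit; a group override
// would presumably look like this (illustrative only):
//
// func WithGroup(group string) Option {
//     return func(o *option) { o.group = group }
// }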
// New creates a kafka-backed Collecter consuming spans from the given topic and broker addresses
func New(topic string, addrs []string, options ...Option) (collect.Collecter, error) {
log.V(10).Info("new kafkacollect topic %s addrs: %v", topic, addrs)
if len(addrs) == 0 {
return nil, fmt.Errorf("kafka addrs required")
}
opt := defaultOption
for _, fn := range options {
fn(&opt)
}
opt.addrs = addrs
opt.topic = topic
clt := &kafkaCollect{opt: opt}
return clt, nil
}
type kafkaCollect struct {
wg sync.WaitGroup
opt option
ps []process.Processer
consumers []*consumer
client sarama.Client
offsetManager sarama.OffsetManager
baseConsumer sarama.Consumer
}
func (k *kafkaCollect) RegisterProcess(p process.Processer) {
k.ps = append(k.ps, p)
}
func (k *kafkaCollect) Start() error {
var err error
if k.client, err = sarama.NewClient(k.opt.addrs, k.opt.saramaConfig()); err != nil {
return fmt.Errorf("new kafka client error: %s", err)
}
if k.offsetManager, err = sarama.NewOffsetManagerFromClient(k.opt.group, k.client); err != nil {
return fmt.Errorf("new offset manager error: %s", err)
}
if k.baseConsumer, err = sarama.NewConsumerFromClient(k.client); err != nil {
return fmt.Errorf("new kafka consumer error: %s", err)
}
log.Info("kafkacollect consumer from topic: %s addrs: %s", k.opt.topic, k.opt.topic)
return k.start()
}
func (k *kafkaCollect) handler(protoSpan *model.ProtoSpan) {
var err error
for _, p := range k.ps {
if err = p.Process(context.Background(), protoSpan); err != nil {
log.Error("process span error: %s, discard", err)
}
}
}
func (k *kafkaCollect) start() error {
ps, err := k.client.Partitions(k.opt.topic)
if err != nil {
return fmt.Errorf("get partitions error: %s", err)
}
for _, p := range ps {
var pom sarama.PartitionOffsetManager
if pom, err = k.offsetManager.ManagePartition(k.opt.topic, p); err != nil {
return fmt.Errorf("new manage partition error: %s", err)
}
offset, _ := pom.NextOffset()
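// NextOffset falls back to the configured initial offset when nothing has been committed;
// with sarama's defaults that is OffsetNewest (-1), so treat it as "no committed offset" and start from the oldest message.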
if offset == -1 {
offset = sarama.OffsetOldest
}
var c sarama.PartitionConsumer
log.V(10).Info("partitions %d start offset %d", p, offset)
if c, err = k.baseConsumer.ConsumePartition(k.opt.topic, p, offset); err != nil {
return fmt.Errorf("new consume partition error: %s", err)
}
log.V(10).Info("start partition consumer partition: %d, offset: %d", p, offset)
consumer := newConsumer(k, c, pom)
k.consumers = append(k.consumers, consumer)
k.wg.Add(1)
go consumer.start()
}
return nil
}
func (k *kafkaCollect) Close() error {
for _, c := range k.consumers {
if err := c.close(); err != nil {
log.Warn("close consumer error: %s", err)
}
}
k.wg.Wait()
return nil
}
func newConsumer(k *kafkaCollect, c sarama.PartitionConsumer, pom sarama.PartitionOffsetManager) *consumer {
return &consumer{kafkaCollect: k, consumer: c, pom: pom, closeCh: make(chan struct{}, 1)}
}
type consumer struct {
*kafkaCollect
pom sarama.PartitionOffsetManager
consumer sarama.PartitionConsumer
closeCh chan struct{}
}
func (c *consumer) close() error {
c.closeCh <- struct{}{}
c.pom.Close()
return c.consumer.Close()
}
func (c *consumer) start() {
defer c.wg.Done()
var err error
for {
select {
// Messages() is closed by consumer.Close(); guard against the nil message a closed channel yields
case msg, ok := <-c.consumer.Messages():
if !ok {
return
}
collectCount.Incr("count")
c.pom.MarkOffset(msg.Offset+1, "")
log.V(10).Info("receive message from kafka topic: %s key: %s content: %s", msg.Key, msg.Topic, msg.Value)
protoSpan := new(model.ProtoSpan)
if err = json.Unmarshal(msg.Value, protoSpan); err != nil {
collectErrCount.Incr("count_error")
log.Error("unmarshal span from kafka error: %s, value: %v", err, value)
continue
}
c.handler(protoSpan)
case <-c.closeCh:
log.V(10).Info("receive closed return")
return
}
}
}

View File

@@ -0,0 +1,32 @@
package kafkacollect
import (
"context"
"flag"
"fmt"
"testing"
"time"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
)
func TestKafkaCollect(t *testing.T) {
flag.Parse()
log.Init(nil)
clt, err := New("lancer_main_dapper_collector", []string{"172.18.33.163:9092", "172.18.33.164:9092", "172.18.33.165:9092"})
if err != nil {
t.Fatal(err)
}
m := process.MockProcess(func(ctx context.Context, protoSpan *model.ProtoSpan) error {
fmt.Printf("%v\n", protoSpan)
return nil
})
clt.RegisterProcess(m)
if err := clt.Start(); err != nil {
t.Fatal(err)
}
defer clt.Close()
time.Sleep(time.Minute)
}

View File

@@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_test",
"go_library",
)
go_test(
name = "go_default_test",
srcs = ["server_test.go"],
embed = [":go_default_library"],
rundir = ".",
tags = ["automanaged"],
deps = [
"//app/service/main/dapper/conf:go_default_library",
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["server.go"],
importpath = "go-common/app/service/main/dapper/pkg/collect/tcpcollect",
tags = ["manual"],
visibility = ["//visibility:public"],
deps = [
"//app/service/main/dapper/conf:go_default_library",
"//app/service/main/dapper/model:go_default_library",
"//app/service/main/dapper/pkg/process:go_default_library",
"//library/log:go_default_library",
"//library/net/trace/proto:go_default_library",
"//library/stat/counter:go_default_library",
"//library/stat/prom:go_default_library",
"@com_github_golang_protobuf//proto:go_default_library",
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,256 @@
package tcpcollect
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"net"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/timestamp"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
"go-common/library/log"
protogen "go-common/library/net/trace/proto"
"go-common/library/stat/counter"
"go-common/library/stat/prom"
)
var (
collectCount = prom.New().WithCounter("dapper_collect_count", []string{"remote_host"})
collectErrCount = prom.New().WithCounter("dapper_collect_err_count", []string{"remote_host"})
)
const (
_magicSize = 2
_headerSize = 6
)
var (
_magicBuf = []byte{0xAC, 0xBE}
_separator = []byte("\001")
)
// ClientStatus holds per-connection stats for an agent client
type ClientStatus struct {
Addr string
Counter counter.Counter
ErrorCounter counter.Counter
UpTime int64
}
func (c *ClientStatus) incr(iserr bool) {
if iserr {
collectErrCount.Incr(c.ClientHost())
c.ErrorCounter.Add(1)
}
collectCount.Incr(c.ClientHost())
c.Counter.Add(1)
}
// ClientHost extracts the host part of the client address
func (c *ClientStatus) ClientHost() string {
host, _, _ := net.SplitHostPort(c.Addr)
return host
}
// TCPCollect tcp server.
type TCPCollect struct {
cfg *conf.Collect
lis net.Listener
clientMap map[string]*ClientStatus
rmx sync.RWMutex
ps []process.Processer
}
// New tcp server.
func New(cfg *conf.Collect) *TCPCollect {
svr := &TCPCollect{
cfg: cfg,
clientMap: make(map[string]*ClientStatus),
}
return svr
}
// RegisterProcess registers a span processor; part of the collect.Collecter interface
func (s *TCPCollect) RegisterProcess(p process.Processer) {
s.ps = append(s.ps, p)
}
func (s *TCPCollect) addClient(cs *ClientStatus) {
s.rmx.Lock()
defer s.rmx.Unlock()
s.clientMap[cs.Addr] = cs
}
func (s *TCPCollect) removeClient(cs *ClientStatus) {
s.rmx.Lock()
defer s.rmx.Unlock()
delete(s.clientMap, cs.Addr)
}
// ClientStatus returns the status of all connected agent clients
func (s *TCPCollect) ClientStatus() []*ClientStatus {
s.rmx.RLock()
defer s.rmx.RUnlock()
css := make([]*ClientStatus, 0, len(s.clientMap))
for _, cs := range s.clientMap {
css = append(css, cs)
}
return css
}
// Start tcp server.
func (s *TCPCollect) Start() error {
var err error
if s.lis, err = net.Listen(s.cfg.Network, s.cfg.Addr); err != nil {
return err
}
go func() {
for {
conn, err := s.lis.Accept()
if err != nil {
if netE, ok := err.(net.Error); ok && netE.Temporary() {
log.Error("l.Accept() error(%v)", err)
time.Sleep(time.Second)
continue
}
return
}
go s.serveConn(conn)
}
}()
log.Info("tcp server start addr:%s@%s", s.cfg.Network, s.cfg.Addr)
return nil
}
// Close tcp server.
func (s *TCPCollect) Close() error {
return s.lis.Close()
}
func (s *TCPCollect) serveConn(conn net.Conn) {
log.Info("serverConn remoteIP:%s", conn.RemoteAddr().String())
cs := &ClientStatus{
Addr: conn.RemoteAddr().String(),
Counter: counter.NewRolling(time.Second, 100),
ErrorCounter: counter.NewGauge(),
UpTime: time.Now().Unix(),
}
s.addClient(cs)
defer conn.Close()
defer s.removeClient(cs)
rd := bufio.NewReaderSize(conn, 65536)
for {
buf, err := s.tailPacket(rd)
if err != nil {
log.Error("s.tailPacket() remoteIP:%s error(%v)", conn.RemoteAddr().String(), err)
cs.incr(true)
return
}
if len(buf) == 0 {
log.Error("s.tailPacket() is empty")
cs.incr(true)
continue
}
data := buf
fields := bytes.Split(buf, _separator)
if len(fields) >= 16 {
if data, err = s.legacySpan(fields[2:]); err != nil {
log.Error("convert legacy span error: %s", err)
continue
}
}
protoSpan := new(protogen.Span)
if err = proto.Unmarshal(data, protoSpan); err != nil {
log.Error("unmarshal data %s error: %s", err, data)
continue
}
for _, p := range s.ps {
if pe := p.Process(context.Background(), (*model.ProtoSpan)(protoSpan)); pe != nil {
log.Error("process span %s error: %s", protoSpan, err)
}
}
cs.incr(err != nil)
}
}
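// Frame layout inferred from tailPacket below (an assumption, not documented in
// this commit): 2 magic bytes 0xAC 0xBE, a 4-byte big-endian length that counts
// every byte after the 6-byte header, then the body; the first 2 body bytes are
// skipped and the rest is the protobuf-encoded span.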
func (s *TCPCollect) tailPacket(rr *bufio.Reader) (res []byte, err error) {
var buf []byte
// peek magic
for {
if buf, err = rr.Peek(_magicSize); err != nil {
return
}
if bytes.Equal(buf, _magicBuf) {
break
}
rr.Discard(1)
}
// peek length
if buf, err = rr.Peek(_headerSize); err != nil {
return
}
// peek body
packetLen := int(binary.BigEndian.Uint32(buf[_magicSize:_headerSize]))
if buf, err = rr.Peek(_headerSize + packetLen); err != nil {
return
}
res = buf[_headerSize+_magicSize:]
rr.Discard(packetLen + _headerSize)
return
}
// startTime/endTime/traceID/spanID/parentID/event/level/class/sample/address/family/title/comment/caller/error
func (s *TCPCollect) legacySpan(fields [][]byte) ([]byte, error) {
startAt, _ := strconv.ParseInt(string(fields[0]), 10, 64)
finishAt, _ := strconv.ParseInt(string(fields[1]), 10, 64)
traceID, _ := strconv.ParseUint(string(fields[2]), 10, 64)
spanID, _ := strconv.ParseUint(string(fields[3]), 10, 64)
parentID, _ := strconv.ParseUint(string(fields[4]), 10, 64)
event, _ := strconv.Atoi(string(fields[5]))
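// with the full 15 fields listed above, address starts at fields[start+1] == fields[9];
// 14-field legacy lines are shifted down by one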
start := 8
if len(fields) == 14 {
start = 7
}
address := string(fields[start+1])
family := string(fields[start+2])
title := string(fields[start+3])
comment := string(fields[start+4])
caller := string(fields[start+5])
errMsg := string(fields[start+6])
span := &protogen.Span{Version: 2}
span.ServiceName = family
span.OperationName = title
span.Caller = caller
span.TraceId = traceID
span.SpanId = spanID
span.ParentId = parentID
span.StartTime = &timestamp.Timestamp{
Seconds: startAt / int64(time.Second),
Nanos: int32(startAt % int64(time.Second)),
}
d := finishAt - startAt
span.Duration = &duration.Duration{
Seconds: d / int64(time.Second),
Nanos: int32(d % int64(time.Second)),
}
if event == 0 {
span.Tags = append(span.Tags, &protogen.Tag{Key: "span.kind", Kind: protogen.Tag_STRING, Value: []byte("client")})
} else {
span.Tags = append(span.Tags, &protogen.Tag{Key: "span.kind", Kind: protogen.Tag_STRING, Value: []byte("server")})
}
span.Tags = append(span.Tags, &protogen.Tag{Key: "legacy.address", Kind: protogen.Tag_STRING, Value: []byte(address)})
span.Tags = append(span.Tags, &protogen.Tag{Key: "legacy.comment", Kind: protogen.Tag_STRING, Value: []byte(comment)})
if errMsg != "" {
span.Logs = append(span.Logs, &protogen.Log{Key: "legacy.error", Fields: []*protogen.Field{{Key: "error", Value: []byte(errMsg)}}})
}
return proto.Marshal(span)
}
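
For a sense of the wire format, a sender would have to produce frames tailPacket can parse. A minimal sketch under the layout assumed above (the two bytes tailPacket discards after the header are zero-filled here because their meaning is not visible in this commit):

package main

import "encoding/binary"

// frame wraps a marshaled span in the header tailPacket expects:
// magic, big-endian length of everything after the header, then the body.
func frame(payload []byte) []byte {
body := append(make([]byte, 2), payload...) // 2 filler bytes tailPacket skips
buf := []byte{0xAC, 0xBE, 0, 0, 0, 0}
binary.BigEndian.PutUint32(buf[2:6], uint32(len(body)))
return append(buf, body...)
}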

View File

@@ -0,0 +1,45 @@
package tcpcollect
import (
"context"
"io"
"net"
"os"
"testing"
"time"
"go-common/app/service/main/dapper/conf"
"go-common/app/service/main/dapper/model"
"go-common/app/service/main/dapper/pkg/process"
)
func TestCollect(t *testing.T) {
var count int64
collect := New(&conf.Collect{Network: "tcp", Addr: "127.0.0.1:6190"})
collect.RegisterProcess(process.MockProcess(func(context.Context, *model.ProtoSpan) error {
atomic.AddInt64(&count, 1) // the processor runs on the server's connection goroutine, so count atomically
return nil
}))
if err := collect.Start(); err != nil {
t.Fatal(err)
}
fp, err := os.Open("testdata/data.bin")
if err != nil {
t.Fatal(err)
}
defer fp.Close()
conn, err := net.Dial("tcp", "127.0.0.1:6190")
if err != nil {
t.Fatal(err)
}
defer conn.Close()
_, err = io.Copy(conn, fp)
if err != nil {
t.Error(err)
}
time.Sleep(time.Second)
if atomic.LoadInt64(&count) <= 0 {
t.Errorf("expected at least one span to be processed")
}
}

Binary file not shown.