Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

View File

@@ -0,0 +1,56 @@
# Auto-generated Bazel BUILD file for the log-agent pipeline package.
# Rules are tagged "automanaged": the BUILD generator may rewrite them.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Go library for the pipeline manager and its component config wiring.
go_library(
    name = "go_default_library",
    srcs = [
        "management.go",
        "pipeline.go",
    ],
    importpath = "go-common/app/service/ops/log-agent/pipeline",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/ops/log-agent/event:go_default_library",
        "//app/service/ops/log-agent/input:go_default_library",
        "//app/service/ops/log-agent/input/file:go_default_library",
        "//app/service/ops/log-agent/input/sock:go_default_library",
        "//app/service/ops/log-agent/output:go_default_library",
        "//app/service/ops/log-agent/output/lancergrpc:go_default_library",
        "//app/service/ops/log-agent/output/lancerlogstream:go_default_library",
        "//app/service/ops/log-agent/output/stdout:go_default_library",
        "//app/service/ops/log-agent/pkg/common:go_default_library",
        "//app/service/ops/log-agent/processor:go_default_library",
        "//app/service/ops/log-agent/processor/classify:go_default_library",
        "//app/service/ops/log-agent/processor/fileLog:go_default_library",
        "//app/service/ops/log-agent/processor/grok:go_default_library",
        "//app/service/ops/log-agent/processor/httpstream:go_default_library",
        "//app/service/ops/log-agent/processor/jsonLog:go_default_library",
        "//app/service/ops/log-agent/processor/lengthCheck:go_default_library",
        "//app/service/ops/log-agent/processor/sample:go_default_library",
        "//library/log:go_default_library",
        "//vendor/github.com/BurntSushi/toml:go_default_library",
    ],
)

# All files in this directory, consumed by the all-srcs aggregation rule.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source aggregation: this package plus its subpackages.
filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//app/service/ops/log-agent/pipeline/dockerlogcollector:all-srcs",
        "//app/service/ops/log-agent/pipeline/hostlogcollector:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,38 @@
# Auto-generated Bazel BUILD file for the docker log collector package.
# Rules are tagged "automanaged": the BUILD generator may rewrite them.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Go library that discovers collect configs inside running docker containers.
go_library(
    name = "go_default_library",
    srcs = [
        "config.go",
        "file.go",
    ],
    importpath = "go-common/app/service/ops/log-agent/pipeline/dockerlogcollector",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/ops/log-agent/pipeline:go_default_library",
        "//library/log:go_default_library",
        "//library/time:go_default_library",
        "//vendor/github.com/docker/docker/api/types:go_default_library",
        "//vendor/github.com/docker/docker/client:go_default_library",
    ],
)

# All files in this directory, consumed by the all-srcs aggregation rule.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source aggregation for this leaf package.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,39 @@
package dockerlogcollector
import (
"errors"
"time"
xtime "go-common/library/time"
)
// Config is the docker log collector configuration.
type Config struct {
	ConfigEnv    string         `toml:"configEnv"`    // container env var holding config paths; default "LogCollectorConf"
	ConfigSuffix string         `toml:"configSuffix"` // config file suffix; default ".conf"
	MetaPath     string         `toml:"metaPath"`     // default "/data/log-agent/meta"
	ScanInterval xtime.Duration `toml:"scanInterval"` // container scan period; default 10s
}
// ConfigValidate checks the docker log collector config and fills in
// defaults for every unset field. It returns an error only when the
// receiver itself is nil.
func (c *Config) ConfigValidate() error {
	if c == nil {
		return errors.New("config of docker log collector can't be nil")
	}
	if c.ConfigEnv == "" {
		c.ConfigEnv = "LogCollectorConf"
	}
	if c.MetaPath == "" {
		c.MetaPath = "/data/log-agent/meta"
	}
	if c.ConfigSuffix == "" {
		c.ConfigSuffix = ".conf"
	}
	if c.ScanInterval == 0 {
		c.ScanInterval = xtime.Duration(time.Second * 10)
	}
	return nil
}

View File

@@ -0,0 +1,116 @@
package dockerlogcollector
import (
"context"
"time"
"strings"
"path"
"io/ioutil"
"go-common/library/log"
"go-common/app/service/ops/log-agent/pipeline"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client"
)
// DockerLogCollector discovers log collect configs declared by running
// docker containers and starts a pipeline for each of them.
type DockerLogCollector struct {
	c      *Config
	client *client.Client // docker API client, created from the environment
	ctx    context.Context
	cancel context.CancelFunc
}
// configItem is one collect config found in a container: the config
// path inside the container plus the container's overlay2 MergedDir,
// which makes the path reachable from the host.
type configItem struct {
	configPath string
	MergedDir  string
}
// InitDockerLogCollector validates c, creates a docker client from the
// environment and starts the background scan goroutine. The goroutine
// stops when ctx is cancelled.
func InitDockerLogCollector(ctx context.Context, c *Config) (err error) {
	if err = c.ConfigValidate(); err != nil {
		return err
	}
	collector := new(DockerLogCollector)
	collector.c = c
	collector.ctx, collector.cancel = context.WithCancel(ctx)
	// init docker client
	collector.client, err = client.NewEnvClient()
	if err != nil {
		// release the derived context on the failure path (avoids a context leak)
		collector.cancel()
		return err
	}
	go collector.scan()
	return nil
}
// getConfigs lists all running containers and collects the log collect
// config items declared in each container's environment (variables
// prefixed with the configured ConfigEnv key; the value is a
// comma-separated list of in-container config paths). Each item carries
// the container's overlay2 MergedDir so the file can be read host-side.
func (collector *DockerLogCollector) getConfigs() ([]*configItem, error) {
	configItems := make([]*configItem, 0)
	containers, err := collector.client.ContainerList(collector.ctx, types.ContainerListOptions{})
	if err != nil {
		return nil, err
	}
	for _, container := range containers {
		info, err := collector.client.ContainerInspect(collector.ctx, container.ID)
		if err != nil {
			log.Error("failed to inspect container: %s", container.ID)
			continue
		}
		// get overlay2 info
		if info.GraphDriver.Name != "overlay2" {
			log.Error("only overlay2 is supported")
			continue
		}
		// declare mergedDir per iteration: the original reused the previous
		// container's value when the lookup failed
		mergedDir, ok := info.GraphDriver.Data["MergedDir"]
		if !ok {
			log.Error("failed to get MergedDir of container:%s", container.ID)
			// without MergedDir the config path cannot be resolved on the host
			continue
		}
		for _, env := range info.Config.Env {
			if !strings.HasPrefix(env, collector.c.ConfigEnv) {
				continue
			}
			// p renamed from "path" to avoid shadowing the stdlib path package
			for _, p := range strings.Split(strings.TrimPrefix(env, collector.c.ConfigEnv+"="), ",") {
				configItems = append(configItems, &configItem{p, mergedDir})
			}
		}
	}
	return configItems, nil
}
// scan periodically discovers container log configs and starts a
// pipeline for every config that is not running yet. It exits when the
// collector context is cancelled.
func (collector *DockerLogCollector) scan() {
	// time.NewTicker instead of time.Tick so the ticker can be released
	ticker := time.NewTicker(time.Duration(collector.c.ScanInterval))
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			configItems, err := collector.getConfigs()
			if err != nil {
				// message fixed: this is the docker collector, not the host one
				log.Error("failed to scan dockerlogcollector config file list: %s", err)
				continue
			}
			for _, item := range configItems {
				configPath := path.Join(item.MergedDir, item.configPath)
				config, err := ioutil.ReadFile(configPath)
				if err != nil {
					log.Error("failed to read dockerlogcollector config file %s: %s", configPath, err)
					continue
				}
				if !pipeline.PipelineManagement.PipelineExisted(configPath) {
					// expose MergedDir to downstream stages via the context
					ctx := context.WithValue(collector.ctx, "MergedDir", item.MergedDir)
					go pipeline.PipelineManagement.StartPipeline(ctx, configPath, string(config))
				}
			}
		case <-collector.ctx.Done():
			return
		}
	}
}

View File

@@ -0,0 +1,36 @@
# Auto-generated Bazel BUILD file for the host log collector package.
# Rules are tagged "automanaged": the BUILD generator may rewrite them.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

# Go library that scans a host directory for collect config files.
go_library(
    name = "go_default_library",
    srcs = [
        "config.go",
        "file.go",
    ],
    importpath = "go-common/app/service/ops/log-agent/pipeline/hostlogcollector",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/service/ops/log-agent/pipeline:go_default_library",
        "//library/log:go_default_library",
        "//library/time:go_default_library",
    ],
)

# All files in this directory, consumed by the all-srcs aggregation rule.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

# Recursive source aggregation for this leaf package.
filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,39 @@
package hostlogcollector
import (
"errors"
"time"
xtime "go-common/library/time"
)
// Config is the host log collector configuration.
type Config struct {
	HostConfigPath string         `toml:"hostConfigPath"` // directory scanned for collect configs; required
	ConfigSuffix   string         `toml:"configSuffix"`   // config file suffix; default ".conf"
	MetaPath       string         `toml:"metaPath"`       // default "/data/log-agent/meta"
	ScanInterval   xtime.Duration `toml:"scanInterval"`   // directory scan period; default 10s
}
// ConfigValidate checks the host log collector config and fills in
// defaults for every unset optional field. It errors when the receiver
// is nil or the mandatory HostConfigPath is empty.
func (c *Config) ConfigValidate() error {
	if c == nil {
		return errors.New("config of host log collector can't be nil")
	}
	if c.HostConfigPath == "" {
		return errors.New("hostConfigPath of host log collector config can't be nil")
	}
	if c.MetaPath == "" {
		c.MetaPath = "/data/log-agent/meta"
	}
	if c.ConfigSuffix == "" {
		c.ConfigSuffix = ".conf"
	}
	if c.ScanInterval == 0 {
		c.ScanInterval = xtime.Duration(time.Second * 10)
	}
	return nil
}

View File

@@ -0,0 +1,92 @@
package hostlogcollector
import (
"os"
"io/ioutil"
"path"
"strings"
"fmt"
"time"
"context"
"go-common/library/log"
"go-common/app/service/ops/log-agent/pipeline"
)
// HostLogCollector watches a host directory for collect config files
// and starts a pipeline for each of them.
type HostLogCollector struct {
	c      *Config
	ctx    context.Context
	cancel context.CancelFunc
}
// InitHostLogCollector validates c and launches the background scan
// goroutine. The goroutine stops when ctx is cancelled.
func InitHostLogCollector(ctx context.Context, c *Config) (err error) {
	if err = c.ConfigValidate(); err != nil {
		return err
	}
	collector := &HostLogCollector{c: c}
	collector.ctx, collector.cancel = context.WithCancel(ctx)
	go collector.scan()
	return nil
}
// scan periodically reads collect configs from the host config dir and
// starts a pipeline for each config file that has no running pipeline
// yet. It exits when the collector context is cancelled.
func (collector *HostLogCollector) scan() {
	// time.NewTicker instead of time.Tick so the ticker can be released
	ticker := time.NewTicker(time.Duration(collector.c.ScanInterval))
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			configPaths, err := collector.getConfigs()
			if err != nil {
				log.Error("failed to scan hostlogcollector config file list: %s", err)
				continue
			}
			for _, configPath := range configPaths {
				config, err := ioutil.ReadFile(configPath)
				if err != nil {
					// "filed" typo fixed in the log message
					log.Error("failed to read hostlogcollector config file %s: %s", configPath, err)
					continue
				}
				if !pipeline.PipelineManagement.PipelineExisted(configPath) {
					go pipeline.PipelineManagement.StartPipeline(collector.ctx, configPath, string(config))
				}
			}
		case <-collector.ctx.Done():
			return
		}
	}
}
// getConfigs returns the full paths of all regular files directly under
// the configured HostConfigPath whose name ends with ConfigSuffix.
func (collector *HostLogCollector) getConfigs() ([]string, error) {
	dir := collector.c.HostConfigPath
	dinfo, err := os.Lstat(dir)
	if err != nil {
		return nil, fmt.Errorf("lstat(%s) failed: %s", dir, err)
	}
	if !dinfo.IsDir() {
		return nil, fmt.Errorf("file collect config path must be dir")
	}
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, fmt.Errorf("ioutil.ReadDir(%s) error(%v)", dir, err)
	}
	configFiles := make([]string, 0)
	for _, entry := range entries {
		full := path.Join(dir, entry.Name())
		if entry.IsDir() || !strings.HasSuffix(full, collector.c.ConfigSuffix) {
			continue
		}
		configFiles = append(configFiles, full)
	}
	return configFiles, nil
}

View File

@@ -0,0 +1,394 @@
package pipeline
import (
"sync"
"errors"
"context"
"sort"
"time"
"os"
"go-common/app/service/ops/log-agent/event"
"go-common/app/service/ops/log-agent/input"
"go-common/app/service/ops/log-agent/processor"
"go-common/app/service/ops/log-agent/output"
"go-common/app/service/ops/log-agent/pkg/common"
"go-common/library/log"
"github.com/BurntSushi/toml"
)
// PipelineMng manages the set of running pipelines, keyed by the path
// of the config file each pipeline was built from.
type PipelineMng struct {
	Pipelines     map[string]*Pipeline // configPath -> running pipeline
	PipelinesLock sync.RWMutex         // guards Pipelines
	ctx           context.Context
	cancel        context.CancelFunc
	scanInterval  time.Duration // how often scan() re-checks config files
}
// PipelineManagement is the process-wide pipeline manager, published by
// InitPipelineMng.
var PipelineManagement *PipelineMng

// defaultPipeline is the pseudo config path used for the built-in pipeline.
const defaultPipeline = "defaultPipeline"
// InitPipelineMng creates the global pipeline manager: it starts the
// default output, launches the config re-scan loop, starts the default
// pipeline and publishes the manager via PipelineManagement.
func InitPipelineMng(ctx context.Context) (err error) {
	m := new(PipelineMng)
	m.Pipelines = make(map[string]*Pipeline)
	m.ctx, m.cancel = context.WithCancel(ctx)
	if err = m.StartDefaultOutput(); err != nil {
		// release the derived context on the failure path (avoids a context leak)
		m.cancel()
		return err
	}
	m.scanInterval = time.Second * 10
	go m.scan()
	m.StartDefaultPipeline()
	// Todo check defaultPipeline
	//if !m.PipelineExisted(defaultPipeline) {
	//	return errors.New("failed to start defaultPipeline, see log for more details")
	//}
	PipelineManagement = m
	return nil
}
// RegisterHostFileCollector records p as the running pipeline for configPath.
func (m *PipelineMng) RegisterHostFileCollector(configPath string, p *Pipeline) {
	m.PipelinesLock.Lock()
	m.Pipelines[configPath] = p
	m.PipelinesLock.Unlock()
}
// UnRegisterHostFileCollector removes the pipeline registered for configPath.
func (m *PipelineMng) UnRegisterHostFileCollector(configPath string) {
	m.PipelinesLock.Lock()
	delete(m.Pipelines, configPath)
	m.PipelinesLock.Unlock()
}
// PipelineExisted reports whether a pipeline is registered for configPath.
func (m *PipelineMng) PipelineExisted(configPath string) bool {
	m.PipelinesLock.RLock()
	_, existed := m.Pipelines[configPath]
	m.PipelinesLock.RUnlock()
	return existed
}
// GetPipeline returns the pipeline registered for configPath, or nil
// when none is registered.
func (m *PipelineMng) GetPipeline(configPath string) *Pipeline {
	m.PipelinesLock.RLock()
	defer m.PipelinesLock.RUnlock()
	// the map's zero value is nil, matching the "not found" contract
	return m.Pipelines[configPath]
}
// configPaths returns the config paths of every registered pipeline.
func (m *PipelineMng) configPaths() []string {
	m.PipelinesLock.RLock()
	defer m.PipelinesLock.RUnlock()
	paths := make([]string, 0, len(m.Pipelines))
	for configPath := range m.Pipelines {
		paths = append(paths, configPath)
	}
	return paths
}
// scan watches the config file behind every registered pipeline and
// stops a pipeline whose config file was removed or modified (the
// collector loops will restart it with the fresh config). White-listed
// entries (the default pipeline) are never touched. Exits when the
// manager context is cancelled.
func (m *PipelineMng) scan() {
	// time.NewTicker instead of time.Tick so the ticker can be released
	ticker := time.NewTicker(m.scanInterval)
	defer ticker.Stop()
	whiteList := make(map[string]struct{})
	whiteList[defaultPipeline] = struct{}{}
	for {
		select {
		case <-ticker.C:
			for _, configPath := range m.configPaths() {
				if _, ok := whiteList[configPath]; ok {
					continue
				}
				pipe := m.GetPipeline(configPath)
				if pipe == nil {
					// pipeline unregistered between configPaths() and here;
					// the original fell through and dereferenced a nil pipe
					continue
				}
				// config removed
				if _, err := os.Stat(configPath); os.IsNotExist(err) {
					log.Info("config file not exist any more, stop pipeline: %s", configPath)
					pipe.Stop()
					continue
				}
				// config updated
				if pipe.configMd5 != common.FileMd5(configPath) {
					log.Info("config file updated, stop old pipeline: %s", configPath)
					pipe.Stop()
					continue
				}
			}
		case <-m.ctx.Done():
			return
		}
	}
}
// StartPipeline builds and runs one pipeline from a TOML config string:
// it starts the input, chains the processors in sorted key order,
// connects the output (or falls back to the default output), registers
// the pipeline under configPath and blocks until the pipeline context
// is cancelled. Intended to be run in its own goroutine; failures are
// logged via logError rather than returned.
func (m *PipelineMng) StartPipeline(ctx context.Context, configPath string, config string) () {
	var err error
	p := new(Pipeline)
	p.configPath = configPath
	// md5 recorded so the manager's scan() can detect config updates
	p.configMd5 = common.FileMd5(configPath)
	// configPath travels in the context so logError can report it
	ctx = context.WithValue(ctx, "configPath", configPath)
	p.ctx, p.cancel = context.WithCancel(ctx)
	// on any failure below, or when the function unblocks at the end,
	// cancel the pipeline context so all stages shut down
	defer p.Stop()
	var sortedOrder []string
	p.c = new(Config)
	md, err := toml.Decode(config, p.c)
	if err != nil {
		p.logError(err)
		return
	}
	// channel connecting the input stage to the first processor
	inputToProcessor := make(chan *event.ProcessorEvent)
	// start input
	inputName := p.c.Input.Name
	if inputName == "" {
		p.logError(errors.New("type of Config can't be nil"))
		return
	}
	c, err := DecodeInputConfig(inputName, md, p.c.Input.Config)
	if err != nil {
		p.logError(err)
		return
	}
	InputFactory, err := input.GetFactory(inputName)
	if err != nil {
		p.logError(err)
		return
	}
	i, err := InputFactory(p.ctx, c, inputToProcessor)
	if err != nil {
		p.logError(err)
		return
	}
	if err = i.Run(); err != nil {
		p.logError(err)
		return
	}
	// start processor: chain stages in lexicographic order of their keys,
	// each stage's output channel feeding the next one
	var ProcessorConnector chan *event.ProcessorEvent
	ProcessorConnector = inputToProcessor
	sortedOrder = make([]string, 0)
	for order, _ := range p.c.Processor {
		sortedOrder = append(sortedOrder, order)
	}
	sort.Strings(sortedOrder)
	for _, order := range sortedOrder {
		name := p.c.Processor[order].Name
		if name == "" {
			p.logError(errors.New("type of Processor can't be nil"))
			return
		}
		c, err := DecodeProcessorConfig(name, md, p.c.Processor[order].Config)
		if err != nil {
			p.logError(err)
			return
		}
		proc, err := processor.GetFactory(name)
		if err != nil {
			p.logError(err)
			return
		}
		ProcessorConnector, err = proc(p.ctx, c, ProcessorConnector)
		if err != nil {
			p.logError(err)
			return
		}
	}
	// add classify and fileLog processor by default if inputName == "file"
	if inputName == "file" {
		config := `
[processor]
[processor.1]
type = "classify"
[processor.2]
type = "fileLog"
`
		fProcessor := new(Config)
		md, _ := toml.Decode(config, fProcessor)
		fsortedOrder := make([]string, 0)
		for order, _ := range fProcessor.Processor {
			fsortedOrder = append(fsortedOrder, order)
		}
		sort.Strings(fsortedOrder)
		for _, order := range fsortedOrder {
			name := fProcessor.Processor[order].Name
			if name == "" {
				p.logError(errors.New("type of Processor can't be nil"))
				return
			}
			fc, err := DecodeProcessorConfig(name, md, fProcessor.Processor[order].Config)
			if err != nil {
				p.logError(err)
				return
			}
			proc, err := processor.GetFactory(name)
			if err != nil {
				p.logError(err)
				return
			}
			ProcessorConnector, err = proc(p.ctx, fc, ProcessorConnector)
			if err != nil {
				p.logError(err)
				return
			}
		}
	}
	// start output
	if p.c.Output != nil {
		if len(p.c.Output) > 1 {
			p.logError(errors.New("only One Output is allowed in One pipeline"))
			return
		}
		// pick the single (arbitrary) entry of the one-element map
		var first string
		for key, _ := range p.c.Output {
			first = key
			break
		}
		o, err := StartOutput(p.ctx, md, p.c.Output[first])
		if err != nil {
			p.logError(err)
			return
		}
		// connect processor and output
		output.ChanConnect(m.ctx, ProcessorConnector, o.InputChan())
	} else {
		// write to default output
		if err := processor.WriteToOutput(p.ctx, "", ProcessorConnector); err != nil {
			p.logError(err)
			return
		}
	}
	m.RegisterHostFileCollector(configPath, p)
	defer m.UnRegisterHostFileCollector(configPath)
	// block until the pipeline is stopped (config change, removal, or
	// parent context cancellation)
	<-p.ctx.Done()
}
// StartDefaultPipeline launches the built-in pipeline (sock input with
// the standard processor chain) in its own goroutine, registered under
// the defaultPipeline pseudo path. The commented-out configs below are
// kept as development/testing alternatives.
func (m *PipelineMng) StartDefaultPipeline() {
	// config := `
	//[input]
	//type = "file"
	//[input.config]
	//paths = ["/data/log-agent/log/info.log.2018-11-07.001"]
	//appId = "ops.billions.test"
	//[processor]
	//[output]
	//[output.1]
	//type = "stdout"
	//`
	// config := `
	//[input]
	//type = "file"
	//[input.config]
	//paths = ["/data/log-agent/log/info.log.2018-*"]
	//appId = "ops.billions.test"
	//logId = "000069"
	//[processor]
	//[processor.1]
	//type = "fileLog"
	//`
	config := `
[input]
type = "sock"
[input.config]
[processor]
[processor.1]
type = "jsonLog"
[processor.2]
type = "lengthCheck"
[processor.3]
type = "httpStream"
[processor.4]
type = "sample"
[processor.5]
type = "classify"
`
	go m.StartPipeline(context.Background(), defaultPipeline, config)
}
// StartDefaultOutput reads the agent-level output config via
// output.ReadConfig and starts every output defined in it. These
// outputs serve pipelines that declare no output of their own.
func (m *PipelineMng) StartDefaultOutput() (err error) {
	var value string
	if value, err = output.ReadConfig(); err != nil {
		return err
	}
	// reuse the pipeline Config struct only to decode the [output] section
	p := new(Pipeline)
	p.c = new(Config)
	md, err := toml.Decode(value, p.c)
	if err != nil {
		return err
	}
	return StartOutputs(m.ctx, md, p.c.Output)
}
// StartOutputs starts every output described in config, stopping at and
// returning the first error encountered.
func StartOutputs(ctx context.Context, md toml.MetaData, config map[string]ConfigItem) (err error) {
	for _, item := range config {
		if item.Name == "" {
			return errors.New("type of Output can't be nil")
		}
		if _, err = StartOutput(ctx, md, item); err != nil {
			return err
		}
	}
	return nil
}
// StartOutput decodes the output config, builds the output through its
// registered factory, runs it and returns the running output.
func StartOutput(ctx context.Context, md toml.MetaData, config ConfigItem) (output.Output, error) {
	if config.Name == "" {
		return nil, errors.New("type of Output can't be nil")
	}
	cfg, err := DecodeOutputConfig(config.Name, md, config.Config)
	if err != nil {
		return nil, err
	}
	factory, err := output.GetFactory(config.Name)
	if err != nil {
		return nil, err
	}
	o, err := factory(ctx, cfg)
	if err != nil {
		return nil, err
	}
	if err = o.Run(); err != nil {
		return nil, err
	}
	return o, nil
}

View File

@@ -0,0 +1,128 @@
package pipeline
import (
"fmt"
"context"
"go-common/app/service/ops/log-agent/output/lancerlogstream"
"go-common/app/service/ops/log-agent/output/lancergrpc"
"go-common/app/service/ops/log-agent/input/sock"
"go-common/app/service/ops/log-agent/input/file"
"go-common/app/service/ops/log-agent/processor/classify"
"go-common/app/service/ops/log-agent/processor/jsonLog"
"go-common/app/service/ops/log-agent/processor/fileLog"
"go-common/app/service/ops/log-agent/processor/lengthCheck"
"go-common/app/service/ops/log-agent/processor/sample"
"go-common/app/service/ops/log-agent/processor/httpstream"
"go-common/app/service/ops/log-agent/processor/grok"
"go-common/app/service/ops/log-agent/output/stdout"
"go-common/library/log"
"github.com/BurntSushi/toml"
)
// Per-stage registries mapping a component type name to the function
// that decodes its TOML config primitive. Populated in init() below and
// via the Register*ConfigDecodeFunc helpers.
var inputConfigDecodeFactory = make(map[string]configDecodeFunc)
var processorConfigDecodeFactory = make(map[string]configDecodeFunc)
var outputConfigDecodeFactory = make(map[string]configDecodeFunc)
// init registers the config decoders for every built-in input,
// processor and output type.
func init() {
	RegisterInputConfigDecodeFunc("sock", sock.DecodeConfig)
	RegisterInputConfigDecodeFunc("file", file.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("classify", classify.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("jsonLog", jsonLog.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("lengthCheck", lengthCheck.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("sample", sample.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("httpStream", httpstream.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("fileLog", fileLog.DecodeConfig)
	RegisterProcessorConfigDecodeFunc("grok", grok.DecodeConfig)
	RegisterOutputConfigDecodeFunc("stdout", stdout.DecodeConfig)
	RegisterOutputConfigDecodeFunc("lancer", lancerlogstream.DecodeConfig)
	RegisterOutputConfigDecodeFunc("lancergrpc", lancergrpc.DecodeConfig)
}
// Pipeline is one running input -> processors -> output chain built
// from a single config file.
type Pipeline struct {
	c          *Config
	ctx        context.Context
	cancel     context.CancelFunc
	configPath string // path of the config file this pipeline was built from
	configMd5  string // md5 of the config file, used to detect updates
}
// Config is the TOML layout of one pipeline config file.
type Config struct {
	Input     ConfigItem            `toml:"input"`
	Processor map[string]ConfigItem `toml:"processor"` // keyed by stage order ("1", "2", ...)
	Output    map[string]ConfigItem `toml:"output"`
}

// ConfigItem names a component type and carries its not-yet-decoded config.
type ConfigItem struct {
	Name   string         `toml:"type"`
	Config toml.Primitive `toml:"config"` // decoded later by the component's configDecodeFunc
}
// Stop cancels the pipeline context, shutting down all of its stages.
func (pipe *Pipeline) Stop() {
	pipe.cancel()
}
// configDecodeFunc decodes a component's toml.Primitive section into
// that component's concrete config struct.
type configDecodeFunc = func(md toml.MetaData, primValue toml.Primitive) (c interface{}, err error)

// RegisterInputConfigDecodeFunc registers the config decoder for input type name.
func RegisterInputConfigDecodeFunc(name string, f configDecodeFunc) {
	inputConfigDecodeFactory[name] = f
}

// RegisterProcessorConfigDecodeFunc registers the config decoder for processor type name.
func RegisterProcessorConfigDecodeFunc(name string, f configDecodeFunc) {
	processorConfigDecodeFactory[name] = f
}
// GetInputConfigDecodeFunc looks up the registered decoder for input
// type name, or errors when none is registered.
func GetInputConfigDecodeFunc(name string) (configDecodeFunc, error) {
	f, exist := inputConfigDecodeFactory[name]
	if !exist {
		return nil, fmt.Errorf("InputConfigDecodeFunc for %s not exist", name)
	}
	return f, nil
}
// GetProcessorConfigDecodeFunc looks up the registered decoder for
// processor type name, or errors when none is registered.
func GetProcessorConfigDecodeFunc(name string) (configDecodeFunc, error) {
	f, exist := processorConfigDecodeFactory[name]
	if !exist {
		return nil, fmt.Errorf("ProcessorConfigDecodeFunc for %s not exist", name)
	}
	return f, nil
}
// DecodeInputConfig decodes the input section for input type name using
// its registered decoder.
func DecodeInputConfig(name string, md toml.MetaData, primValue toml.Primitive) (interface{}, error) {
	decode, err := GetInputConfigDecodeFunc(name)
	if err != nil {
		return nil, err
	}
	return decode(md, primValue)
}
// DecodeProcessorConfig decodes the processor section for processor
// type name using its registered decoder.
func DecodeProcessorConfig(name string, md toml.MetaData, primValue toml.Primitive) (interface{}, error) {
	decode, err := GetProcessorConfigDecodeFunc(name)
	if err != nil {
		return nil, err
	}
	return decode(md, primValue)
}
// RegisterOutputConfigDecodeFunc registers the config decoder for output type name.
func RegisterOutputConfigDecodeFunc(name string, f configDecodeFunc) {
	outputConfigDecodeFactory[name] = f
}
// GetOutputConfigDecodeFunc looks up the registered decoder for output
// type name, or errors when none is registered.
func GetOutputConfigDecodeFunc(name string) (configDecodeFunc, error) {
	f, exist := outputConfigDecodeFactory[name]
	if !exist {
		return nil, fmt.Errorf("OutputConfigDecodeFunc for %s not exist", name)
	}
	return f, nil
}
// DecodeOutputConfig decodes the output section for output type name
// using its registered decoder.
func DecodeOutputConfig(name string, md toml.MetaData, primValue toml.Primitive) (interface{}, error) {
	decode, err := GetOutputConfigDecodeFunc(name)
	if err != nil {
		return nil, err
	}
	return decode(md, primValue)
}
// logError logs a pipeline failure together with the config path that
// StartPipeline stored in the pipeline context under "configPath".
func (p *Pipeline) logError(err error) {
	configPath := p.ctx.Value("configPath")
	log.Error("failed to run pipeline for %s: %s", configPath, err)
}