Create & Init Project...

This commit is contained in:
2019-04-22 18:49:16 +08:00
commit fc4fa37393
25440 changed files with 4054998 additions and 0 deletions

36
library/stat/BUILD Normal file
View File

@@ -0,0 +1,36 @@
# Bazel build for //library/stat: the metrics facade library.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["stat.go"],
    importpath = "go-common/library/stat",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = ["//library/stat/prom:go_default_library"],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//library/stat/counter:all-srcs",
        "//library/stat/prom:all-srcs",
        "//library/stat/statsd:all-srcs",
        "//library/stat/summary:all-srcs",
        "//library/stat/sys/cpu:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,5 @@
### stat 统计库包含Counter、Summary等
##### Version 1.0.0
> 1.修改计数器方法去除NewGroup、New方法

View File

@@ -0,0 +1,9 @@
# Owner
maojian
# Author
all
# Reviewer
chenzhihui
weicheng

10
library/stat/OWNERS Normal file
View File

@@ -0,0 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- maojian
labels:
- library
- library/stat
reviewers:
- chenzhihui
- weicheng

View File

@@ -0,0 +1,45 @@
# Bazel build for //library/stat/counter: counters plus their tests.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = [
        "counter_test.go",
        "rolling_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

go_library(
    name = "go_default_library",
    srcs = [
        "counter.go",
        "gauge.go",
        "rolling.go",
    ],
    importpath = "go-common/library/stat/counter",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,62 @@
package counter
import (
"sync"
)
// Counter is a counter interface.
type Counter interface {
	// Add adds the given delta; implementations such as the gauge
	// counter accept negative deltas.
	Add(int64)
	// Reset sets the counter back to zero.
	Reset()
	// Value returns the current value.
	Value() int64
}

// Group is a keyed collection of counters created lazily on first Add.
// The zero value is usable once New is assigned.
type Group struct {
	mu   sync.RWMutex
	vecs map[string]Counter
	// New optionally specifies a function to generate a counter.
	// It may not be changed concurrently with calls to other functions.
	New func() Counter
}
// Add adds value to the counter stored under key, creating the counter
// via g.New on first use.
//
// The fast path takes only the read lock. The slow path re-checks the
// map under the write lock so that when two goroutines race to create
// the same key, both end up adding to the single registered counter.
// (The previous code created the counter before locking and kept using
// its private copy even when another goroutine won the insert, silently
// losing the increment.)
func (g *Group) Add(key string, value int64) {
	g.mu.RLock()
	vec, ok := g.vecs[key]
	g.mu.RUnlock()
	if !ok {
		g.mu.Lock()
		if g.vecs == nil {
			g.vecs = make(map[string]Counter)
		}
		// Re-check: another goroutine may have registered the key
		// while we were unlocked.
		if vec, ok = g.vecs[key]; !ok {
			vec = g.New()
			g.vecs[key] = vec
		}
		g.mu.Unlock()
	}
	vec.Add(value)
}
// Value returns the current value of the counter stored under key,
// or 0 when no such counter exists.
func (g *Group) Value(key string) int64 {
	g.mu.RLock()
	vec, ok := g.vecs[key]
	g.mu.RUnlock()
	if !ok {
		return 0
	}
	return vec.Value()
}

// Reset resets the counter stored under key; unknown keys are a no-op.
func (g *Group) Reset(key string) {
	g.mu.RLock()
	vec, ok := g.vecs[key]
	g.mu.RUnlock()
	if !ok {
		return
	}
	vec.Reset()
}

View File

@@ -0,0 +1,57 @@
package counter
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestGaugeCounter drives a Group backed by gauge counters through
// add (including a negative delta) and reset, checking the running sum.
func TestGaugeCounter(t *testing.T) {
	key := "test"
	g := &Group{
		New: func() Counter {
			return NewGauge()
		},
	}
	g.Add(key, 1)
	g.Add(key, 2)
	g.Add(key, 3)
	g.Add(key, -1)
	assert.Equal(t, g.Value(key), int64(5))
	g.Reset(key)
	assert.Equal(t, g.Value(key), int64(0))
}

// TestRollingCounter exercises bucket expiry of a 1s/10-bucket rolling
// counter. The sleeps are aligned with 100ms bucket boundaries, so the
// test is timing-sensitive on heavily loaded machines.
func TestRollingCounter(t *testing.T) {
	key := "test"
	g := &Group{
		New: func() Counter {
			return NewRolling(time.Second, 10)
		},
	}
	t.Run("add_key_b1", func(t *testing.T) {
		g.Add(key, 1)
		assert.Equal(t, g.Value(key), int64(1))
	})
	time.Sleep(time.Millisecond * 110)
	t.Run("add_key_b2", func(t *testing.T) {
		g.Add(key, 1)
		assert.Equal(t, g.Value(key), int64(2))
	})
	time.Sleep(time.Millisecond * 900) // expire one bucket, 110 + 900
	t.Run("expire_b1", func(t *testing.T) {
		assert.Equal(t, g.Value(key), int64(1))
		g.Add(key, 1)
		assert.Equal(t, g.Value(key), int64(2)) // expire one bucket
	})
	time.Sleep(time.Millisecond * 1100)
	t.Run("expire_all", func(t *testing.T) {
		assert.Equal(t, g.Value(key), int64(0))
	})
	t.Run("reset", func(t *testing.T) {
		g.Reset(key)
		assert.Equal(t, g.Value(key), int64(0))
	})
}

View File

@@ -0,0 +1,28 @@
package counter
import "sync/atomic"
var _ Counter = new(gaugeCounter)

// gaugeCounter is a thread-safe counter backed by one int64 that is
// manipulated exclusively with atomic operations.
type gaugeCounter int64

// NewGauge returns a gauge counter.
func NewGauge() Counter {
	var g gaugeCounter
	return &g
}

// Add atomically increments the counter by val (val may be negative).
func (v *gaugeCounter) Add(val int64) {
	atomic.AddInt64((*int64)(v), val)
}

// Value atomically reads the current value.
func (v *gaugeCounter) Value() int64 {
	return atomic.LoadInt64((*int64)(v))
}

// Reset atomically sets the counter back to zero.
func (v *gaugeCounter) Reset() {
	atomic.StoreInt64((*int64)(v), 0)
}

View File

@@ -0,0 +1,117 @@
package counter
import (
"sync"
"time"
)
// bucket is one slot of the ring behind rollingCounter. It is not safe
// for concurrent use on its own; rollingCounter serializes access with
// its mutex.
type bucket struct {
	val  int64
	next *bucket
}

// Add accumulates val into the bucket.
func (b *bucket) Add(val int64) {
	b.val = b.val + val
}

// Value reports the accumulated value.
func (b *bucket) Value() int64 {
	return b.val
}

// Reset clears the accumulated value.
func (b *bucket) Reset() {
	b.val = 0
}
var _ Counter = new(rollingCounter)

// rollingCounter is a sliding-window counter: a circular list of
// buckets, each covering window/winBucket time, summed over the slots
// that are still inside the window.
type rollingCounter struct {
	mu         sync.RWMutex
	buckets    []bucket
	bucketTime int64 // nanoseconds covered by one bucket
	lastAccess int64 // UnixNano of the last write (ring rotation time)
	cur        *bucket
}

// NewRolling creates a new window. windowTime is the time covering the entire
// window. windowBuckets is the number of buckets the window is divided into.
// An example: a 10 second window with 10 buckets will have 10 buckets covering
// 1 second each.
func NewRolling(window time.Duration, winBucket int) Counter {
	buckets := make([]bucket, winBucket)
	bucket := &buckets[0]
	// Link the slice into a circular list.
	for i := 1; i < winBucket; i++ {
		bucket.next = &buckets[i]
		bucket = bucket.next
	}
	bucket.next = &buckets[0]
	bucketTime := time.Duration(window.Nanoseconds() / int64(winBucket))
	return &rollingCounter{
		cur:        &buckets[0],
		buckets:    buckets,
		bucketTime: int64(bucketTime),
		lastAccess: time.Now().UnixNano(),
	}
}
// Add increments the current bucket by val, rotating the ring first if
// bucket intervals have elapsed since the last write.
func (r *rollingCounter) Add(val int64) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.lastBucket().Add(val)
}
// Value sums the buckets that are still inside the window.
//
// elapsed(now) reports how many bucket intervals have passed since the
// last write; those intervals correspond to the OLDEST buckets in the
// ring — exactly the ones lastBucket would reset on the next write
// (cur.next .. cur+i). They are skipped here without mutating the ring.
// The walk therefore advances to cur.next before deciding whether to
// skip, which keeps cur — the most recently written bucket — in the
// sum. (The previous code started the skip at cur itself, dropping the
// freshest bucket and counting the stalest one instead.)
func (r *rollingCounter) Value() (sum int64) {
	now := time.Now().UnixNano()
	r.mu.RLock()
	b := r.cur
	i := r.elapsed(now)
	for j := 0; j < len(r.buckets); j++ {
		b = b.next
		// Skip the i expired buckets.
		if i > 0 {
			i--
			continue
		}
		sum += b.Value()
	}
	r.mu.RUnlock()
	return
}
// Reset reset the counter.
func (r *rollingCounter) Reset() {
	r.mu.Lock()
	for i := range r.buckets {
		r.buckets[i].Reset()
	}
	r.mu.Unlock()
}

// elapsed reports how many bucket intervals have passed since the last
// write (lastAccess), capped at the ring size. 0 means the current
// bucket is still live. Callers must hold the mutex.
func (r *rollingCounter) elapsed(now int64) (i int) {
	var e int64
	if e = now - r.lastAccess; e <= r.bucketTime {
		return
	}
	if i = int(e / r.bucketTime); i > len(r.buckets) {
		i = len(r.buckets)
	}
	return
}
// lastBucket rotates the ring forward by the number of elapsed bucket
// intervals, resetting each bucket it passes over (their data is older
// than the window), and returns the bucket representing "now".
// Callers must hold the write lock.
func (r *rollingCounter) lastBucket() (b *bucket) {
	now := time.Now().UnixNano()
	b = r.cur
	// reset the buckets between now and number of buckets ago. If
	// that is more that the existing buckets, reset all.
	if i := r.elapsed(now); i > 0 {
		r.lastAccess = now
		for ; i > 0; i-- {
			// replace the next used bucket.
			b = b.next
			b.Reset()
		}
	}
	r.cur = b
	return
}

View File

@@ -0,0 +1,23 @@
package counter
import (
"testing"
"time"
)
// TestRollingCounterMinInterval adds one event every 5ms for ~500ms into
// a 500ms/10-bucket window and accepts roughly one bucket's worth of
// error (±10%) on the final sum, since bucket expiry is coarse-grained.
func TestRollingCounterMinInterval(t *testing.T) {
	count := NewRolling(time.Second/2, 10)
	tk1 := time.NewTicker(5 * time.Millisecond)
	defer tk1.Stop()
	for i := 0; i < 100; i++ {
		<-tk1.C
		count.Add(1)
	}
	v := count.Value()
	t.Logf("count value: %d", v)
	// 10% of error when bucket is 10
	if v < 90 || v > 110 {
		t.Errorf("expect value in [90-110] get %d", v)
	}
}

View File

@@ -0,0 +1,27 @@
# Bazel build for //library/stat/prom: the Prometheus-backed stat impl.
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["prometheus.go"],
    importpath = "go-common/library/stat/prom",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,126 @@
package prom
import (
"github.com/prometheus/client_golang/prometheus"
)
// Pre-registered metric families for the common client/server roles.
// Each Prom bundles up to three collectors (histogram "timer", counter,
// gauge "state") sharing one label scheme.
var (
	// LibClient for mc redis and db client.
	LibClient = New().WithTimer("go_lib_client", []string{"method"}).WithState("go_lib_client_state", []string{"method", "name"}).WithCounter("go_lib_client_code", []string{"method", "code"})
	// RPCClient rpc client
	RPCClient = New().WithTimer("go_rpc_client", []string{"method"}).WithState("go_rpc_client_state", []string{"method", "name"}).WithCounter("go_rpc_client_code", []string{"method", "code"})
	// HTTPClient http client
	HTTPClient = New().WithTimer("go_http_client", []string{"method"}).WithState("go_http_client_state", []string{"method", "name"}).WithCounter("go_http_client_code", []string{"method", "code"})
	// HTTPServer for http server
	HTTPServer = New().WithTimer("go_http_server", []string{"user", "method"}).WithCounter("go_http_server_code", []string{"user", "method", "code"})
	// RPCServer for rpc server
	RPCServer = New().WithTimer("go_rpc_server", []string{"user", "method"}).WithCounter("go_rpc_server_code", []string{"user", "method", "code"})
	// BusinessErrCount for business err count
	BusinessErrCount = New().WithCounter("go_business_err_count", []string{"name"}).WithState("go_business_err_state", []string{"name"})
	// BusinessInfoCount for business info count
	BusinessInfoCount = New().WithCounter("go_business_info_count", []string{"name"}).WithState("go_business_info_state", []string{"name"})
	// CacheHit for cache hit
	CacheHit = New().WithCounter("go_cache_hit", []string{"name"})
	// CacheMiss for cache miss
	CacheMiss = New().WithCounter("go_cache_miss", []string{"name"})
)
// Prom bundles up to three Prometheus collectors sharing a label scheme.
type Prom struct {
	timer   *prometheus.HistogramVec // latency observations (ms)
	counter *prometheus.CounterVec   // monotonically increasing counts
	state   *prometheus.GaugeVec     // up/down "state" values
}

// New creates a Prom instance.
func New() *Prom {
	return &Prom{}
}
// WithTimer registers the histogram used by Timing. (Despite the
// historical "summary timer" name, this is a HistogramVec.) It is a
// no-op when p is nil or a timer was already set, and returns p so
// calls can be chained.
func (p *Prom) WithTimer(name string, labels []string) *Prom {
	if p == nil || p.timer != nil {
		return p
	}
	p.timer = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name: name,
			Help: name,
		}, labels)
	prometheus.MustRegister(p.timer)
	return p
}
// WithCounter registers the counter collector; a no-op when p is nil or
// a counter was already set. Returns p for chaining.
func (p *Prom) WithCounter(name string, labels []string) *Prom {
	if p == nil || p.counter != nil {
		return p
	}
	p.counter = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: name,
			Help: name,
		}, labels)
	prometheus.MustRegister(p.counter)
	return p
}

// WithState registers the gauge ("state") collector; a no-op when p is
// nil or a gauge was already set. Returns p for chaining.
func (p *Prom) WithState(name string, labels []string) *Prom {
	if p == nil || p.state != nil {
		return p
	}
	p.state = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: name,
			Help: name,
		}, labels)
	prometheus.MustRegister(p.state)
	return p
}
// Timing records one latency observation (in milliseconds), unsampled,
// labelled with name plus extra.
func (p *Prom) Timing(name string, time int64, extra ...string) {
	if p.timer == nil {
		return
	}
	labels := append([]string{name}, extra...)
	p.timer.WithLabelValues(labels...).Observe(float64(time))
}

// Incr increments the counter and the state gauge, unsampled.
func (p *Prom) Incr(name string, extra ...string) {
	labels := append([]string{name}, extra...)
	if p.counter != nil {
		p.counter.WithLabelValues(labels...).Inc()
	}
	if p.state != nil {
		p.state.WithLabelValues(labels...).Inc()
	}
}

// Decr decrements the state gauge, unsampled.
func (p *Prom) Decr(name string, extra ...string) {
	if p.state == nil {
		return
	}
	labels := append([]string{name}, extra...)
	p.state.WithLabelValues(labels...).Dec()
}

// State sets the state gauge to v.
func (p *Prom) State(name string, v int64, extra ...string) {
	if p.state == nil {
		return
	}
	labels := append([]string{name}, extra...)
	p.state.WithLabelValues(labels...).Set(float64(v))
}

// Add adds v to the counter and the state gauge; callers must pass v > 0
// because Prometheus counters reject negative increments.
func (p *Prom) Add(name string, v int64, extra ...string) {
	labels := append([]string{name}, extra...)
	if p.counter != nil {
		p.counter.WithLabelValues(labels...).Add(float64(v))
	}
	if p.state != nil {
		p.state.WithLabelValues(labels...).Add(float64(v))
	}
}

25
library/stat/stat.go Normal file
View File

@@ -0,0 +1,25 @@
package stat
import (
"go-common/library/stat/prom"
)
// Stat is the minimal metrics facade implemented by both the prom and
// statsd backends.
type Stat interface {
	// Timing records a latency observation in milliseconds.
	Timing(name string, time int64, extra ...string)
	Incr(name string, extra ...string) // name,ext...,code
	// State sets a gauge-style value.
	State(name string, val int64, extra ...string)
}

// Default Stat implementations, all backed by the shared prom families.
var (
	// http
	HTTPClient Stat = prom.HTTPClient
	HTTPServer Stat = prom.HTTPServer
	// storage
	Cache Stat = prom.LibClient
	DB    Stat = prom.LibClient
	// rpc
	RPCClient Stat = prom.RPCClient
	RPCServer Stat = prom.RPCServer
)

View File

@@ -0,0 +1,27 @@
# Bazel build for //library/stat/statsd: the UDP statsd stat impl.
load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["statsd.go"],
    importpath = "go-common/library/stat/statsd",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = ["//library/log:go_default_library"],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,137 @@
package statsd
import (
"bytes"
"fmt"
"math/rand"
"net"
"strings"
"time"
"go-common/library/log"
)
const (
	// _quit is the sentinel line that tells writeproc to flush and exit.
	_quit = ""
	// _size is the maximum UDP payload assembled before a flush, bytes.
	_size = 1400
)

// Config statsd config.
type Config struct {
	Project  string // metric name prefix
	Addr     string // UDP host:port of the statsd daemon
	ChanSize int    // capacity of the buffered stat channel
}

// Statsd statsd struct.
type Statsd struct {
	// project.hostname.api
	// Make sure no '/' in the api.
	c        *Config
	business string      // metric prefix (the project name)
	r        *rand.Rand  // sampling source
	stats    chan string // lines awaiting the write goroutine
}
// New creates a Statsd and starts the background goroutine that batches
// and flushes stat lines to the UDP daemon.
func New(c *Config) (s *Statsd) {
	s = &Statsd{
		c: c,
		// The prefix is just the project name; the previous
		// fmt.Sprintf("%s", c.Project) round trip was a no-op.
		business: c.Project,
		// Sampling does not need crypto-grade randomness.
		r:     rand.New(rand.NewSource(time.Now().Unix())),
		stats: make(chan string, c.ChanSize),
	}
	go s.writeproc()
	return
}
// send enqueues one formatted stat line for the write goroutine,
// applying client-side sampling first.
//
// rate is the fraction of events to keep (1 keeps everything). The
// previous comparison was inverted — it dropped the event when the
// random draw was BELOW rate, so a higher sample rate discarded more
// data. All current callers pass rate=1, which masked the bug.
func (s *Statsd) send(data string, rate float32) {
	if rate < 1 && s.r != nil && s.r.Float32() > rate {
		// Sampled out.
		return
	}
	select {
	case s.stats <- data:
	default:
		// Never block the caller; drop the stat and complain.
		log.Warn("Statsd stat channel is full")
	}
}
// writeproc is the background flusher: it batches stat lines into a
// buffer and writes the batch to the UDP daemon either when the next
// line would exceed one payload (_size) or once per second.
func (s *Statsd) writeproc() {
	var (
		err  error
		l    int
		stat string
		conn net.Conn
		buf  bytes.Buffer
	)
	// time.Tick would leak its ticker once this goroutine exits on
	// Close; use an explicit ticker and stop it.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case stat = <-s.stats:
			if stat == _quit {
				if conn != nil {
					conn.Close()
				}
				return
			}
		case <-ticker.C:
			// Periodic flush; [:l-1] trims the trailing '\n'.
			// conn is non-nil whenever buf is non-empty (lines are
			// only buffered after a successful dial), the guard is
			// purely defensive.
			if l = buf.Len(); l > 0 && conn != nil {
				conn.Write(buf.Bytes()[:l-1])
				buf.Reset()
			}
			continue
		}
		if conn == nil {
			if conn, err = net.Dial("udp", s.c.Addr); err != nil {
				log.Error("net.Dial('udp', %s) error(%v)", s.c.Addr, err)
				time.Sleep(time.Second)
				continue
			}
		}
		// Flush before the buffer would exceed one UDP payload.
		if l = buf.Len(); l+len(stat) >= _size {
			conn.Write(buf.Bytes()[:l-1])
			buf.Reset()
		}
		buf.WriteString(stat)
		buf.WriteByte('\n')
	}
}
// Close asks the write goroutine to close its connection and exit.
func (s *Statsd) Close() {
	s.stats <- _quit
}

// Timing reports timing information (in milliseconds), unsampled.
func (s *Statsd) Timing(name string, time int64, extra ...string) {
	s.send(formatTiming(s.business, name, time, extra...), 1)
}

// Incr increments one stat counter, unsampled.
func (s *Statsd) Incr(name string, extra ...string) {
	s.send(formatIncr(s.business, name, extra...), 1)
}

// State is a no-op: this statsd client does not emit gauges.
func (s *Statsd) State(stat string, val int64, extra ...string) {
}
// formatIncr renders a counter line: <business>.<name>[.<extra>...]:1|c
func formatIncr(business, name string, extra ...string) string {
	parts := make([]string, 0, 2+len(extra))
	parts = append(parts, business, name)
	parts = append(parts, extra...)
	return strings.Join(parts, ".") + ":1|c"
}

// formatTiming renders a timer line: <business>.<name>[.<extra>...]:<ms>|ms
func formatTiming(business, name string, time int64, extra ...string) string {
	parts := make([]string, 0, 2+len(extra))
	parts = append(parts, business, name)
	parts = append(parts, extra...)
	return strings.Join(parts, ".") + fmt.Sprintf(":%d|ms", time)
}

View File

@@ -0,0 +1,38 @@
# Bazel build for //library/stat/summary: windowed sum+count aggregation.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = ["summary_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

go_library(
    name = "go_default_library",
    srcs = ["summary.go"],
    importpath = "go-common/library/stat/summary",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,129 @@
package summary
import (
"sync"
"time"
)
// bucket is one slot of the ring behind summary, holding the value sum
// and observation count for its interval. It is not goroutine-safe on
// its own; summary serializes access with its mutex.
type bucket struct {
	val   int64
	count int64
	next  *bucket
}

// Add accumulates val and bumps the observation count.
func (b *bucket) Add(val int64) {
	b.count++
	b.val += val
}

// Value reports the accumulated sum and count.
func (b *bucket) Value() (int64, int64) {
	return b.val, b.count
}

// Reset clears both accumulators.
func (b *bucket) Reset() {
	b.val, b.count = 0, 0
}
// Summary is a summary interface.
type Summary interface {
	// Add records one observation of the given value.
	Add(int64)
	// Reset clears all buckets.
	Reset()
	// Value returns the windowed value sum and observation count.
	Value() (val int64, cnt int64)
}

// summary is a sliding-window implementation: a circular list of
// buckets, each covering window/winBucket time.
type summary struct {
	mu         sync.RWMutex
	buckets    []bucket
	bucketTime int64 // nanoseconds covered by one bucket
	lastAccess int64 // UnixNano of the last Add (ring rotation time)
	cur        *bucket
}
// New new a summary.
//
// use RollingCounter creates a new window. windowTime is the time covering the entire
// window. windowBuckets is the number of buckets the window is divided into.
// An example: a 10 second window with 10 buckets will have 10 buckets covering
// 1 second each.
func New(window time.Duration, winBucket int) Summary {
	buckets := make([]bucket, winBucket)
	bucket := &buckets[0]
	// Link the slice into a circular list.
	for i := 1; i < winBucket; i++ {
		bucket.next = &buckets[i]
		bucket = bucket.next
	}
	bucket.next = &buckets[0]
	bucketTime := time.Duration(window.Nanoseconds() / int64(winBucket))
	return &summary{
		cur:        &buckets[0],
		buckets:    buckets,
		bucketTime: int64(bucketTime),
		lastAccess: time.Now().UnixNano(),
	}
}
// Add records one observation of val in the current bucket, rotating
// the ring first if bucket intervals have elapsed.
func (s *summary) Add(val int64) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.lastBucket().Add(val)
}
// Value sums value and count over the buckets still inside the window.
//
// elapsed(now) reports how many bucket intervals have passed since the
// last Add; those intervals correspond to the OLDEST buckets in the
// ring — exactly the ones lastBucket would reset on the next write
// (cur.next .. cur+i). They are skipped here without mutating the ring.
// The walk advances to cur.next before deciding whether to skip, which
// keeps cur — the most recently written bucket — in the totals. (The
// previous code started the skip at cur itself, dropping the freshest
// bucket and counting the stalest one instead.)
func (s *summary) Value() (val int64, cnt int64) {
	now := time.Now().UnixNano()
	s.mu.RLock()
	b := s.cur
	i := s.elapsed(now)
	for j := 0; j < len(s.buckets); j++ {
		b = b.next
		// Skip the i expired buckets.
		if i > 0 {
			i--
			continue
		}
		v, c := b.Value()
		val += v
		cnt += c
	}
	s.mu.RUnlock()
	return
}
// Reset reset the counter.
func (s *summary) Reset() {
	s.mu.Lock()
	for i := range s.buckets {
		s.buckets[i].Reset()
	}
	s.mu.Unlock()
}

// elapsed reports how many bucket intervals have passed since the last
// Add (lastAccess), capped at the ring size. 0 means the current bucket
// is still live. Callers must hold the mutex.
func (s *summary) elapsed(now int64) (i int) {
	var e int64
	if e = now - s.lastAccess; e <= s.bucketTime {
		return
	}
	if i = int(e / s.bucketTime); i > len(s.buckets) {
		i = len(s.buckets)
	}
	return
}
// lastBucket rotates the ring forward by the number of elapsed bucket
// intervals, resetting each bucket it passes over (their data is older
// than the window), and returns the bucket representing "now".
// Callers must hold the write lock.
func (s *summary) lastBucket() (b *bucket) {
	now := time.Now().UnixNano()
	b = s.cur
	// reset the buckets between now and number of buckets ago. If
	// that is more that the existing buckets, reset all.
	if i := s.elapsed(now); i > 0 {
		s.lastAccess = now
		for ; i > 0; i-- {
			// replace the next used bucket.
			b = b.next
			b.Reset()
		}
	}
	s.cur = b
	return
}

View File

@@ -0,0 +1,69 @@
package summary
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestSummaryMinInterval feeds 100 observations of value 2 over ~500ms
// into a 500ms/10-bucket window and accepts roughly one bucket's worth
// of error (±10%) on both the sum and the count.
//
// Fixed: the failure messages previously printed the wrong range for
// the value check ([90-110] instead of [190-210]) and reported v where
// the count c was being tested.
func TestSummaryMinInterval(t *testing.T) {
	count := New(time.Second/2, 10)
	tk1 := time.NewTicker(5 * time.Millisecond)
	defer tk1.Stop()
	for i := 0; i < 100; i++ {
		<-tk1.C
		count.Add(2)
	}
	v, c := count.Value()
	t.Logf("count value: %d, %d\n", v, c)
	// 10% of error when bucket is 10
	if v < 190 || v > 210 {
		t.Errorf("expect value in [190-210] get %d", v)
	}
	// 10% of error when bucket is 10
	if c < 90 || c > 110 {
		t.Errorf("expect count in [90-110] get %d", c)
	}
}
// TestSummary exercises bucket expiry of a 1s/10-bucket summary; the
// sleeps line up with 100ms bucket boundaries, so timing-sensitive.
func TestSummary(t *testing.T) {
	s := New(time.Second, 10)
	t.Run("add", func(t *testing.T) {
		s.Add(1)
		v, c := s.Value()
		assert.Equal(t, v, int64(1))
		assert.Equal(t, c, int64(1))
	})
	time.Sleep(time.Millisecond * 110)
	t.Run("add2", func(t *testing.T) {
		s.Add(1)
		v, c := s.Value()
		assert.Equal(t, v, int64(2))
		assert.Equal(t, c, int64(2))
	})
	time.Sleep(time.Millisecond * 900) // expire one bucket, 110 + 900
	t.Run("expire", func(t *testing.T) {
		v, c := s.Value()
		assert.Equal(t, v, int64(1))
		assert.Equal(t, c, int64(1))
		s.Add(1)
		v, c = s.Value()
		assert.Equal(t, v, int64(2)) // expire one bucket
		assert.Equal(t, c, int64(2)) // expire one bucket
	})
	time.Sleep(time.Millisecond * 1100)
	t.Run("expire_all", func(t *testing.T) {
		v, c := s.Value()
		assert.Equal(t, v, int64(0))
		assert.Equal(t, c, int64(0))
	})
	t.Run("reset", func(t *testing.T) {
		s.Reset()
		v, c := s.Value()
		assert.Equal(t, v, int64(0))
		assert.Equal(t, c, int64(0))
	})
}

View File

@@ -0,0 +1,60 @@
# Bazel build for //library/stat/sys/cpu: cgroup-aware CPU sampling.
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = [
        "cgroup_test.go",
        "stat_test.go",
    ],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"],
)

go_library(
    name = "go_default_library",
    srcs = [
        "cgroup.go",
        "cpu.go",
        "sysconfig_notcgo.go",
        "util.go",
        # Platform backends; cpu_other.go is the generic fallback.
    ] + select({
        "@io_bazel_rules_go//go/platform:linux": [
            "cpu_linux.go",
        ],
        "@io_bazel_rules_go//go/platform:darwin": [
            "cpu_darwin.go",
        ],
        "//conditions:default": [
            "cpu_other.go",
        ],
    }),
    importpath = "go-common/library/stat/sys/cpu",
    tags = ["manual"],
    visibility = ["//visibility:public"],
    deps = [
        "//library/log:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

# package-srcs / all-srcs let ancestor packages collect sources recursively.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,5 @@
### os/stat
##### Version 1.0.0
1. 可以获取cpu使用率、cpu核心数、cpu最高主频

View File

@@ -0,0 +1,8 @@
# Owner
caoguoliang
# Author
caoguoliang
# Reviewer
maojian

View File

@@ -0,0 +1,7 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- caoguoliang
reviewers:
- caoguoliang
- maojian

View File

@@ -0,0 +1,12 @@
#### os/stat
> System Information
##### 项目简介
获取Linux平台下的系统信息包括cpu主频、cpu使用率等
##### 编译环境
- **请只用 Golang v1.8.x 以上版本编译执行**

View File

@@ -0,0 +1,125 @@
// +build linux
package cpu
import (
"bufio"
"fmt"
"io"
"os"
"path"
"strconv"
"strings"
)
// cgroupRootDir is where the kernel mounts the cgroup v1 hierarchies.
const cgroupRootDir = "/sys/fs/cgroup"

// cgroup Linux cgroup
type cgroup struct {
	// cgroupSet maps a subsystem name (e.g. "cpu", "cpuacct") to the
	// directory holding that subsystem's files for this process.
	cgroupSet map[string]string
}
// CPUCFSQuotaUs returns cpu.cfs_quota_us for this cgroup (-1 means no
// CFS limit).
func (c *cgroup) CPUCFSQuotaUs() (int64, error) {
	raw, err := readFile(path.Join(c.cgroupSet["cpu"], "cpu.cfs_quota_us"))
	if err != nil {
		return 0, err
	}
	return strconv.ParseInt(raw, 10, 64)
}

// CPUCFSPeriodUs returns cpu.cfs_period_us for this cgroup.
func (c *cgroup) CPUCFSPeriodUs() (uint64, error) {
	raw, err := readFile(path.Join(c.cgroupSet["cpu"], "cpu.cfs_period_us"))
	if err != nil {
		return 0, err
	}
	return parseUint(raw)
}

// CPUAcctUsage returns cpuacct.usage, the cgroup's total CPU time in
// nanoseconds.
func (c *cgroup) CPUAcctUsage() (uint64, error) {
	raw, err := readFile(path.Join(c.cgroupSet["cpuacct"], "cpuacct.usage"))
	if err != nil {
		return 0, err
	}
	return parseUint(raw)
}
// CPUAcctUsagePerCPU reads cpuacct.usage_percpu and returns the
// cumulative usage per CPU (nanoseconds, one entry per CPU).
func (c *cgroup) CPUAcctUsagePerCPU() ([]uint64, error) {
	data, err := readFile(path.Join(c.cgroupSet["cpuacct"], "cpuacct.usage_percpu"))
	if err != nil {
		return nil, err
	}
	var usage []uint64
	// readFile already returns a string; the old string(data) conversion
	// was redundant.
	for _, v := range strings.Fields(data) {
		u, perr := parseUint(v)
		if perr != nil {
			return nil, perr
		}
		usage = append(usage, u)
	}
	return usage, nil
}
// CPUSetCPUs parses cpuset.cpus into the list of CPU ids this cgroup
// may run on (order unspecified, since it comes from a map).
func (c *cgroup) CPUSetCPUs() ([]uint64, error) {
	raw, err := readFile(path.Join(c.cgroupSet["cpuset"], "cpuset.cpus"))
	if err != nil {
		return nil, err
	}
	ids, err := ParseUintList(raw)
	if err != nil {
		return nil, err
	}
	var sets []uint64
	for id := range ids {
		sets = append(sets, uint64(id))
	}
	return sets, nil
}
// currentcGroup builds the subsystem→directory map for the calling
// process by parsing /proc/<pid>/cgroup, whose lines look like
// "4:cpu,cpuacct:/docker/<id>".
func currentcGroup() (*cgroup, error) {
	pid := os.Getpid()
	cgroupFile := fmt.Sprintf("/proc/%d/cgroup", pid)
	cgroupSet := make(map[string]string)
	fp, err := os.Open(cgroupFile)
	if err != nil {
		return nil, err
	}
	defer fp.Close()
	buf := bufio.NewReader(fp)
	for {
		line, err := buf.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		col := strings.Split(strings.TrimSpace(line), ":")
		if len(col) != 3 {
			return nil, fmt.Errorf("invalid cgroup format %s", line)
		}
		dir := col[2]
		// When dir is not equal to /, it must be in docker
		// NOTE(review): this branch drops the in-hierarchy path col[2],
		// assuming the container remounts its cgroup at the hierarchy
		// root under /sys/fs/cgroup/<subsystem> — confirm for the
		// deployment environments in use.
		if dir != "/" {
			cgroupSet[col[1]] = path.Join(cgroupRootDir, col[1])
			// Combined subsystems ("cpu,cpuacct") also get one entry
			// per individual name so lookups by "cpu" etc. succeed.
			if strings.Contains(col[1], ",") {
				for _, k := range strings.Split(col[1], ",") {
					cgroupSet[k] = path.Join(cgroupRootDir, k)
				}
			}
		} else {
			cgroupSet[col[1]] = path.Join(cgroupRootDir, col[1], col[2])
			if strings.Contains(col[1], ",") {
				for _, k := range strings.Split(col[1], ",") {
					cgroupSet[k] = path.Join(cgroupRootDir, k, col[2])
				}
			}
		}
	}
	return &cgroup{cgroupSet: cgroupSet}, nil
}

View File

@@ -0,0 +1,11 @@
// +build linux
package cpu
import (
"testing"
)
// TestCGroup is a placeholder; cgroup parsing is currently only
// exercised indirectly via the package init in cpu.go.
func TestCGroup(t *testing.T) {
	// TODO
}

110
library/stat/sys/cpu/cpu.go Normal file
View File

@@ -0,0 +1,110 @@
package cpu
import (
"fmt"
"go-common/library/log"
"sync/atomic"
"time"
)
var (
	cores   uint64  // number of logical CPUs (from per-CPU usage entries)
	maxFreq uint64  // max CPU frequency; units depend on the source file — NOTE(review): sysfs reports kHz, /proc/cpuinfo path yields Hz; confirm
	quota   float64 // effective CPU quota: cpuset size capped by cfs_quota/cfs_period
	usage   uint64  // latest usage sample (x1000 ratio), accessed atomically
	// Previous samples used by refreshCPU to compute deltas; only the
	// init-spawned goroutine touches them, so no locking.
	preSystem uint64
	preTotal  uint64
)
// init samples the static CPU facts (core count, effective quota, max
// frequency, baseline usage counters) and starts a goroutine that
// refreshes the usage ratio every 250ms. Failures panic: the package
// is unusable without these values.
func init() {
	cpus, err := perCPUUsage()
	if err != nil {
		panic(fmt.Errorf("stat/sys/cpu: perCPUUsage() failed!err:=%v", err))
	}
	cores = uint64(len(cpus))
	sets, err := cpuSets()
	if err != nil {
		panic(fmt.Errorf("stat/sys/cpu: cpuSets() failed!err:=%v", err))
	}
	quota = float64(len(sets))
	cq, err := cpuQuota()
	if err == nil {
		// cq == -1 means "no CFS limit"; otherwise the effective quota
		// is the smaller of the cpuset size and cfs_quota/cfs_period.
		if cq != -1 {
			var period uint64
			if period, err = cpuPeriod(); err != nil {
				panic(fmt.Errorf("stat/sys/cpu: cpuPeriod() failed!err:=%v", err))
			}
			limit := float64(cq) / float64(period)
			if limit < quota {
				quota = limit
			}
		}
	}
	maxFreq = cpuMaxFreq()
	// Baselines for the first delta computed by refreshCPU.
	preSystem, err = systemCPUUsage()
	if err != nil {
		panic(fmt.Errorf("sys/cpu: systemCPUUsage() failed!err:=%v", err))
	}
	preTotal, err = totalCPUUsage()
	if err != nil {
		panic(fmt.Errorf("sys/cpu: totalCPUUsage() failed!err:=%v", err))
	}
	go func() {
		ticker := time.NewTicker(time.Millisecond * 250)
		defer ticker.Stop()
		for {
			<-ticker.C
			cpu := refreshCPU()
			// Keep the last non-zero sample on transient read failures.
			if cpu != 0 {
				atomic.StoreUint64(&usage, cpu)
			}
		}
	}()
}
// refreshCPU takes one usage sample: the ratio of the cgroup's CPU time
// delta to the host's CPU time delta, scaled by cores*1000 and divided
// by quota. Returns 0 when a read fails or the system counter did not
// advance. Only the init goroutine calls this, so the pre* package
// variables need no locking.
func refreshCPU() (u uint64) {
	total, err := totalCPUUsage()
	if err != nil {
		log.Warn("os/stat: get totalCPUUsage failed,error(%v)", err)
		return
	}
	system, err := systemCPUUsage()
	if err != nil {
		log.Warn("os/stat: get systemCPUUsage failed,error(%v)", err)
		return
	}
	if system != preSystem {
		u = uint64(float64((total-preTotal)*cores*1e3) / (float64(system-preSystem) * quota))
	}
	preSystem = system
	preTotal = total
	return u
}
// Stat cpu stat.
type Stat struct {
	Usage uint64 // cpu use ratio, scaled x1000 (see refreshCPU).
}

// Info cpu info.
type Info struct {
	Frequency uint64  // max CPU frequency sampled at init (see maxFreq note).
	Quota     float64 // effective CPU quota (cpuset size capped by CFS limit).
}
// ReadStat copies the most recently sampled CPU usage into stat.
func ReadStat(stat *Stat) {
	stat.Usage = atomic.LoadUint64(&usage)
}

// GetInfo reports the static CPU information gathered at init time.
func GetInfo() Info {
	info := Info{
		Frequency: maxFreq,
		Quota:     quota,
	}
	return info
}

View File

@@ -0,0 +1,20 @@
// +build darwin
package cpu
// Development stubs for darwin: the cgroup/procfs facilities used on
// Linux do not exist here, so deterministic fake counters are returned
// that still let the usage-ratio arithmetic run.
var su uint64 = 10
var tu uint64 = 10

// systemCPUUsage fakes a system counter advancing 1000 per call.
func systemCPUUsage() (usage uint64, err error) {
	su += 1000
	return su, nil
}

// totalCPUUsage fakes a process counter advancing 500 per call.
func totalCPUUsage() (usage uint64, err error) {
	tu += 500
	return tu, nil
}

// perCPUUsage pretends there are four CPUs.
func perCPUUsage() (usage []uint64, err error) {
	return []uint64{10, 10, 10, 10}, nil
}

// cpuSets pretends CPUs 0-3 are available.
func cpuSets() (sets []uint64, err error) {
	return []uint64{0, 1, 2, 3}, nil
}

// cpuQuota returns a fixed fake quota.
func cpuQuota() (quota int64, err error) {
	return 100, nil
}

// cpuPeriod returns a fixed fake period.
func cpuPeriod() (peroid uint64, err error) {
	return 10, nil
}

// cpuMaxFreq returns a fixed fake frequency.
func cpuMaxFreq() (feq uint64) {
	return 10
}

View File

@@ -0,0 +1,147 @@
// +build linux
package cpu
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
)
// nanoSecondsPerSecond converts clock-tick based values to nanoseconds.
const nanoSecondsPerSecond = 1e9

// ErrNoCFSLimit is no quota limit
var ErrNoCFSLimit = errors.Errorf("no quota limit")

// clockTicksPerSecond caches USER_HZ (see GetClockTicks).
var clockTicksPerSecond = uint64(GetClockTicks())
// systemCPUUsage returns the host system's cpu usage in
// nanoseconds. An error is returned if the format of the underlying
// file does not match.
//
// Uses /proc/stat defined by POSIX. Looks for the cpu
// statistics line and then sums up the first seven fields
// provided. See man 5 proc for details on specific field
// information.
func systemCPUUsage() (usage uint64, err error) {
	var (
		line string
		f    *os.File
	)
	if f, err = os.Open("/proc/stat"); err != nil {
		return
	}
	// The reader is created detached and Reset onto f; the deferred
	// Reset(nil) drops the file reference before closing.
	bufReader := bufio.NewReaderSize(nil, 128)
	defer func() {
		bufReader.Reset(nil)
		f.Close()
	}()
	bufReader.Reset(f)
	for err == nil {
		if line, err = bufReader.ReadString('\n'); err != nil {
			err = errors.WithStack(err)
			return
		}
		parts := strings.Fields(line)
		switch parts[0] {
		case "cpu":
			// The aggregate line: "cpu user nice system idle iowait irq softirq ..."
			if len(parts) < 8 {
				err = errors.WithStack(fmt.Errorf("bad format of cpu stats"))
				return
			}
			var totalClockTicks uint64
			for _, i := range parts[1:8] {
				var v uint64
				if v, err = strconv.ParseUint(i, 10, 64); err != nil {
					err = errors.WithStack(fmt.Errorf("error parsing cpu stats"))
					return
				}
				totalClockTicks += v
			}
			// Convert clock ticks to nanoseconds via USER_HZ.
			usage = (totalClockTicks * nanoSecondsPerSecond) / clockTicksPerSecond
			return
		}
	}
	err = errors.Errorf("bad stats format")
	return
}
func totalCPUUsage() (usage uint64, err error) {
var cg *cgroup
if cg, err = currentcGroup(); err != nil {
return
}
return cg.CPUAcctUsage()
}
func perCPUUsage() (usage []uint64, err error) {
var cg *cgroup
if cg, err = currentcGroup(); err != nil {
return
}
return cg.CPUAcctUsagePerCPU()
}
func cpuSets() (sets []uint64, err error) {
var cg *cgroup
if cg, err = currentcGroup(); err != nil {
return
}
return cg.CPUSetCPUs()
}
func cpuQuota() (quota int64, err error) {
var cg *cgroup
if cg, err = currentcGroup(); err != nil {
return
}
return cg.CPUCFSQuotaUs()
}
func cpuPeriod() (peroid uint64, err error) {
var cg *cgroup
if cg, err = currentcGroup(); err != nil {
return
}
return cg.CPUCFSPeriodUs()
}
// cpuFreq scans /proc/cpuinfo for a "cpu MHz" (x86) or "clock" (ppc)
// line and returns that frequency converted to Hz, or 0 when no such
// line parses.
func cpuFreq() uint64 {
	lines, err := readLines("/proc/cpuinfo")
	if err != nil {
		return 0
	}
	for _, line := range lines {
		fields := strings.Split(line, ":")
		if len(fields) < 2 {
			continue
		}
		key := strings.TrimSpace(fields[0])
		value := strings.TrimSpace(fields[1])
		if key == "cpu MHz" || key == "clock" {
			// treat this as the fallback value, thus we ignore error
			if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil {
				return uint64(t * 1000.0 * 1000.0)
			}
		}
	}
	return 0
}
// cpuMaxFreq returns the maximum CPU frequency, preferring the sysfs
// cpuinfo_max_freq value and falling back to the /proc/cpuinfo reading.
// NOTE(review): sysfs reports kHz while cpuFreq returns Hz; the two
// sources use different units — confirm which consumers expect.
func cpuMaxFreq() uint64 {
	feq := cpuFreq()
	data, err := readFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq")
	if err != nil {
		return feq
	}
	// override the max freq from /proc/cpuinfo
	if cfeq, perr := parseUint(data); perr == nil {
		feq = cfeq
	}
	return feq
}

View File

@@ -0,0 +1,11 @@
// +build windows
package cpu
// Constant fallback stubs. NOTE(review): this file is named cpu_other.go
// and listed under //conditions:default in the BUILD file, but its build
// tag restricts it to windows — confirm the intended tag.
func systemCPUUsage() (usage uint64, err error) {
	return 10, nil
}

func totalCPUUsage() (usage uint64, err error) {
	return 10, nil
}

func perCPUUsage() (usage []uint64, err error) {
	return []uint64{10, 10, 10, 10}, nil
}

func cpuSets() (sets []uint64, err error) {
	return []uint64{0, 1, 2, 3}, nil
}

func cpuQuota() (quota int64, err error) {
	return 100, nil
}

func cpuPeriod() (peroid uint64, err error) {
	return 10, nil
}

func cpuMaxFreq() (feq uint64) {
	return 10
}

View File

@@ -0,0 +1,20 @@
package cpu
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestStat waits for a few refresh ticks (package init starts a 250ms
// sampling loop), then checks that usage and static info came out
// non-zero on this machine.
func TestStat(t *testing.T) {
	// Give the background sampler time to produce a sample.
	time.Sleep(time.Second * 2)
	var s Stat
	var i Info
	ReadStat(&s)
	i = GetInfo()
	assert.NotZero(t, s.Usage)
	assert.NotZero(t, i.Frequency)
	assert.NotZero(t, i.Quota)
}

View File

@@ -0,0 +1,14 @@
package cpu
// GetClockTicks reports the OS's clock ticks per second (USER_HZ).
// Without cgo there is no portable sysconf(_SC_CLK_TCK), so the
// long-standing Linux constant of 100 is returned.
//
// TODO figure out a better alternative for platforms where we're missing cgo
//
// TODO Windows. This could be implemented using Win32 QueryPerformanceFrequency().
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms644905(v=vs.85).aspx
//
// An example of its usage can be found here.
// https://msdn.microsoft.com/en-us/library/windows/desktop/dn553408(v=vs.85).aspx
func GetClockTicks() int {
	const userHz = 100
	return userHz
}

View File

@@ -0,0 +1,121 @@
package cpu
import (
"bufio"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/pkg/errors"
)
// readFile reads the whole file at path and returns its contents with
// surrounding whitespace (notably the trailing newline) trimmed.
func readFile(path string) (string, error) {
	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return "", errors.Wrapf(err, "os/stat: read file(%s) failed!", path)
	}
	return strings.TrimSpace(string(contents)), nil
}

// parseUint parses s as an unsigned integer. Negative inputs — whether
// or not they fit in int64 — are clamped to 0 rather than treated as
// errors, matching how cgroup files encode "unset"/"unlimited" values.
func parseUint(s string) (uint64, error) {
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		intValue, intErr := strconv.ParseInt(s, 10, 64)
		// 1. Handle negative values greater than MinInt64 (and)
		// 2. Handle negative values lesser than MinInt64
		if intErr == nil && intValue < 0 {
			return 0, nil
		} else if intErr != nil &&
			intErr.(*strconv.NumError).Err == strconv.ErrRange &&
			intValue < 0 {
			return 0, nil
		}
		return 0, errors.Wrapf(err, "os/stat: parseUint(%s) failed!", s)
	}
	return v, nil
}
// ParseUintList parses and validates the specified string as the value
// found in some cgroup file (e.g. cpuset.cpus, cpuset.mems), which could
// be one of the formats below; duplicates are allowed in the input. It
// returns a map[int]bool with the available elements set to true.
// Supported formats:
//	7
//	1-6
//	0,3-4,7,8-10
//	0-0,0,1-7
//	03,1-3	<- this is gonna get parsed as [1,2,3]
//	3,2,1
//	0-2,3,1
func ParseUintList(val string) (map[int]bool, error) {
	if val == "" {
		return map[int]bool{}, nil
	}
	availableInts := make(map[int]bool)
	errInvalidFormat := errors.Errorf("os/stat: invalid format: %s", val)
	for _, piece := range strings.Split(val, ",") {
		if strings.Contains(piece, "-") {
			// A "lo-hi" range, both bounds inclusive.
			bounds := strings.SplitN(piece, "-", 2)
			lo, err := strconv.Atoi(bounds[0])
			if err != nil {
				return nil, errInvalidFormat
			}
			hi, err := strconv.Atoi(bounds[1])
			if err != nil {
				return nil, errInvalidFormat
			}
			if hi < lo {
				return nil, errInvalidFormat
			}
			for i := lo; i <= hi; i++ {
				availableInts[i] = true
			}
			continue
		}
		// A single element.
		v, err := strconv.Atoi(piece)
		if err != nil {
			return nil, errInvalidFormat
		}
		availableInts[v] = true
	}
	return availableInts, nil
}
// readLines reads contents from a file and splits them by new lines.
// A convenience wrapper for readLinesOffsetN(filename, 0, -1).
func readLines(filename string) ([]string, error) {
	return readLinesOffsetN(filename, 0, -1)
}
// readLinesOffsetN reads contents from file and splits them by new line.
// The offset tells at which line number to start.
// The count determines the number of lines to read (starting from offset):
//	n >= 0: at most n lines
//	n < 0:  whole file
//
// Unlike the previous version, a final line that is not terminated by a
// newline is returned instead of being silently dropped (ReadString
// yields the partial line together with io.EOF).
func readLinesOffsetN(filename string, offset uint, n int) ([]string, error) {
	f, err := os.Open(filename)
	if err != nil {
		// Historical contract: callers get a one-element slice on open
		// failure; kept for backward compatibility.
		return []string{""}, err
	}
	defer f.Close()
	var ret []string
	r := bufio.NewReader(f)
	for i := 0; i < n+int(offset) || n < 0; i++ {
		line, rerr := r.ReadString('\n')
		if len(line) > 0 && i >= int(offset) {
			ret = append(ret, strings.Trim(line, "\n"))
		}
		if rerr != nil {
			// EOF or read error: stop. Read errors are swallowed here,
			// matching the original behavior.
			break
		}
	}
	return ret, nil
}