Create & Init Project...
app/admin/main/cache/service/BUILD (vendored, new file, 53 lines)
@@ -0,0 +1,53 @@
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
    "go_library",
)

go_test(
    name = "go_default_test",
    srcs = ["service_test.go"],
    embed = [":go_default_library"],
    rundir = ".",
    tags = ["automanaged"],
    deps = [
        "//app/admin/main/cache/conf:go_default_library",
        "//app/admin/main/cache/model:go_default_library",
        "//vendor/github.com/smartystreets/goconvey/convey:go_default_library",
    ],
)

go_library(
    name = "go_default_library",
    srcs = [
        "opscache.go",
        "overlord.go",
        "service.go",
    ],
    importpath = "go-common/app/admin/main/cache/service",
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
    deps = [
        "//app/admin/main/cache/conf:go_default_library",
        "//app/admin/main/cache/dao:go_default_library",
        "//app/admin/main/cache/model:go_default_library",
        "//library/ecode:go_default_library",
        "//vendor/github.com/BurntSushi/toml:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
app/admin/main/cache/service/opscache.go (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
package service

import (
	"context"
	"time"
)

// loadOpsCache refreshes the in-memory snapshots of the ops memcache and
// redis clusters. On error the previous snapshot is kept.
func (s *Service) loadOpsCache() {
	mcs, err := s.dao.OpsMemcaches(context.Background())
	if err == nil {
		s.opsMcs = mcs
	}
	rds, err := s.dao.OpsRediss(context.Background())
	if err == nil {
		s.opsRds = rds
	}
}

// loadOpsproc reloads the ops cache once a minute.
func (s *Service) loadOpsproc() {
	for {
		s.loadOpsCache()
		time.Sleep(time.Minute)
	}
}
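loadOpsproc runs forever once started from New in service.go below; there is no way to stop it. A minimal cancellable variant, sketched here under the assumption that callers can thread a context.Context through, and not part of this commit:

// Sketch only: a stoppable refresher using a ticker instead of Sleep.
// Assumes the same Service type and imports as opscache.go above.
func (s *Service) loadOpsprocCtx(ctx context.Context) {
	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		s.loadOpsCache()
		select {
		case <-ctx.Done():
			return // stop refreshing when the context is cancelled
		case <-ticker.C:
		}
	}
}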
app/admin/main/cache/service/overlord.go (vendored, new file, 514 lines)
@@ -0,0 +1,514 @@
package service

import (
	"bytes"
	"context"
	"fmt"
	"strconv"

	"go-common/app/admin/main/cache/model"
	"go-common/library/ecode"

	"github.com/BurntSushi/toml"
)

// OpsClusterNames lists the names of ops clusters of the given cache type.
func (s *Service) OpsClusterNames(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	resp = &model.OverlordResp{}
	switch arg.Type {
	case "memcache":
		for _, opsmc := range s.opsMcs {
			resp.Names = append(resp.Names, opsmc.Labels.Name)
		}
	case "redis":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_standalone" {
				resp.Names = append(resp.Names, opsrd.Labels.Name)
			}
		}
	case "redis_cluster":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_cluster" {
				resp.Names = append(resp.Names, opsrd.Labels.Name)
			}
		}
	default:
		err = fmt.Errorf("unsupported type:%s", arg.Type)
	}
	return
}

// OpsClusterNodes returns the node addresses of the named ops cluster.
func (s *Service) OpsClusterNodes(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	resp = &model.OverlordResp{}
	switch arg.Type {
	case "memcache":
		for _, opsmc := range s.opsMcs {
			if arg.Name == opsmc.Labels.Name {
				resp.Addrs = opsmc.Targets
				return
			}
		}
	case "redis":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_standalone" && arg.Name == opsrd.Labels.Name {
				resp.Addrs = opsrd.Targets
				return
			}
		}
	case "redis_cluster":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_cluster" && arg.Name == opsrd.Labels.Name {
				resp.Addrs = opsrd.Targets
				return
			}
		}
	default:
		err = fmt.Errorf("unsupported type:%s", arg.Type)
	}
	return
}

// ImportOpsCluster imports an ops cluster and its nodes into the overlord
// tables; it is a no-op if a cluster with the same name already exists.
func (s *Service) ImportOpsCluster(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	exist := 0
	if err = s.dao.DB.Model(&model.OverlordCluster{}).Where("name=?", arg.Name).Count(&exist).Error; err != nil {
		return
	}
	if exist > 0 {
		return
	}
	var targets []string
	switch arg.Type {
	case "memcache":
		for _, opsmc := range s.opsMcs {
			if arg.Name == opsmc.Labels.Name {
				targets = opsmc.Targets
				break
			}
		}
	case "redis":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_standalone" && arg.Name == opsrd.Labels.Name {
				targets = opsrd.Targets
				break
			}
		}
	case "redis_cluster":
		for _, opsrd := range s.opsRds {
			if opsrd.Type == "redis_cluster" && arg.Name == opsrd.Labels.Name {
				targets = opsrd.Targets
				break
			}
		}
	default:
		err = fmt.Errorf("unsupported type:%s", arg.Type)
		return
	}
	// Allocate the proxy listen port from the number of clusters of this
	// type already stored: memcache proxies start at 11211, redis at 26379.
	port := 0
	if err = s.dao.DB.Model(&model.OverlordCluster{}).Where("type=?", arg.Type).Count(&port).Error; err != nil {
		return
	}
	if arg.Type == "memcache" {
		port += 11211
	} else {
		port += 26379
	}
	tranDB := s.dao.DB.Begin()
	oc := &model.OverlordCluster{
		Name:             arg.Name,
		Type:             arg.Type,
		Zone:             arg.Zone,
		HashMethod:       "fnv1a_64",
		HashDistribution: "ketama",
		HashTag:          "",
		ListenProto:      "tcp",
		ListenAddr:       "0.0.0.0:" + strconv.Itoa(port),
		DailTimeout:      1000,
		ReadTimeout:      1000,
		WriteTimeout:     1000,
		NodeConn:         2,
		PingFailLimit:    3,
		PingAutoEject:    true,
	}
	if err = tranDB.Create(oc).Error; err != nil {
		tranDB.Rollback()
		return
	}
	for i, target := range targets {
		ocn := &model.OverlordNode{
			Cid:    oc.ID,
			Alias:  arg.Name + strconv.Itoa(i+1),
			Addr:   target,
			Weight: 1,
		}
		if err = tranDB.Create(ocn).Error; err != nil {
			tranDB.Rollback()
			return
		}
	}
	err = tranDB.Commit().Error
	return
}

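The listen-port rule above is purely count-based. A hypothetical helper (illustration only; nextListenPort does not exist in this commit) makes the arithmetic explicit:

// nextListenPort mirrors ImportOpsCluster's allocation rule: memcache
// proxies start at 11211 and redis variants at 26379, offset by how many
// clusters of that type are already stored. Illustration only.
func nextListenPort(clusterType string, existing int) int {
	base := 26379
	if clusterType == "memcache" {
		base = 11211
	}
	return base + existing
}

For example, with three memcache clusters already stored, nextListenPort("memcache", 3) yields 11214. Note that because the rule counts rows rather than tracking the highest port handed out, deleting a cluster can cause a port to be reused.
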
// OverlordClusters pages through overlord clusters by zone and type,
// optionally filtered by a fuzzy name match.
func (s *Service) OverlordClusters(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	resp = &model.OverlordResp{}
	if arg.Name != "" {
		err = s.dao.DB.Where("zone=? AND type=? AND name like ?", arg.Zone, arg.Type, "%"+arg.Name+"%").Order("id desc").Offset((arg.PN - 1) * arg.PS).Limit(arg.PS).Find(&resp.Clusters).Error
		// Use the same wildcard pattern as the Find above so the total
		// matches the page contents.
		s.dao.DB.Model(&model.OverlordCluster{}).Where("zone=? AND type=? AND name like ?", arg.Zone, arg.Type, "%"+arg.Name+"%").Count(&resp.Total)
	} else {
		err = s.dao.DB.Where("zone=? AND type=?", arg.Zone, arg.Type).Order("id desc").Offset((arg.PN - 1) * arg.PS).Limit(arg.PS).Find(&resp.Clusters).Error
		s.dao.DB.Model(&model.OverlordCluster{}).Where("zone=? AND type=?", arg.Zone, arg.Type).Count(&resp.Total)
	}
	if err != nil {
		return
	}
	for _, cluster := range resp.Clusters {
		var ens *model.OverlordResp
		if ens, err = s.ExistOverlordNodes(c, &model.OverlordReq{Name: cluster.Name}); err != nil {
			return
		}
		cluster.Nodes = ens.Nodes
	}
	return
}

// ExistOverlordNodes returns the named cluster together with the nodes
// already stored for it.
func (s *Service) ExistOverlordNodes(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	cluster := &model.OverlordCluster{}
	if err = s.dao.DB.Model(cluster).Where("name=?", arg.Name).First(cluster).Error; err != nil {
		return
	}
	if cluster.ID == 0 {
		err = fmt.Errorf("cluster not exist:%s", arg.Name)
		return
	}
	var exists []*model.OverlordNode
	if err = s.dao.DB.Where("cid=?", cluster.ID).Order("id").Find(&exists).Error; err != nil {
		return
	}
	resp = &model.OverlordResp{}
	resp.Cluster = cluster
	resp.Nodes = exists
	return
}

// NotExistOverlordAddrs returns the ops target addresses of the named
// cluster that have not yet been stored as overlord nodes.
func (s *Service) NotExistOverlordAddrs(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	cluster := &model.OverlordCluster{}
	if err = s.dao.DB.Model(cluster).Where("name=?", arg.Name).First(cluster).Error; err != nil {
		return
	}
	if cluster.ID == 0 {
		err = fmt.Errorf("cluster not exist:%s", arg.Name)
		return
	}
	var targets []string
	tp := arg.Type
	switch tp {
	case "memcache":
		for _, opsmc := range s.opsMcs {
			if arg.Name == opsmc.Labels.Name {
				targets = opsmc.Targets
				break
			}
		}
	case "redis":
		for _, opsrd := range s.opsRds {
			if arg.Name == opsrd.Labels.Name {
				targets = opsrd.Targets
				if opsrd.Type == "redis_cluster" {
					tp = "redis_cluster"
				}
				break
			}
		}
	default:
		err = fmt.Errorf("unsupported type:%s", arg.Type)
		return
	}
	var exists []*model.OverlordNode
	if err = s.dao.DB.Where("cid=?", cluster.ID).Order("id").Find(&exists).Error; err != nil {
		return
	}
	resp = &model.OverlordResp{}
	// Keep only the targets that are not already stored as nodes.
NEXT:
	for _, target := range targets {
		for _, exist := range exists {
			if target == exist.Addr {
				continue NEXT
			}
		}
		resp.Addrs = append(resp.Addrs, target)
	}
	return
}

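The labeled NEXT loop above computes a set difference in O(n·m). A map-based equivalent, a sketch that is not part of this commit, trades a little memory for linear time:

// missingAddrs returns the targets absent from exists; equivalent to the
// labeled-continue loop in NotExistOverlordAddrs. Sketch only.
func missingAddrs(targets []string, exists []*model.OverlordNode) (out []string) {
	seen := make(map[string]struct{}, len(exists))
	for _, e := range exists {
		seen[e.Addr] = struct{}{}
	}
	for _, t := range targets {
		if _, ok := seen[t]; !ok {
			out = append(out, t)
		}
	}
	return
}
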
// ImportOpsNode imports the ops addresses not yet stored as nodes of the
// named cluster, numbering aliases after the existing nodes.
func (s *Service) ImportOpsNode(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	nen, err := s.NotExistOverlordAddrs(c, arg)
	if err != nil {
		return
	}
	en, err := s.ExistOverlordNodes(c, arg)
	if err != nil {
		return
	}
	i := len(en.Nodes)
	tranDB := s.dao.DB.Begin()
	for _, target := range nen.Addrs {
		ocn := &model.OverlordNode{
			Cid:    en.Cluster.ID,
			Alias:  arg.Name + strconv.Itoa(i+1),
			Addr:   target,
			Weight: 1,
		}
		if err = tranDB.Create(ocn).Error; err != nil {
			tranDB.Rollback()
			return
		}
		i++
	}
	err = tranDB.Commit().Error
	return
}

// ReplaceOpsNode points an existing node alias at a new address, refusing
// addresses already used by another alias in the cluster.
func (s *Service) ReplaceOpsNode(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	nen, err := s.NotExistOverlordAddrs(c, arg)
	if err != nil {
		return
	}
	if len(nen.Addrs) == 0 {
		err = fmt.Errorf("cluster has no new node:%s", arg.Name)
		return
	}
	en, err := s.ExistOverlordNodes(c, arg)
	if err != nil {
		return
	}
	for _, n := range en.Nodes {
		if n.Alias != arg.Alias && n.Addr == arg.Addr {
			err = fmt.Errorf("cluster:%s node:%s used by other node:%s", arg.Name, arg.Addr, n.Alias)
			return
		}
	}
	node := &model.OverlordNode{}
	if err = s.dao.DB.Model(node).Where("cid=? AND alias=?", en.Cluster.ID, arg.Alias).First(node).Error; err != nil {
		return
	}
	if node.Addr == arg.Addr {
		// Nothing to do: the alias already points at this address.
		return
	}
	err = s.dao.DB.Model(node).Where("alias=? AND addr=?", node.Alias, node.Addr).Update("addr", arg.Addr).Error
	return
}

// DelOverlordCluster deletes a cluster and all of its nodes.
func (s *Service) DelOverlordCluster(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	en, err := s.ExistOverlordNodes(c, arg)
	if err != nil {
		return
	}
	if err = s.dao.DB.Delete(en.Cluster).Error; err != nil {
		return
	}
	err = s.dao.DB.Delete(&model.OverlordNode{}, "cid=?", en.Cluster.ID).Error
	return
}

// DelOverlordNode deletes a single node by cluster, alias and address.
func (s *Service) DelOverlordNode(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	en, err := s.ExistOverlordNodes(c, arg)
	if err != nil {
		return
	}
	err = s.dao.DB.Delete(&model.OverlordNode{}, "cid=? AND alias=? AND addr=?", en.Cluster.ID, arg.Alias, arg.Addr).Error
	return
}

// OverlordAppClusters pages through the app-to-cluster bindings visible to
// the cookie's user and attaches the bound cluster (with nodes) to each app.
func (s *Service) OverlordAppClusters(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	appids, err := s.appids(c, arg.Cookie, arg.AppID)
	if err != nil {
		err = ecode.AccessDenied
		return
	}
	resp = &model.OverlordResp{}
	if len(appids) <= 1 {
		err = s.dao.DB.Where("app_id like ?", "%"+arg.AppID+"%").Order("id desc").Offset((arg.PN - 1) * arg.PS).Limit(arg.PS).Find(&resp.Apps).Error
		// Use the same wildcard pattern as the Find above so the total
		// matches the page contents.
		s.dao.DB.Model(&model.OverlordApp{}).Where("app_id like ?", "%"+arg.AppID+"%").Count(&resp.Total)
	} else {
		err = s.dao.DB.Where("app_id in (?)", appids).Order("id desc").Offset((arg.PN - 1) * arg.PS).Limit(arg.PS).Find(&resp.Apps).Error
		s.dao.DB.Model(&model.OverlordApp{}).Where("app_id in (?)", appids).Count(&resp.Total)
	}
	if err != nil || len(resp.Apps) == 0 {
		return
	}
	var cids []int64
	for _, app := range resp.Apps {
		cids = append(cids, app.Cid)
	}
	var clusters []*model.OverlordCluster
	if err = s.dao.DB.Find(&clusters, "id in (?)", cids).Error; err != nil {
		return
	}
	for _, cluster := range clusters {
		var ens *model.OverlordResp
		if ens, err = s.ExistOverlordNodes(c, &model.OverlordReq{Name: cluster.Name}); err != nil {
			return
		}
		cluster.Nodes = ens.Nodes
		for _, app := range resp.Apps {
			if cluster.ID == app.Cid {
				app.Cluster = cluster
			}
		}
	}
	if len(appids) <= 1 {
		// When querying by appid, also pull in the overlord-mesos data.
		// NOTE: this only extends the local clusters slice; resp is not
		// updated with the extra clusters here.
		if ocs, err := s.dao.OverlordClusters(c, "", arg.AppID); err == nil {
			clusters = append(clusters, ocs...)
		}
	}
	return
}

// OverlordAppCanBindClusters lists the cluster names an app may bind to in
// the given zone and of the given type.
func (s *Service) OverlordAppCanBindClusters(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	resp = &model.OverlordResp{}
	err = s.dao.DB.Where("zone=? AND type=?", arg.Zone, arg.Type).Find(&resp.Clusters).Error
	if err != nil {
		return
	}
	for _, cluster := range resp.Clusters {
		resp.Names = append(resp.Names, cluster.Name)
	}
	// Only the names are returned; drop the full cluster rows.
	resp.Clusters = nil
	return
}

// OverlordAppClusterBind binds an app (resolved to its service-tree id) to
// a cluster.
func (s *Service) OverlordAppClusterBind(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	treeid, err := s.treeid(c, arg.Cookie, arg.AppID)
	if err != nil || treeid == 0 {
		err = ecode.AccessDenied
		return
	}
	cluster := &model.OverlordCluster{}
	if err = s.dao.DB.Model(cluster).Where("zone=? AND type=? AND name=?", arg.Zone, arg.Type, arg.Name).First(cluster).Error; err != nil {
		return
	}
	app := &model.OverlordApp{
		TreeID: treeid,
		AppID:  arg.AppID,
		Cid:    cluster.ID,
	}
	err = s.dao.DB.Create(app).Error
	return
}

// OverlordAppClusterDel removes the binding between an app and a cluster.
func (s *Service) OverlordAppClusterDel(c context.Context, arg *model.OverlordReq) (resp *model.EmpResp, err error) {
	treeid, err := s.treeid(c, arg.Cookie, arg.AppID)
	if err != nil || treeid == 0 {
		err = ecode.AccessDenied
		return
	}
	cluster := &model.OverlordCluster{}
	if err = s.dao.DB.Model(cluster).Where("zone=? AND type=? AND name=?", arg.Zone, arg.Type, arg.Name).First(cluster).Error; err != nil {
		return
	}
	app := &model.OverlordApp{}
	if err = s.dao.DB.Model(app).Where("app_id=? AND cid=?", arg.AppID, cluster.ID).First(app).Error; err != nil {
		return
	}
	err = s.dao.DB.Table(app.TableName()).Delete(app).Error
	return
}

// OverlordAppAppIDs lists every appid the cookie's user may access.
func (s *Service) OverlordAppAppIDs(c context.Context, arg *model.OverlordReq) (resp *model.OverlordResp, err error) {
	appids, err := s.appids(c, arg.Cookie, "")
	if err != nil {
		err = ecode.AccessDenied
		return
	}
	resp = &model.OverlordResp{}
	resp.AppIDs = appids
	return
}

// OverlordToml returns a toml file of cluster infos for the given app.
func (s *Service) OverlordToml(c context.Context, arg *model.OverlordReq) (resp []byte, err error) {
	var apps []*model.OverlordApp
	if err = s.dao.DB.Where("app_id=?", arg.AppID).Find(&apps).Error; err != nil {
		return
	}
	var cids []int64
	for _, app := range apps {
		cids = append(cids, app.Cid)
	}
	var clusters []*model.OverlordCluster
	// TODO(felix): drop this once everything goes through overlord-mesos.
	if err = s.dao.DB.Where("zone=? AND id in (?)", arg.Zone, cids).Find(&clusters).Error; err != nil {
		return
	}
	if len(clusters) == 0 {
		// TODO(felix): drop this once everything goes through overlord-mesos.
		if err = s.dao.DB.Where("zone='sh001' AND id in (?)", cids).Find(&clusters).Error; err != nil {
			return
		}
	}
	var ocs []*model.OverlordCluster
	if ocs, err = s.dao.OverlordClusters(c, arg.Zone, arg.AppID); err == nil {
		if len(ocs) == 0 {
			ocs, err = s.dao.OverlordClusters(c, "sh001", arg.AppID)
		}
		if len(ocs) > 0 {
			clusters = append(clusters, ocs...)
		}
	}
	t := struct {
		Clusters []*model.OverlordToml `toml:"clusters"`
	}{}
	for _, cluster := range clusters {
		ot := &model.OverlordToml{
			Name:             cluster.Name,
			Type:             cluster.Type,
			HashMethod:       cluster.HashMethod,
			HashDistribution: cluster.HashDistribution,
			HashTag:          cluster.HashTag,
			ListenProto:      cluster.ListenProto,
			ListenAddr:       cluster.ListenAddr,
			DailTimeout:      cluster.DailTimeout,
			ReadTimeout:      cluster.ReadTimeout,
			WriteTimeout:     cluster.WriteTimeout,
			NodeConn:         cluster.NodeConn,
			PingFailLimit:    cluster.PingFailLimit,
			PingAutoEject:    cluster.PingAutoEject,
		}
		var nodes []*model.OverlordNode
		if len(cluster.Nodes) == 0 {
			if err = s.dao.DB.Where("cid=?", cluster.ID).Order("id").Find(&nodes).Error; err != nil {
				return
			}
		} else {
			nodes = cluster.Nodes
		}
		var servers []string
		for _, node := range nodes {
			var server string
			if cluster.Type == "redis_cluster" {
				// redis_cluster entries are bare addresses; other types
				// use the "addr:weight alias" server-line format.
				server = node.Addr
			} else {
				server = fmt.Sprintf("%s:%d %s", node.Addr, node.Weight, node.Alias)
			}
			servers = append(servers, server)
		}
		ot.Servers = servers
		t.Clusters = append(t.Clusters, ot)
	}
	var buf bytes.Buffer
	err = toml.NewEncoder(&buf).Encode(t)
	resp = buf.Bytes()
	return
}
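OverlordToml here and Toml in service.go below share one encoding pattern: build a wrapper struct with a clusters toml tag and stream it through the BurntSushi encoder. A self-contained sketch of that pattern; the cluster type and its field tags are hypothetical stand-ins, not the real model.OverlordToml:

package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

// cluster is a hypothetical, trimmed-down stand-in for model.OverlordToml.
type cluster struct {
	Name    string   `toml:"name"`
	Type    string   `toml:"cache_type"`
	Servers []string `toml:"servers"`
}

func main() {
	t := struct {
		Clusters []cluster `toml:"clusters"`
	}{
		Clusters: []cluster{{
			Name:    "demo-mc",
			Type:    "memcache",
			Servers: []string{"127.0.0.1:11211:1 demo-mc1"},
		}},
	}
	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(t); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// Prints (shape only):
	//
	// [[clusters]]
	//   name = "demo-mc"
	//   cache_type = "memcache"
	//   servers = ["127.0.0.1:11211:1 demo-mc1"]
}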
app/admin/main/cache/service/service.go (vendored, new file, 256 lines)
@@ -0,0 +1,256 @@
package service

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"

	"go-common/app/admin/main/cache/conf"
	"go-common/app/admin/main/cache/dao"
	"go-common/app/admin/main/cache/model"
	"go-common/library/ecode"

	"github.com/BurntSushi/toml"
)

// Service is the cache admin service.
type Service struct {
	c   *conf.Config
	dao *dao.Dao

	opsMcs []*model.OpsCacheMemcache
	opsRds []*model.OpsCacheRedis
}

// New creates a Service and starts the background ops-cache refresher.
func New(c *conf.Config) (s *Service) {
	s = &Service{
		c:   c,
		dao: dao.New(c),
	}
	go s.loadOpsproc()
	return s
}

// Ping checks service health.
func (s *Service) Ping(c context.Context) (err error) {
	return
}

// Close releases the dao's resources.
func (s *Service) Close() {
	s.dao.Close()
}

// appids resolves the cookie to the app paths its user may access; when
// appid is non-empty, the result is narrowed to that single appid.
func (s *Service) appids(c context.Context, cookie, appid string) (appids []string, err error) {
	msg, err := s.dao.Auth(c, cookie)
	if err != nil {
		err = ecode.AccessDenied
		return
	}
	tmp, ok := msg["token"]
	if !ok {
		err = ecode.NothingFound
		return
	}
	token, ok := tmp.(string)
	if !ok {
		err = ecode.NothingFound
		return
	}
	nodes, err := s.dao.Role(c, token)
	if err != nil {
		return
	}
	if appid == "" {
		for _, node := range nodes.Data {
			appids = append(appids, node.Path)
		}
	} else {
		for _, node := range nodes.Data {
			if appid == node.Path {
				appids = []string{appid}
				break
			}
		}
	}
	return
}

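appids above and treeid below repeat the same auth-token extraction. A shared helper could factor it out; a sketch only, this helper is not part of the commit:

// authToken extracts the session token that appids and treeid both look
// up in the auth message. Sketch only.
func (s *Service) authToken(c context.Context, cookie string) (string, error) {
	msg, err := s.dao.Auth(c, cookie)
	if err != nil {
		return "", ecode.AccessDenied
	}
	// A failed type assertion also covers the missing-key case.
	token, ok := msg["token"].(string)
	if !ok {
		return "", ecode.NothingFound
	}
	return token, nil
}
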
// treeid resolves the cookie and appid to the service-tree node id; zero
// means the user has no node matching the appid.
func (s *Service) treeid(c context.Context, cookie, appid string) (treeid int64, err error) {
	if appid == "" {
		err = ecode.AccessDenied
		return
	}
	msg, err := s.dao.Auth(c, cookie)
	if err != nil {
		err = ecode.AccessDenied
		return
	}
	tmp, ok := msg["token"]
	if !ok {
		err = ecode.NothingFound
		return
	}
	token, ok := tmp.(string)
	if !ok {
		err = ecode.NothingFound
		return
	}
	nodes, err := s.dao.Role(c, token)
	if err != nil {
		return
	}
	for _, node := range nodes.Data {
		if appid == node.Path {
			treeid = node.ID
			return
		}
	}
	return
}

// Clusters pages through the clusters of the apps the cookie's user may
// access, filtered by zone and type.
func (s *Service) Clusters(c context.Context, req *model.ClusterReq) (resp *model.ClusterResp, err error) {
	appids, err := s.appids(c, req.Cookie, req.AppID)
	if err != nil {
		err = ecode.AccessDenied
		return
	}
	resp = new(model.ClusterResp)
	if len(appids) == 0 {
		return
	}
	if err = s.dao.DB.Where("appids in (?) AND zone=? AND type=?", appids, req.Zone, req.Type).Order("id").Offset((req.PN - 1) * req.PS).Limit(req.PS).Find(&resp.Clusters).Error; err != nil {
		return
	}
	var count int64
	s.dao.DB.Model(&model.Cluster{}).Where("appids in (?) AND zone=? AND type=?", appids, req.Zone, req.Type).Count(&count)
	resp.Total = count
	return
}

// AddCluster creates a cluster, or updates it in place when req.ID is set.
func (s *Service) AddCluster(c context.Context, req *model.AddClusterReq) (resp *model.EmpResp, err error) {
	cluster := &model.Cluster{
		Name:             req.Name,
		Type:             req.Type,
		AppID:            req.AppID,
		Zone:             req.Zone,
		HashMethod:       req.HashMethod,
		HashDistribution: req.HashDistribution,
		HashTag:          req.HashTag,
		DailTimeout:      req.DailTimeout,
		ReadTimeout:      req.ReadTimeout,
		WriteTimeout:     req.WriteTimeout,
		NodeConn:         req.NodeConn,
		ListenAddr:       req.ListenAddr,
		ListenProto:      req.ListenProto,
		PingFailLimit:    req.PingFailLimit,
		PingAutoEject:    req.PingAutoEject,
	}

	if req.ID == 0 {
		err = s.dao.DB.Create(cluster).Error
	} else {
		cluster.ID = req.ID
		// Propagate the update error instead of silently discarding it.
		err = s.dao.DB.Save(cluster).Error
	}
	return
}

// DelCluster deletes the cluster with req.ID together with its nodes.
func (s *Service) DelCluster(c context.Context, req *model.DelClusterReq) (resp *model.EmpResp, err error) {
	err = s.dao.DB.Exec("DELETE FROM cluster where id= ?", req.ID).Error
	if err != nil {
		return
	}
	err = s.dao.DB.Exec("DELETE FROM nodes where cid= ?", req.ID).Error
	return
}

// Cluster searches clusters by appid (and optionally type), loading the
// nodes of every match.
func (s *Service) Cluster(c context.Context, req *model.ClusterReq) (resp []*model.Cluster, err error) {
	if req.Type != "" {
		err = s.dao.DB.Where("appids=? AND zone=? AND type=?", req.AppID, req.Zone, req.Type).Find(&resp).Error
	} else {
		err = s.dao.DB.Where("appids=? AND zone=?", req.AppID, req.Zone).Find(&resp).Error
	}
	if err != nil {
		return
	}
	for _, clu := range resp {
		if err = s.dao.DB.Where("cid = ?", clu.ID).Find(&clu.Nodes).Error; err != nil {
			return nil, err
		}
	}
	return
}

// ModifyCluster adds (Action == 1) or deletes (Action == 2) cluster nodes;
// req.Nodes carries the node list as a JSON array.
func (s *Service) ModifyCluster(c context.Context, req *model.ModifyClusterReq) (resp *model.EmpResp, err error) {
	var nodes []*model.NodeDtl
	if err = json.Unmarshal([]byte(req.Nodes), &nodes); err != nil {
		return
	}
	var id = req.ID
	if req.Name != "" {
		var cluster = &model.Cluster{}
		if err = s.dao.DB.Where("name = ?", req.Name).First(cluster).Error; err != nil {
			return
		}
		id = cluster.ID
	}
	if req.Action == 2 {
		var alias []string
		for _, ali := range nodes {
			alias = append(alias, ali.Alias)
		}
		// Pass the slice itself so gorm expands the IN clause; joining the
		// aliases into one comma-separated string would match nothing.
		err = s.dao.DB.Exec("DELETE FROM nodes WHERE alias in (?)", alias).Error
		return
	} else if req.Action == 1 {
		for _, node := range nodes {
			node.Cid = id
			// Stop on the first failed insert instead of overwriting err.
			if err = s.dao.DB.Create(node).Error; err != nil {
				return
			}
		}
		return
	}
	return
}

// ClusterDtl gets cluster detail, i.e. its nodes.
func (s *Service) ClusterDtl(c context.Context, req *model.ClusterDtlReq) (resp *model.ClusterDtlResp, err error) {
	resp = new(model.ClusterDtlResp)
	err = s.dao.DB.Where("cid = ?", req.ID).Find(&resp.Nodes).Error
	// TODO(lintanghui): get node info
	return
}

// Toml returns a toml file of cluster infos.
func (s *Service) Toml(c context.Context, req *model.ClusterReq) (resp []byte, err error) {
	clusters, err := s.Cluster(c, req)
	if err != nil {
		return
	}
	for _, cluster := range clusters {
		for _, node := range cluster.Nodes {
			// "addr:weight alias" server-line format.
			cluster.Servers = append(cluster.Servers, fmt.Sprintf("%s:%d %s", node.Addr, node.Weight, node.Alias))
		}
	}
	var buf bytes.Buffer
	t := struct {
		Clusters []*model.Cluster `toml:"clusters"`
	}{
		Clusters: clusters,
	}
	err = toml.NewEncoder(&buf).Encode(t)
	resp = buf.Bytes()
	return
}
app/admin/main/cache/service/service_test.go (vendored, new file, 91 lines)
@@ -0,0 +1,91 @@
package service

import (
	"context"
	"flag"
	"os"
	"path/filepath"
	"testing"

	"go-common/app/admin/main/cache/conf"
	"go-common/app/admin/main/cache/model"

	. "github.com/smartystreets/goconvey/convey"
)

var (
	svr *Service
)

func TestMain(m *testing.M) {
	var (
		err error
	)
	dir, _ := filepath.Abs("../cmd/test.toml")
	if err = flag.Set("conf", dir); err != nil {
		panic(err)
	}
	if err = conf.Init(); err != nil {
		panic(err)
	}
	svr = New(conf.Conf)
	os.Exit(m.Run())
}

func TestCluster(t *testing.T) {
	Convey("test cluster", t, func() {
		req := &model.ClusterReq{
			PN: 1,
			PS: 10,
		}
		resp, err := svr.Clusters(context.TODO(), req)
		So(err, ShouldBeNil)
		So(resp, ShouldNotBeNil)
		// Guard the log: Clusters may legitimately return an empty page.
		if len(resp.Clusters) > 0 {
			t.Logf("resp %v", resp.Clusters[0])
		}
	})
}

func TestAddCluster(t *testing.T) {
	Convey("test add cluster", t, func() {
		req := &model.AddClusterReq{
			Type:             "memcache",
			AppID:            "test",
			HashMethod:       "sha1",
			HashDistribution: "ketama",
		}
		_, err := svr.AddCluster(context.TODO(), req)
		So(err, ShouldBeNil)
	})
}

func TestSearchCluster(t *testing.T) {
	Convey("test search cluster", t, func() {
		req := &model.ClusterReq{
			AppID: "test",
		}
		resp, err := svr.Cluster(context.TODO(), req)
		So(err, ShouldBeNil)
		So(resp, ShouldNotBeNil)
		t.Logf("search resp %+v", resp)
	})
}

func TestModifyCluster(t *testing.T) {
	Convey("test add cluster nodes", t, func() {
		req := &model.ModifyClusterReq{
			ID:     1,
			Action: 1,
			Nodes:  `[{"addr":"11","alias":"test1","weight":1}]`,
		}
		_, err := svr.ModifyCluster(context.TODO(), req)
		So(err, ShouldBeNil)
	})
	Convey("test get cluster detail", t, func() {
		req := &model.ClusterDtlReq{
			ID: 1,
		}
		resp, err := svr.ClusterDtl(context.TODO(), req)
		So(err, ShouldBeNil)
		So(len(resp.Nodes), ShouldEqual, 2)
	})
}