VOL-1577 - General cleanup (gofmt, go vet, go test, dep check)
Change-Id: I536b2746b8bd266f3e75aeccc65bfe7468f1b44a
diff --git a/Makefile b/Makefile
index a89e3e0..be79cbd 100644
--- a/Makefile
+++ b/Makefile
@@ -74,6 +74,7 @@
@echo "simulated_onu : Build the simulated_onu docker container"
@echo "lint-style : Verify code is properly gofmt-ed"
@echo "lint-sanity : Verify that 'go vet' doesn't report any issues"
+ @echo "lint-dep : Verify the integrity of the `dep` files"
@echo "lint : Shorthand for lint-style & lint-sanity"
@echo "test : Generate reports for all go tests"
@echo
@@ -135,7 +136,10 @@
lint-sanity:
go vet ./...
-lint: lint-style lint-sanity
+lint-dep:
+ dep check
+
+lint: lint-style lint-sanity lint-dep
test:
hash go-junit-report > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
diff --git a/afrouter/afrouter/affinity-router.go b/afrouter/afrouter/affinity-router.go
index 30d1982..45ec26a 100644
--- a/afrouter/afrouter/affinity-router.go
+++ b/afrouter/afrouter/affinity-router.go
@@ -18,37 +18,37 @@
package afrouter
import (
- "fmt"
"errors"
+ "fmt"
+ "github.com/golang/protobuf/proto"
+ pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "github.com/opencord/voltha-go/common/log"
+ "google.golang.org/grpc"
+ "io/ioutil"
"regexp"
"strconv"
- "io/ioutil"
- "google.golang.org/grpc"
- "github.com/golang/protobuf/proto"
- "github.com/opencord/voltha-go/common/log"
- pb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)
const (
- PKG_MTHD_PKG int = 1
+ PKG_MTHD_PKG int = 1
PKG_MTHD_MTHD int = 2
)
type AffinityRouter struct {
- name string
- routerType int // TODO: This is probably not needed
- association int
- routingField string
- grpcService string
- protoDescriptor *pb.FileDescriptorSet
- methodMap map[string]byte
+ name string
+ routerType int // TODO: This is probably not needed
+ association int
+ routingField string
+ grpcService string
+ protoDescriptor *pb.FileDescriptorSet
+ methodMap map[string]byte
nbBindingMthdMap map[string]byte
- bkndClstr *backendCluster
- affinity map[string]*backend
- curBknd **backend
+ bkndClstr *backendCluster
+ affinity map[string]*backend
+ curBknd **backend
}
-func newAffinityRouter(rconf *RouterConfig, config *RouteConfig) (Router,error) {
+func newAffinityRouter(rconf *RouterConfig, config *RouteConfig) (Router, error) {
var err error = nil
var rtrn_err bool = false
var pkg_re *regexp.Regexp = regexp.MustCompile(`^(\.[^.]+\.)(.+)$`)
@@ -80,28 +80,27 @@
// routing_field. This needs to be added so that methods
// can have different routing fields.
var bptr *backend
- bptr = nil
+ bptr = nil
dr := AffinityRouter{
- name:config.Name,
- grpcService:rconf.ProtoService,
- affinity:make(map[string]*backend),
- methodMap:make(map[string]byte),
- nbBindingMthdMap:make(map[string]byte),
- curBknd:&bptr,
+ name: config.Name,
+ grpcService: rconf.ProtoService,
+ affinity: make(map[string]*backend),
+ methodMap: make(map[string]byte),
+ nbBindingMthdMap: make(map[string]byte),
+ curBknd: &bptr,
//serialNo:0,
}
// An association must exist
dr.association = strIndex(rAssnNames, config.Association)
if dr.association == 0 {
if config.Association == "" {
- log.Error("An association must be specified")
+ log.Error("An association must be specified")
} else {
- log.Errorf("The association '%s' is not valid", config.Association)
+ log.Errorf("The association '%s' is not valid", config.Association)
}
rtrn_err = true
}
-
// This has already been validated bfore this function
// is called so just use it.
for idx := range rTypeNames {
@@ -113,9 +112,9 @@
// Load the protobuf descriptor file
dr.protoDescriptor = &pb.FileDescriptorSet{}
- fb, err := ioutil.ReadFile(config.ProtoFile);
+ fb, err := ioutil.ReadFile(config.ProtoFile)
if err != nil {
- log.Errorf("Could not open proto file '%s'",config.ProtoFile)
+ log.Errorf("Could not open proto file '%s'", config.ProtoFile)
rtrn_err = true
}
err = proto.Unmarshal(fb, dr.protoDescriptor)
@@ -124,42 +123,41 @@
rtrn_err = true
}
-
// Build the routing structure based on the loaded protobuf
// descriptor file and the config information.
type key struct {
- mthd string
+ mthd string
field string
}
var msgs map[key]byte = make(map[key]byte)
- for _,f := range dr.protoDescriptor.File {
+ for _, f := range dr.protoDescriptor.File {
// Build a temporary map of message types by name.
- for _,m := range f.MessageType {
- for _,fld := range m.Field {
+ for _, m := range f.MessageType {
+ for _, fld := range m.Field {
log.Debugf("Processing message '%s', field '%s'", *m.Name, *fld.Name)
msgs[key{*m.Name, *fld.Name}] = byte(*fld.Number)
}
}
}
log.Debugf("The map contains: %v", msgs)
- for _,f := range dr.protoDescriptor.File {
+ for _, f := range dr.protoDescriptor.File {
if *f.Package == rconf.ProtoPackage {
- for _, s:= range f.Service {
+ for _, s := range f.Service {
if *s.Name == rconf.ProtoService {
log.Debugf("Loading package data '%s' for service '%s' for router '%s'", *f.Package, *s.Name, dr.name)
// Now create a map keyed by method name with the value being the
// field number of the route selector.
var ok bool
- for _,m := range s.Method {
+ for _, m := range s.Method {
// Find the input type in the messages and extract the
// field number and save it for future reference.
- log.Debugf("Processing method '%s'",*m.Name)
+ log.Debugf("Processing method '%s'", *m.Name)
// Determine if this is a method we're supposed to be processing.
if needMethod(*m.Name, config) == true {
- log.Debugf("Enabling method '%s'",*m.Name)
+ log.Debugf("Enabling method '%s'", *m.Name)
pkg_methd := pkg_re.FindStringSubmatch(*m.InputType)
if pkg_methd == nil {
- log.Errorf("Regular expression didn't match input type '%s'",*m.InputType)
+ log.Errorf("Regular expression didn't match input type '%s'", *m.InputType)
rtrn_err = true
}
// The input type has the package name prepended to it. Remove it.
@@ -168,19 +166,19 @@
dr.methodMap[*m.Name], ok = msgs[key{in, config.RouteField}]
if ok == false {
log.Errorf("Method '%s' has no field named '%s' in it's parameter message '%s'",
- *m.Name, config.RouteField, in)
+ *m.Name, config.RouteField, in)
rtrn_err = true
}
}
// The sb method is always included in the methods so we can check it here too.
if needSbMethod(*m.Name, config) == true {
- log.Debugf("Enabling southbound method '%s'",*m.Name)
+ log.Debugf("Enabling southbound method '%s'", *m.Name)
// The output type has the package name prepended to it. Remove it.
out := (*m.OutputType)[len(rconf.ProtoPackage)+2:]
dr.nbBindingMthdMap[*m.Name], ok = msgs[key{out, config.RouteField}]
if ok == false {
log.Errorf("Method '%s' has no field named '%s' in it's parameter message '%s'",
- *m.Name, config.RouteField, out)
+ *m.Name, config.RouteField, out)
rtrn_err = true
}
}
@@ -190,8 +188,7 @@
}
}
-
- // Create the backend cluster or link to an existing one
+ // Create the backend cluster or link to an existing one
ok := true
if dr.bkndClstr, ok = bClusters[config.backendCluster.Name]; ok == false {
if dr.bkndClstr, err = newBackendCluster(config.backendCluster); err != nil {
@@ -201,14 +198,14 @@
}
if rtrn_err {
- return dr,errors.New(fmt.Sprintf("Failed to create a new router '%s'",dr.name))
+ return dr, errors.New(fmt.Sprintf("Failed to create a new router '%s'", dr.name))
}
- return dr,nil
+ return dr, nil
}
func needSbMethod(mthd string, conf *RouteConfig) bool {
- for _,m := range conf.NbBindingMethods {
+ for _, m := range conf.NbBindingMethods {
if mthd == m {
return true
}
@@ -217,7 +214,7 @@
}
func needMethod(mthd string, conf *RouteConfig) bool {
- for _,m := range conf.Methods {
+ for _, m := range conf.Methods {
if mthd == m {
return true
}
@@ -225,36 +222,38 @@
return false
}
-func (r AffinityRouter) Service() (string) {
+func (r AffinityRouter) Service() string {
return r.grpcService
}
-func (r AffinityRouter) Name() (string) {
+func (r AffinityRouter) Name() string {
return r.name
}
-func (r AffinityRouter) skipField(data *[]byte, idx *int) (error) {
- switch (*data)[*idx]&3 {
- case 0: // Varint
+func (r AffinityRouter) skipField(data *[]byte, idx *int) error {
+ switch (*data)[*idx] & 3 {
+ case 0: // Varint
(*idx)++
- for (*data)[*idx] >= 128 { (*idx)++}
- case 1: // 64 bit
- (*idx)+= 9
- case 2: // Length delimited
+ for (*data)[*idx] >= 128 {
(*idx)++
- b := proto.NewBuffer((*data)[*idx:])
- t , _ := b.DecodeVarint()
- (*idx) += int(t)+1
- case 3: // Deprecated
- case 4: // Deprecated
- case 5: // 32 bit
- (*idx)+= 5
+ }
+ case 1: // 64 bit
+ (*idx) += 9
+ case 2: // Length delimited
+ (*idx)++
+ b := proto.NewBuffer((*data)[*idx:])
+ t, _ := b.DecodeVarint()
+ (*idx) += int(t) + 1
+ case 3: // Deprecated
+ case 4: // Deprecated
+ case 5: // 32 bit
+ (*idx) += 5
}
return nil
}
func (r AffinityRouter) decodeProtoField(payload []byte, fieldId byte) (string, error) {
- idx :=0
+ idx := 0
b := proto.NewBuffer([]byte{})
//b.DebugPrint("The Buffer", payload)
for { // Find the route selector field
@@ -265,31 +264,31 @@
// TODO: Consider supporting other selector types.... Way, way in the future
// ok, the future is now, support strings as well... ugh.
var selector string
- switch payload[idx]&3 {
- case 0: // Integer
- b.SetBuf(payload[idx+1:])
- v,e := b.DecodeVarint()
- if e == nil {
- log.Debugf("Decoded the ing field: %v", v)
- selector = strconv.Itoa(int(v))
- } else {
- log.Errorf("Failed to decode varint %v", e)
- return "", e
- }
- case 2: // Length delimited AKA string
- b.SetBuf(payload[idx+1:])
- v,e := b.DecodeStringBytes()
- if e == nil {
- log.Debugf("Decoded the string field: %v", v)
- selector = v
- } else {
- log.Errorf("Failed to decode string %v", e)
- return "", e
- }
- default:
- err := errors.New(fmt.Sprintf("Only integer and string route selectors are permitted"))
- log.Error(err)
- return "", err
+ switch payload[idx] & 3 {
+ case 0: // Integer
+ b.SetBuf(payload[idx+1:])
+ v, e := b.DecodeVarint()
+ if e == nil {
+ log.Debugf("Decoded the ing field: %v", v)
+ selector = strconv.Itoa(int(v))
+ } else {
+ log.Errorf("Failed to decode varint %v", e)
+ return "", e
+ }
+ case 2: // Length delimited AKA string
+ b.SetBuf(payload[idx+1:])
+ v, e := b.DecodeStringBytes()
+ if e == nil {
+ log.Debugf("Decoded the string field: %v", v)
+ selector = v
+ } else {
+ log.Errorf("Failed to decode string %v", e)
+ return "", e
+ }
+ default:
+ err := errors.New(fmt.Sprintf("Only integer and string route selectors are permitted"))
+ log.Error(err)
+ return "", err
}
return selector, nil
} else if err := r.skipField(&payload, &idx); err != nil {
@@ -301,54 +300,54 @@
func (r AffinityRouter) Route(sel interface{}) *backend {
switch sl := sel.(type) {
- case *nbFrame:
- log.Debugf("Route called for nbFrame with method %s", sl.mthdSlice[REQ_METHOD]);
- // Check if this method should be affinity bound from the
- // reply rather than the request.
- if _,ok := r.nbBindingMthdMap[sl.mthdSlice[REQ_METHOD]]; ok == true {
+ case *nbFrame:
+ log.Debugf("Route called for nbFrame with method %s", sl.mthdSlice[REQ_METHOD])
+ // Check if this method should be affinity bound from the
+ // reply rather than the request.
+ if _, ok := r.nbBindingMthdMap[sl.mthdSlice[REQ_METHOD]]; ok == true {
+ var err error
+ log.Debugf("Method '%s' affinity binds on reply", sl.mthdSlice[REQ_METHOD])
+ // Just round robin route the southbound request
+ if *r.curBknd, err = r.bkndClstr.nextBackend(*r.curBknd, BE_SEQ_RR); err == nil {
+ return *r.curBknd
+ } else {
+ sl.err = err
+ return nil
+ }
+ }
+ // Not a south affinity binding method, proceed with north affinity binding.
+ if selector, err := r.decodeProtoField(sl.payload, r.methodMap[sl.mthdSlice[REQ_METHOD]]); err == nil {
+ log.Debugf("Establishing affinity for selector: %s", selector)
+ if rtrn, ok := r.affinity[selector]; ok {
+ return rtrn
+ } else {
+ // The selector isn't in the map, create a new affinity mapping
+ log.Debugf("MUST CREATE A NEW AFFINITY MAP ENTRY!!")
var err error
- log.Debugf("Method '%s' affinity binds on reply", sl.mthdSlice[REQ_METHOD])
- // Just round robin route the southbound request
- if *r.curBknd, err = r.bkndClstr.nextBackend(*r.curBknd,BE_SEQ_RR); err == nil {
+ if *r.curBknd, err = r.bkndClstr.nextBackend(*r.curBknd, BE_SEQ_RR); err == nil {
+ r.setAffinity(selector, *r.curBknd)
+ //r.affinity[selector] = *r.curBknd
+ //log.Debugf("New affinity set to backend %s",(*r.curBknd).name)
return *r.curBknd
} else {
sl.err = err
return nil
}
}
- // Not a south affinity binding method, proceed with north affinity binding.
- if selector,err := r.decodeProtoField(sl.payload, r.methodMap[sl.mthdSlice[REQ_METHOD]]); err == nil {
- log.Debugf("Establishing affinity for selector: %s", selector)
- if rtrn,ok := r.affinity[selector]; ok {
- return rtrn
- } else {
- // The selector isn't in the map, create a new affinity mapping
- log.Debugf("MUST CREATE A NEW AFFINITY MAP ENTRY!!")
- var err error
- if *r.curBknd, err = r.bkndClstr.nextBackend(*r.curBknd,BE_SEQ_RR); err == nil {
- r.setAffinity(selector, *r.curBknd)
- //r.affinity[selector] = *r.curBknd
- //log.Debugf("New affinity set to backend %s",(*r.curBknd).name)
- return *r.curBknd
- } else {
- sl.err = err
- return nil
- }
- }
- }
- default:
- log.Errorf("Internal: invalid data type in Route call %v", sel);
- return nil
+ }
+ default:
+ log.Errorf("Internal: invalid data type in Route call %v", sel)
+ return nil
}
- log.Errorf("Bad lookup in affinity map %v",r.affinity);
+ log.Errorf("Bad lookup in affinity map %v", r.affinity)
return nil
}
-func (ar AffinityRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string,string,error) {
- return "","",nil
+func (ar AffinityRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string, string, error) {
+ return "", "", nil
}
-func (ar AffinityRouter) BackendCluster(mthd string, metaKey string) (*backendCluster,error) {
+func (ar AffinityRouter) BackendCluster(mthd string, metaKey string) (*backendCluster, error) {
return ar.bkndClstr, nil
}
@@ -361,41 +360,41 @@
func (r AffinityRouter) ReplyHandler(sel interface{}) error {
switch sl := sel.(type) {
- case *sbFrame:
- sl.lck.Lock()
- defer sl.lck.Unlock()
- log.Debugf("Reply handler called for sbFrame with method %s", sl.method);
- // Determine if reply action is required.
- if fld, ok := r.nbBindingMthdMap[sl.method]; ok == true && len(sl.payload) > 0 {
- // Extract the field value from the frame and
- // and set affinity accordingly
- if selector,err := r.decodeProtoField(sl.payload, fld); err == nil {
- log.Debug("Settign affinity on reply")
- if r.setAffinity(selector, sl.be) != nil {
- log.Error("Setting affinity on reply failed")
- }
- return nil
- } else {
- err := errors.New(fmt.Sprintf("Failed to decode reply field %d for method %s", fld, sl.method))
- log.Error(err)
- return err
+ case *sbFrame:
+ sl.lck.Lock()
+ defer sl.lck.Unlock()
+ log.Debugf("Reply handler called for sbFrame with method %s", sl.method)
+ // Determine if reply action is required.
+ if fld, ok := r.nbBindingMthdMap[sl.method]; ok == true && len(sl.payload) > 0 {
+ // Extract the field value from the frame and
+ // and set affinity accordingly
+ if selector, err := r.decodeProtoField(sl.payload, fld); err == nil {
+ log.Debug("Settign affinity on reply")
+ if r.setAffinity(selector, sl.be) != nil {
+ log.Error("Setting affinity on reply failed")
}
+ return nil
+ } else {
+ err := errors.New(fmt.Sprintf("Failed to decode reply field %d for method %s", fld, sl.method))
+ log.Error(err)
+ return err
}
- return nil
- default:
- err := errors.New(fmt.Sprintf("Internal: invalid data type in ReplyHander call %v", sl))
- log.Error(err)
- return err
+ }
+ return nil
+ default:
+ err := errors.New(fmt.Sprintf("Internal: invalid data type in ReplyHandler call %v", sl))
+ log.Error(err)
+ return err
}
}
func (ar AffinityRouter) setAffinity(key string, be *backend) error {
- if be2,ok := ar.affinity[key]; ok == false {
+ if be2, ok := ar.affinity[key]; ok == false {
ar.affinity[key] = be
- log.Debugf("New affinity set to backend %s for key %s",be.name, key)
+ log.Debugf("New affinity set to backend %s for key %s", be.name, key)
} else if be2 != be {
err := errors.New(fmt.Sprintf("Attempting multiple sets of affinity for key %s to backend %s from %s on router %s",
- key, be.name, ar.affinity[key].name, ar.name))
+ key, be.name, ar.affinity[key].name, ar.name))
log.Error(err)
return err
}
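The reformatted skipField/decodeProtoField above walk the raw protobuf payload until they reach the configured route-selector field and return it as a string (only varint and length-delimited selectors are supported). The following standalone sketch shows the same idea using the proto.Buffer helpers this file already imports; it uses the conventional (key>>3, key&7) tag split rather than the router's in-place index walking, and findSelector plus the hand-built payload in main are illustrative only, not part of this patch.

package main

import (
	"fmt"
	"strconv"

	"github.com/golang/protobuf/proto"
)

// findSelector scans a serialized protobuf message and returns the value of
// the field numbered fieldNum as a string. Only varint and string fields are
// handled, matching the selector types decodeProtoField accepts.
func findSelector(payload []byte, fieldNum uint64) (string, error) {
	b := proto.NewBuffer(payload)
	for {
		key, err := b.DecodeVarint() // tag = (field number << 3) | wire type
		if err != nil {
			return "", fmt.Errorf("field %d not found: %v", fieldNum, err)
		}
		num, wire := key>>3, key&7
		switch {
		case num == fieldNum && wire == 0: // varint selector
			v, err := b.DecodeVarint()
			if err != nil {
				return "", err
			}
			return strconv.FormatUint(v, 10), nil
		case num == fieldNum && wire == 2: // length-delimited (string) selector
			return b.DecodeStringBytes()
		case wire == 0: // skip a varint field
			_, err = b.DecodeVarint()
		case wire == 1: // skip a 64-bit field
			_, err = b.DecodeFixed64()
		case wire == 2: // skip a length-delimited field
			_, err = b.DecodeRawBytes(false)
		case wire == 5: // skip a 32-bit field
			_, err = b.DecodeFixed32()
		default:
			return "", fmt.Errorf("unsupported wire type %d", wire)
		}
		if err != nil {
			return "", err
		}
	}
}

func main() {
	// Hand-encode a message whose field 1 holds the string "olt-123".
	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(1<<3 | 2)
	_ = b.EncodeStringBytes("olt-123")
	fmt.Println(findSelector(b.Bytes(), 1)) // olt-123 <nil>
}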
diff --git a/afrouter/afrouter/api.go b/afrouter/afrouter/api.go
index aec1221..36e79a3 100644
--- a/afrouter/afrouter/api.go
+++ b/afrouter/afrouter/api.go
@@ -18,25 +18,24 @@
package afrouter
import (
- "net"
- "fmt"
"errors"
- "runtime"
- "strconv"
- "google.golang.org/grpc"
- "golang.org/x/net/context"
+ "fmt"
"github.com/opencord/voltha-go/common/log"
pb "github.com/opencord/voltha-protos/go/afrouter"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "net"
+ "runtime"
+ "strconv"
)
-
type ArouterApi struct {
- addr string
- port int
+ addr string
+ port int
apiListener net.Listener
- apiServer * grpc.Server
- running bool
- ar *ArouterProxy
+ apiServer *grpc.Server
+ running bool
+ ar *ArouterProxy
}
func newApi(config *ApiConfig, ar *ArouterProxy) (*ArouterApi, error) {
@@ -51,23 +50,23 @@
return nil, errors.New("Errors in API configuration")
} else {
var err error = nil
- aa := &ArouterApi{addr:config.Addr,port:int(config.Port),ar:ar}
+ aa := &ArouterApi{addr: config.Addr, port: int(config.Port), ar: ar}
// Create the listener for the API server
if aa.apiListener, err =
- net.Listen("tcp", config.Addr + ":"+
- strconv.Itoa(int(config.Port))); err != nil {
+ net.Listen("tcp", config.Addr+":"+
+ strconv.Itoa(int(config.Port))); err != nil {
log.Error(err)
return nil, err
}
// Create the API server
aa.apiServer = grpc.NewServer()
pb.RegisterConfigurationServer(aa.apiServer, *aa)
- return aa,err
+ return aa, err
}
}
func (aa *ArouterApi) getServer(srvr string) (*server, error) {
- if s,ok := aa.ar.servers[srvr]; ok == false {
+ if s, ok := aa.ar.servers[srvr]; ok == false {
err := errors.New(fmt.Sprintf("Server '%s' doesn't exist", srvr))
return nil, err
} else {
@@ -76,8 +75,8 @@
}
func (aa *ArouterApi) getRouter(s *server, clstr string) (Router, error) {
- for _,pkg := range s.routers {
- for _,r := range pkg {
+ for _, pkg := range s.routers {
+ for _, r := range pkg {
if c := r.FindBackendCluster(clstr); c != nil {
return r, nil
}
@@ -88,8 +87,8 @@
}
func (aa *ArouterApi) getCluster(s *server, clstr string) (*backendCluster, error) {
- for _,pkg := range s.routers {
- for _,r := range pkg {
+ for _, pkg := range s.routers {
+ for _, r := range pkg {
if c := r.FindBackendCluster(clstr); c != nil {
return c, nil
}
@@ -100,18 +99,18 @@
}
func (aa *ArouterApi) getBackend(c *backendCluster, bknd string) (*backend, error) {
- for _,b := range c.backends {
+ for _, b := range c.backends {
if b.name == bknd {
- return b,nil
+ return b, nil
}
}
err := errors.New(fmt.Sprintf("Backend '%s' doesn't exist in cluster %s",
- bknd, c.name))
+ bknd, c.name))
return nil, err
}
func (aa *ArouterApi) getConnection(b *backend, con string) (*beConnection, error) {
- if c,ok := b.connections[con]; ok == false {
+ if c, ok := b.connections[con]; ok == false {
err := errors.New(fmt.Sprintf("Connection '%s' doesn't exist", con))
return nil, err
} else {
@@ -119,8 +118,8 @@
}
}
-func (aa * ArouterApi) updateConnection(in *pb.Conn, cn *beConnection, b *backend) error {
- sPort := strconv.FormatUint(in.Port,10)
+func (aa *ArouterApi) updateConnection(in *pb.Conn, cn *beConnection, b *backend) error {
+ sPort := strconv.FormatUint(in.Port, 10)
// Check that the ip address and or port are different
if in.Addr == cn.addr && sPort == cn.port {
err := errors.New(fmt.Sprintf("Refusing to change connection '%s' to identical values", in.Connection))
@@ -135,7 +134,7 @@
}
func (aa ArouterApi) SetAffinity(ctx context.Context, in *pb.Affinity) (*pb.Result, error) {
- log.Debugf("SetAffinity called! %v",in);
+ log.Debugf("SetAffinity called! %v", in)
//return &pb.Result{Success:true,Error:""},nil
// Navigate down tot he connection and compare IP addresses and ports if they're
// not the same then close the existing connection. If they are bothe the same
@@ -144,33 +143,33 @@
aap := &aa
- _=aap
+ _ = aap
log.Debugf("Getting router %s and route %s", in.Router, in.Route)
- if r,ok := allRouters[in.Router+in.Route]; ok == true {
+ if r, ok := allRouters[in.Router+in.Route]; ok == true {
switch rr := r.(type) {
- case AffinityRouter:
- log.Debug("Affinity router found")
- b := rr.FindBackendCluster(in.Cluster).getBackend(in.Backend)
- if b != nil {
- rr.setAffinity(in.Id, b)
- } else {
- log.Errorf("Requested backend '%s' not found", in.Backend)
- }
- _ = rr
- case MethodRouter:
- log.Debug("Method router found")
- _ = rr
- default:
- log.Debug("Some other router found")
- _ = rr
+ case AffinityRouter:
+ log.Debug("Affinity router found")
+ b := rr.FindBackendCluster(in.Cluster).getBackend(in.Backend)
+ if b != nil {
+ rr.setAffinity(in.Id, b)
+ } else {
+ log.Errorf("Requested backend '%s' not found", in.Backend)
+ }
+ _ = rr
+ case MethodRouter:
+ log.Debug("Method router found")
+ _ = rr
+ default:
+ log.Debug("Some other router found")
+ _ = rr
}
} else {
log.Debugf("Couldn't get router type")
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
- return &pb.Result{Success:true,Error:""},nil
+ return &pb.Result{Success: true, Error: ""}, nil
}
func (aa ArouterApi) SetConnection(ctx context.Context, in *pb.Conn) (*pb.Result, error) {
@@ -179,47 +178,47 @@
// then return an error describing the situation.
var s *server
var c *backendCluster
- var b * backend
- var cn * beConnection
+ var b *backend
+ var cn *beConnection
var err error
- log.Debugf("SetConnection called! %v",in);
+ log.Debugf("SetConnection called! %v", in)
aap := &aa
- if s,err = (aap).getServer(in.Server); err != nil {
+ if s, err = (aap).getServer(in.Server); err != nil {
err := errors.New(fmt.Sprintf("Server '%s' doesn't exist", in.Server))
log.Error(err)
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
// The cluster is usually accessed via tha router but since each
// cluster is unique it's good enough to find the router that
// has the cluster we're looking for rather than fully keying
// the path
- if c,err = aap.getCluster(s, in.Cluster); err != nil {
+ if c, err = aap.getCluster(s, in.Cluster); err != nil {
log.Error(err)
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
- if b,err = aap.getBackend(c, in.Backend); err != nil {
+ if b, err = aap.getBackend(c, in.Backend); err != nil {
log.Error(err)
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
- if cn,err = aap.getConnection(b, in.Connection); err != nil {
+ if cn, err = aap.getConnection(b, in.Connection); err != nil {
log.Error(err)
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
if err = aap.updateConnection(in, cn, b); err != nil {
log.Error(err)
- return &pb.Result{Success:false,Error:err.Error()}, err
+ return &pb.Result{Success: false, Error: err.Error()}, err
}
- return &pb.Result{Success:true,Error:""},nil
+ return &pb.Result{Success: true, Error: ""}, nil
}
func (aa ArouterApi) GetGoroutineCount(ctx context.Context, in *pb.Empty) (*pb.Count, error) {
- return &pb.Count{Count:uint32(runtime.NumGoroutine())}, nil
+ return &pb.Count{Count: uint32(runtime.NumGoroutine())}, nil
}
func (aa *ArouterApi) serve() {
@@ -233,4 +232,3 @@
}
}()
}
-
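The SetAffinity handler reformatted above belongs to the Configuration gRPC service registered in newApi, and lets an operator pin a route-selector value to a specific backend at runtime. A minimal client-side sketch follows, assuming the conventional generated constructor NewConfigurationClient; the address and the router/cluster/backend/id values are illustrative, while the pb.Affinity and pb.Result field names come from this file.

package main

import (
	"context"
	"fmt"

	pb "github.com/opencord/voltha-protos/go/afrouter"
	"google.golang.org/grpc"
)

func main() {
	// Address is illustrative; in practice it comes from the ApiConfig
	// (Addr/Port) in the afrouter configuration file.
	conn, err := grpc.Dial("localhost:55554", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := pb.NewConfigurationClient(conn) // assumed generated constructor
	res, err := client.SetAffinity(context.Background(), &pb.Affinity{
		Router:  "vcore",       // illustrative names only
		Route:   "dev_manager",
		Cluster: "vcore",
		Backend: "vcore1",
		Id:      "0001aabbccddeeff", // route-selector value to pin
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Success, res.Error)
}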
diff --git a/afrouter/afrouter/arproxy.go b/afrouter/afrouter/arproxy.go
index d809fbb..71c7b1f 100644
--- a/afrouter/afrouter/arproxy.go
+++ b/afrouter/afrouter/arproxy.go
@@ -25,41 +25,39 @@
"github.com/opencord/voltha-go/common/log"
)
-
type nbi int
const (
- GRPC_NBI nbi = 1
+ GRPC_NBI nbi = 1
GRPC_STREAMING_NBI nbi = 2
- GRPC_CONTROL_NBI nbi = 3
+ GRPC_CONTROL_NBI nbi = 3
)
// String names for display in error messages.
var arpxyNames = [...]string{"grpc_nbi", "grpc_streaming_nbi", "grpc_control_nbi"}
-var arProxy *ArouterProxy= nil
+var arProxy *ArouterProxy = nil
type ArouterProxy struct {
servers map[string]*server // Defined in handler.go
- api *ArouterApi
+ api *ArouterApi
}
-
// Create the routing proxy
func NewArouterProxy(conf *Configuration) (*ArouterProxy, error) {
- arProxy = &ArouterProxy{servers:make(map[string]*server)}
+ arProxy = &ArouterProxy{servers: make(map[string]*server)}
// Create all the servers listed in the configuration
- for _,s := range conf.Servers {
- if ns, err := newServer(&s); err != nil {
- log.Error("Configuration failed")
- return nil, err
- } else {
+ for _, s := range conf.Servers {
+ if ns, err := newServer(&s); err != nil {
+ log.Error("Configuration failed")
+ return nil, err
+ } else {
arProxy.servers[ns.Name()] = ns
}
}
// TODO: The API is not mandatory, check if it's even in the config before
// trying to create it. If it isn't then don't bother but log a warning.
- if api,err := newApi(&conf.Api, arProxy); err != nil {
+ if api, err := newApi(&conf.Api, arProxy); err != nil {
return nil, err
} else {
arProxy.api = api
diff --git a/afrouter/afrouter/backend.go b/afrouter/afrouter/backend.go
index 3f17af1..863652f 100644
--- a/afrouter/afrouter/backend.go
+++ b/afrouter/afrouter/backend.go
@@ -20,102 +20,99 @@
// Backend manager handles redundant connections per backend
import (
- "io"
- "fmt"
- "net"
- "sync"
- "time"
- "sort"
"errors"
- "strconv"
- "strings"
+ "fmt"
+ "github.com/opencord/voltha-go/common/log"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/connectivity"
- "github.com/opencord/voltha-go/common/log"
+ "google.golang.org/grpc/metadata"
+ "io"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
)
-
-
const (
BE_ACTIVE_ACTIVE = 1 // Backend type active/active
- BE_SERVER = 2 // Backend type single server
- BE_SEQ_RR = 0 // Backend sequence round robin
- AS_NONE = 0 // Association strategy: none
- AS_SERIAL_NO = 1 // Association strategy: serial number
- AL_NONE = 0 // Association location: none
- AL_HEADER = 1 // Association location: header
- AL_PROTOBUF = 2 // Association location: protobuf
+ BE_SERVER = 2 // Backend type single server
+ BE_SEQ_RR = 0 // Backend sequence round robin
+ AS_NONE = 0 // Association strategy: none
+ AS_SERIAL_NO = 1 // Association strategy: serial number
+ AL_NONE = 0 // Association location: none
+ AL_HEADER = 1 // Association location: header
+ AL_PROTOBUF = 2 // Association location: protobuf
)
-
-var beTypeNames = []string{"","active_active","server"}
-var asTypeNames = []string{"","serial_number"}
-var alTypeNames = []string{"","header","protobuf"}
+var beTypeNames = []string{"", "active_active", "server"}
+var asTypeNames = []string{"", "serial_number"}
+var alTypeNames = []string{"", "header", "protobuf"}
var bClusters map[string]*backendCluster = make(map[string]*backendCluster)
type backendCluster struct {
name string
//backends map[string]*backend
- backends []*backend
- beRvMap map[*backend]int
+ backends []*backend
+ beRvMap map[*backend]int
serialNoSource chan uint64
}
type backend struct {
- lck sync.Mutex
- name string
- beType int
- activeAssoc assoc
- connFailCallback func(string, *backend)bool
- connections map[string]*beConnection
- srtdConns []*beConnection
- opnConns int
+ lck sync.Mutex
+ name string
+ beType int
+ activeAssoc assoc
+ connFailCallback func(string, *backend) bool
+ connections map[string]*beConnection
+ srtdConns []*beConnection
+ opnConns int
}
type assoc struct {
strategy int
location int
- field string // Used only if location is protobuf
- key string
+ field string // Used only if location is protobuf
+ key string
}
type beConnection struct {
- lck sync.Mutex
- cncl context.CancelFunc
- name string
- addr string
- port string
+ lck sync.Mutex
+ cncl context.CancelFunc
+ name string
+ addr string
+ port string
gConn *gConnection
- bknd *backend
+ bknd *backend
}
// This structure should never be referred to
// by any routine outside of *beConnection
// routines.
type gConnection struct {
- lck sync.Mutex
+ lck sync.Mutex
state connectivity.State
- conn *grpc.ClientConn
- cncl context.CancelFunc
+ conn *grpc.ClientConn
+ cncl context.CancelFunc
}
type beClStrm struct {
- strm grpc.ClientStream
- ctxt context.Context
- cncl context.CancelFunc
+ strm grpc.ClientStream
+ ctxt context.Context
+ cncl context.CancelFunc
ok2Close chan struct{}
- c2sRtrn chan error
- s2cRtrn error
+ c2sRtrn chan error
+ s2cRtrn error
}
type beClStrms struct {
- lck sync.Mutex
- actvStrm *beClStrm
- strms map[string]*beClStrm
+ lck sync.Mutex
+ actvStrm *beClStrm
+ strms map[string]*beClStrm
srtdStrms []*beClStrm
}
@@ -137,7 +134,7 @@
rtrn_err = true
}
//bc := &backendCluster{name:conf.Name,backends:make(map[string]*backend)}
- bc := &backendCluster{name:conf.Name, beRvMap:make(map[*backend]int)}
+ bc := &backendCluster{name: conf.Name, beRvMap: make(map[*backend]int)}
bClusters[bc.name] = bc
bc.startSerialNumberSource() // Serial numberere for active/active backends
idx := 0
@@ -146,7 +143,7 @@
log.Errorf("A backend must have a name in cluster %s\n", conf.Name)
rtrn_err = true
}
- if be,err = newBackend(&bec, conf.Name); err != nil {
+ if be, err = newBackend(&bec, conf.Name); err != nil {
log.Errorf("Error creating backend %s", bec.Name)
rtrn_err = true
}
@@ -160,8 +157,8 @@
return bc, nil
}
-func (bc * backendCluster) getBackend(name string) *backend {
- for _,v := range bc.backends {
+func (bc *backendCluster) getBackend(name string) *backend {
+ for _, v := range bc.backends {
if v.name == name {
return v
}
@@ -181,48 +178,48 @@
}()
}
-func (bc *backendCluster) nextBackend(be *backend, seq int) (*backend,error) {
+func (bc *backendCluster) nextBackend(be *backend, seq int) (*backend, error) {
switch seq {
- case BE_SEQ_RR: // Round robin
- in := be
- // If no backend is found having a connection
- // then return nil.
- if be == nil {
- log.Debug("Previous backend is nil")
- be = bc.backends[0]
- in = be
- if be.opnConns != 0 {
- return be,nil
- }
+ case BE_SEQ_RR: // Round robin
+ in := be
+ // If no backend is found having a connection
+ // then return nil.
+ if be == nil {
+ log.Debug("Previous backend is nil")
+ be = bc.backends[0]
+ in = be
+ if be.opnConns != 0 {
+ return be, nil
}
- for {
- log.Debugf("Requesting a new backend starting from %s", be.name)
- cur := bc.beRvMap[be]
- cur++
- if cur >= len(bc.backends) {
- cur = 0
- }
- log.Debugf("Next backend is %d:%s", cur, bc.backends[cur].name)
- if bc.backends[cur].opnConns > 0 {
- return bc.backends[cur], nil
- }
- if bc.backends[cur] == in {
- err := fmt.Errorf("No backend with open connections found")
- log.Debug(err);
- return nil,err
- }
- be = bc.backends[cur]
- log.Debugf("Backend '%s' has no open connections, trying next", bc.backends[cur].name)
+ }
+ for {
+ log.Debugf("Requesting a new backend starting from %s", be.name)
+ cur := bc.beRvMap[be]
+ cur++
+ if cur >= len(bc.backends) {
+ cur = 0
}
- default: // Invalid, defalt to routnd robin
- log.Errorf("Invalid backend sequence %d. Defaulting to round robin", seq)
- return bc.nextBackend(be, BE_SEQ_RR)
+ log.Debugf("Next backend is %d:%s", cur, bc.backends[cur].name)
+ if bc.backends[cur].opnConns > 0 {
+ return bc.backends[cur], nil
+ }
+ if bc.backends[cur] == in {
+ err := fmt.Errorf("No backend with open connections found")
+ log.Debug(err)
+ return nil, err
+ }
+ be = bc.backends[cur]
+ log.Debugf("Backend '%s' has no open connections, trying next", bc.backends[cur].name)
+ }
+ default: // Invalid, default to round robin
+ log.Errorf("Invalid backend sequence %d. Defaulting to round robin", seq)
+ return bc.nextBackend(be, BE_SEQ_RR)
}
}
func (bec *backendCluster) handler(srv interface{}, serverStream grpc.ServerStream, r Router, mthdSlice []string,
- mk string, mv string) error {
-//func (bec *backendCluster) handler(nbR * nbRequest) error {
+ mk string, mv string) error {
+ //func (bec *backendCluster) handler(nbR * nbRequest) error {
// The final backend cluster needs to be determined here. With non-affinity routed backends it could
// just be determined here and for affinity routed backends the first message must be received
@@ -231,25 +228,25 @@
// Get the backend to use.
// Allocate the nbFrame here since it holds the "context" of this communication
- nf := &nbFrame{router:r, mthdSlice:mthdSlice, serNo:bec.serialNoSource, metaKey:mk, metaVal:mv}
+ nf := &nbFrame{router: r, mthdSlice: mthdSlice, serNo: bec.serialNoSource, metaKey: mk, metaVal: mv}
log.Debugf("Nb frame allocate with method %s", nf.mthdSlice[REQ_METHOD])
- if be,err := bec.assignBackend(serverStream, nf); err != nil {
+ if be, err := bec.assignBackend(serverStream, nf); err != nil {
// At this point, no backend streams have been initiated
// so just return the error.
return err
} else {
log.Debugf("Backend '%s' selected", be.name)
// Allocate a sbFrame here because it might be needed for return value intercept
- sf := &sbFrame{router:r, be:be, method:nf.mthdSlice[REQ_METHOD], metaKey:mk, metaVal:mv}
- log.Debugf("Sb frame allocated with router %s",r.Name())
+ sf := &sbFrame{router: r, be: be, method: nf.mthdSlice[REQ_METHOD], metaKey: mk, metaVal: mv}
+ log.Debugf("Sb frame allocated with router %s", r.Name())
return be.handler(srv, serverStream, nf, sf)
}
}
-func (be *backend) openSouthboundStreams(srv interface{}, serverStream grpc.ServerStream, f * nbFrame) (*beClStrms, error) {
+func (be *backend) openSouthboundStreams(srv interface{}, serverStream grpc.ServerStream, f *nbFrame) (*beClStrms, error) {
- rtrn := &beClStrms{strms:make(map[string]*beClStrm),actvStrm:nil}
+ rtrn := &beClStrms{strms: make(map[string]*beClStrm), actvStrm: nil}
log.Debugf("Opening southbound streams for method '%s'", f.mthdSlice[REQ_METHOD])
// Get the metadata from the incoming message on the server
@@ -268,7 +265,7 @@
var atLeastOne bool = false
var errStr strings.Builder
log.Debugf("There are %d connections to open", len(be.connections))
- for _,cn := range be.srtdConns {
+ for _, cn := range be.srtdConns {
// TODO: THIS IS A HACK to suspend redundancy for binding routers for all calls
// and its very specific to a use case. There should really be a per method
// mechanism to select non-redundant calls for all router types. This needs
@@ -281,7 +278,7 @@
continue
}
// Copy in the metadata
- if cn.getState() == connectivity.Ready && cn.getConn() != nil {
+ if cn.getState() == connectivity.Ready && cn.getConn() != nil {
log.Debugf("Opening southbound stream for connection '%s'", cn.name)
// Create an outgoing context that includes the incoming metadata
// and that will cancel if the server's context is canceled
@@ -289,18 +286,18 @@
clientCtx = metadata.NewOutgoingContext(clientCtx, md.Copy())
//TODO: Same check here, only add the serial number if necessary
clientCtx = metadata.AppendToOutgoingContext(clientCtx, "voltha_serial_number",
- strconv.FormatUint(serialNo,10))
+ strconv.FormatUint(serialNo, 10))
// Create the client stream
if clientStream, err := grpc.NewClientStream(clientCtx, clientStreamDescForProxying,
- cn.getConn(), f.mthdSlice[REQ_ALL]); err !=nil {
- log.Debugf("Failed to create a client stream '%s', %v",cn.name,err)
+ cn.getConn(), f.mthdSlice[REQ_ALL]); err != nil {
+ log.Debugf("Failed to create a client stream '%s', %v", cn.name, err)
fmt.Fprintf(&errStr, "{{Failed to create a client stream '%s', %v}} ", cn.name, err)
rtrn.strms[cn.name] = nil
} else {
- rtrn.strms[cn.name] = &beClStrm{strm:clientStream, ctxt:clientCtx,
- cncl:clientCancel, s2cRtrn:nil,
- ok2Close:make(chan struct{}),
- c2sRtrn:make(chan error, 1)}
+ rtrn.strms[cn.name] = &beClStrm{strm: clientStream, ctxt: clientCtx,
+ cncl: clientCancel, s2cRtrn: nil,
+ ok2Close: make(chan struct{}),
+ c2sRtrn: make(chan error, 1)}
atLeastOne = true
}
} else if cn.getConn() == nil {
@@ -315,24 +312,24 @@
}
if atLeastOne == true {
rtrn.sortStreams()
- return rtrn,nil
+ return rtrn, nil
}
- fmt.Fprintf(&errStr, "{{No streams available for backend '%s' unable to send}} ",be.name)
+ fmt.Fprintf(&errStr, "{{No streams available for backend '%s' unable to send}} ", be.name)
log.Error(errStr.String())
return nil, errors.New(errStr.String())
}
-func (be *backend) handler(srv interface{}, serverStream grpc.ServerStream, nf * nbFrame, sf * sbFrame) error {
+func (be *backend) handler(srv interface{}, serverStream grpc.ServerStream, nf *nbFrame, sf *sbFrame) error {
- // Set up and launch each individual southbound stream
+ // Set up and launch each individual southbound stream
var beStrms *beClStrms
var rtrn error = nil
var s2cOk bool = false
var c2sOk bool = false
- beStrms, err := be.openSouthboundStreams(srv,serverStream,nf)
+ beStrms, err := be.openSouthboundStreams(srv, serverStream, nf)
if err != nil {
- log.Errorf("openStreams failed: %v",err)
+ log.Errorf("openStreams failed: %v", err)
return err
}
// If we get here, there has to be AT LEAST ONE open stream
@@ -362,7 +359,7 @@
return rtrn
}
} else {
- log.Debugf("s2cErr reporting %v",s2cErr)
+ log.Debugf("s2cErr reporting %v", s2cErr)
// however, we may have gotten a receive error (stream disconnected, a read error etc) in which case we need
// to cancel the clientStream to the backend, let all of its goroutines be freed up by the CancelFunc and
// exit with an error to the stack
@@ -382,7 +379,7 @@
// the southbound streams are closed. Should this happen one of the
// backends may not get the request.
if c2sErr != io.EOF {
- rtrn = c2sErr
+ rtrn = c2sErr
}
log.Debug("c2sErr reporting EOF")
if s2cOk == true {
@@ -394,7 +391,7 @@
}
func (strms *beClStrms) clientCancel() {
- for _,strm := range strms.strms {
+ for _, strm := range strms.strms {
if strm != nil {
strm.cncl()
}
@@ -402,7 +399,7 @@
}
func (strms *beClStrms) closeSend() {
- for _,strm := range strms.strms {
+ for _, strm := range strms.strms {
if strm != nil {
<-strm.ok2Close
log.Debug("Closing southbound stream")
@@ -436,13 +433,13 @@
return f.be, nil
}
-func (strms * beClStrms) getActive() *beClStrm {
+func (strms *beClStrms) getActive() *beClStrm {
strms.lck.Lock()
defer strms.lck.Unlock()
return strms.actvStrm
}
-func (strms *beClStrms) setThenGetActive(strm *beClStrm) (*beClStrm) {
+func (strms *beClStrms) setThenGetActive(strm *beClStrm) *beClStrm {
strms.lck.Lock()
defer strms.lck.Unlock()
if strms.actvStrm == nil {
@@ -505,17 +502,17 @@
ret := make(chan error, 1)
agg := make(chan *beClStrm)
atLeastOne := false
- for _,strm := range src.strms {
+ for _, strm := range src.strms {
if strm != nil {
go fc2s(strm)
go func(s *beClStrm) { // Wait on result and aggregate
r := <-s.c2sRtrn // got the return code
if r == nil {
- return // We're the redundat stream, just die
+ return // We're the redundant stream, just die
}
s.c2sRtrn <- r // put it back to pass it along
- agg <- s // send the stream to the aggregator
- } (strm)
+ agg <- s // send the stream to the aggregator
+ }(strm)
atLeastOne = true
}
}
@@ -536,7 +533,7 @@
var rtrn error
atLeastOne := false
- for _,strm := range strms.srtdStrms {
+ for _, strm := range strms.srtdStrms {
if strm != nil {
if err := strm.strm.SendMsg(f); err != nil {
log.Debugf("Error on SendMsg: %s", err.Error())
@@ -550,7 +547,7 @@
// If one of the streams succeeded, declare success
// if none did pick an error and return it.
if atLeastOne == true {
- for _,strm := range strms.srtdStrms {
+ for _, strm := range strms.srtdStrms {
if strm != nil {
rtrn = strm.s2cRtrn
if rtrn == nil {
@@ -563,14 +560,14 @@
rtrn = errors.New("There are no open streams, this should never happen")
log.Error(rtrn)
}
- return rtrn;
+ return rtrn
}
func (dst *beClStrms) forwardServerToClient(src grpc.ServerStream, f *nbFrame) chan error {
ret := make(chan error, 1)
go func() {
// The frame buffer already has the results of a first
- // RecvMsg in it so the first thing to do is to
+ // RecvMsg in it so the first thing to do is to
// send it to the list of client streams and only
// then read some more.
for i := 0; ; i++ {
@@ -590,24 +587,24 @@
return ret
}
-func (st * beClStrms) sortStreams() {
+func (st *beClStrms) sortStreams() {
var tmpKeys []string
- for k,_ := range st.strms {
+ for k, _ := range st.strms {
tmpKeys = append(tmpKeys, k)
}
sort.Strings(tmpKeys)
- for _,v := range tmpKeys {
+ for _, v := range tmpKeys {
st.srtdStrms = append(st.srtdStrms, st.strms[v])
}
}
-func (be * backend) sortConns() {
+func (be *backend) sortConns() {
var tmpKeys []string
- for k,_ := range be.connections {
+ for k, _ := range be.connections {
tmpKeys = append(tmpKeys, k)
}
sort.Strings(tmpKeys)
- for _,v := range tmpKeys {
+ for _, v := range tmpKeys {
be.srtdConns = append(be.srtdConns, be.connections[v])
}
}
@@ -617,8 +614,8 @@
log.Debugf("Configuring the backend with %v", *conf)
// Validate the conifg and configure the backend
- be:=&backend{name:conf.Name,connections:make(map[string]*beConnection),opnConns:0}
- idx := strIndex([]string(beTypeNames),conf.Type)
+ be := &backend{name: conf.Name, connections: make(map[string]*beConnection), opnConns: 0}
+ idx := strIndex([]string(beTypeNames), conf.Type)
if idx == 0 {
log.Error("Invalid type specified for backend %s in cluster %s", conf.Name, clusterName)
rtrn_err = true
@@ -628,7 +625,7 @@
idx = strIndex(asTypeNames, conf.Association.Strategy)
if idx == 0 && be.beType == BE_ACTIVE_ACTIVE {
log.Errorf("An association strategy must be provided if the backend "+
- "type is active/active for backend %s in cluster %s", conf.Name, clusterName)
+ "type is active/active for backend %s in cluster %s", conf.Name, clusterName)
rtrn_err = true
}
be.activeAssoc.strategy = idx
@@ -636,23 +633,23 @@
idx = strIndex(alTypeNames, conf.Association.Location)
if idx == 0 && be.beType == BE_ACTIVE_ACTIVE {
log.Errorf("An association location must be provided if the backend "+
- "type is active/active for backend %s in cluster %s", conf.Name, clusterName)
+ "type is active/active for backend %s in cluster %s", conf.Name, clusterName)
rtrn_err = true
}
be.activeAssoc.location = idx
if conf.Association.Field == "" && be.activeAssoc.location == AL_PROTOBUF {
log.Errorf("An association field must be provided if the backend "+
- "type is active/active and the location is set to protobuf "+
- "for backend %s in cluster %s", conf.Name, clusterName)
+ "type is active/active and the location is set to protobuf "+
+ "for backend %s in cluster %s", conf.Name, clusterName)
rtrn_err = true
}
be.activeAssoc.field = conf.Association.Field
if conf.Association.Key == "" && be.activeAssoc.location == AL_HEADER {
log.Errorf("An association key must be provided if the backend "+
- "type is active/active and the location is set to header "+
- "for backend %s in cluster %s", conf.Name, clusterName)
+ "type is active/active and the location is set to header "+
+ "for backend %s in cluster %s", conf.Name, clusterName)
rtrn_err = true
}
be.activeAssoc.key = conf.Association.Key
@@ -664,34 +661,34 @@
// at a later time.
// TODO: validate that there is one connection for all but active/active backends
if len(conf.Connections) > 1 && be.activeAssoc.strategy != BE_ACTIVE_ACTIVE {
- log.Errorf("Only one connection must be specified if the association "+
- "strategy is not set to 'active_active'")
+ log.Errorf("Only one connection must be specified if the association " +
+ "strategy is not set to 'active_active'")
rtrn_err = true
}
if len(conf.Connections) == 0 {
log.Errorf("At least one connection must be specified")
rtrn_err = true
}
- for _,cnConf := range conf.Connections {
+ for _, cnConf := range conf.Connections {
if cnConf.Name == "" {
log.Errorf("A connection must have a name for backend %s in cluster %s",
- conf.Name, clusterName)
+ conf.Name, clusterName)
} else {
- gc:=&gConnection{conn:nil,cncl:nil,state:connectivity.Idle}
- be.connections[cnConf.Name] = &beConnection{name:cnConf.Name,addr:cnConf.Addr,port:cnConf.Port,bknd:be,gConn:gc}
+ gc := &gConnection{conn: nil, cncl: nil, state: connectivity.Idle}
+ be.connections[cnConf.Name] = &beConnection{name: cnConf.Name, addr: cnConf.Addr, port: cnConf.Port, bknd: be, gConn: gc}
if cnConf.Addr != "" { // This connection will be specified later.
if ip := net.ParseIP(cnConf.Addr); ip == nil {
log.Errorf("The IP address for connection %s in backend %s in cluster %s is invalid",
- cnConf.Name, conf.Name, clusterName)
+ cnConf.Name, conf.Name, clusterName)
rtrn_err = true
}
// Validate the port number. This just validtes that it's a non 0 integer
- if n,err := strconv.Atoi(cnConf.Port); err != nil || n <= 0 || n > 65535 {
+ if n, err := strconv.Atoi(cnConf.Port); err != nil || n <= 0 || n > 65535 {
log.Errorf("Port %s for connection %s in backend %s in cluster %s is invalid",
cnConf.Port, cnConf.Name, conf.Name, clusterName)
rtrn_err = true
} else {
- if n <=0 && n > 65535 {
+ if n <= 0 && n > 65535 {
log.Errorf("Port %s for connection %s in backend %s in cluster %s is invalid",
cnConf.Port, cnConf.Name, conf.Name, clusterName)
rtrn_err = true
@@ -738,20 +735,20 @@
// on a first attempt to connect. Individual connections should be
// handled after that.
func (be *backend) connectAll() {
- for _,cn := range be.connections {
+ for _, cn := range be.connections {
cn.connect()
}
}
func (cn *beConnection) connect() {
if cn.addr != "" && cn.getConn() == nil {
- log.Infof("Connecting to connection %s with addr: %s and port %s", cn.name,cn.addr,cn.port)
+ log.Infof("Connecting to connection %s with addr: %s and port %s", cn.name, cn.addr, cn.port)
// Dial doesn't block, it just returns and continues connecting in the background.
// Check back later to confirm and increase the connection count.
ctx, cnclFnc := context.WithCancel(context.Background()) // Context for canceling the connection
cn.setCncl(cnclFnc)
if conn, err := grpc.Dial(cn.addr+":"+cn.port, grpc.WithCodec(Codec()), grpc.WithInsecure()); err != nil {
- log.Errorf("Dialng connection %v:%v",cn,err)
+ log.Errorf("Dialng connection %v:%v", cn, err)
cn.waitAndTryAgain(ctx)
} else {
cn.setConn(conn)
@@ -767,19 +764,19 @@
func (cn *beConnection) waitAndTryAgain(ctx context.Context) {
go func(ctx context.Context) {
- ctxTm,cnclTm := context.WithTimeout(context.Background(), 10 * time.Second)
- select {
- case <-ctxTm.Done():
- cnclTm()
- log.Debugf("Trying to connect '%s'",cn.name)
- // Connect creates a new context so cancel this one.
- cn.cancel()
- cn.connect()
- return
- case <-ctx.Done():
- cnclTm()
- return
- }
+ ctxTm, cnclTm := context.WithTimeout(context.Background(), 10*time.Second)
+ select {
+ case <-ctxTm.Done():
+ cnclTm()
+ log.Debugf("Trying to connect '%s'", cn.name)
+ // Connect creates a new context so cancel this one.
+ cn.cancel()
+ cn.connect()
+ return
+ case <-ctx.Done():
+ cnclTm()
+ return
+ }
}(ctx)
}
@@ -787,7 +784,7 @@
cn.lck.Lock()
defer cn.lck.Unlock()
log.Debugf("Canceling connection %s", cn.name)
- if cn.gConn != nil{
+ if cn.gConn != nil {
if cn.gConn.cncl != nil {
cn.cncl()
} else {
@@ -844,7 +841,7 @@
// Now replace the gConn object with a new one as this one just
// fades away as references to it are released after the close
// finishes in the background.
- cn.gConn = &gConnection{conn:nil,cncl:nil,state:connectivity.TransientFailure}
+ cn.gConn = &gConnection{conn: nil, cncl: nil, state: connectivity.TransientFailure}
} else {
log.Errorf("Internal error, attempt to close a nil connection object for '%s'", cn.name)
}
@@ -861,7 +858,7 @@
}
}
-func (cn *beConnection) getState() (connectivity.State) {
+func (cn *beConnection) getState() connectivity.State {
cn.lck.Lock()
defer cn.lck.Unlock()
if cn.gConn != nil {
@@ -877,7 +874,6 @@
return connectivity.TransientFailure
}
-
func (cn *beConnection) monitor(ctx context.Context) {
bp := cn.bknd
log.Debugf("Setting up monitoring for backend %s", bp.name)
@@ -885,58 +881,58 @@
var delay time.Duration = 100 //ms
for {
//log.Debugf("****** Monitoring connection '%s' on backend '%s', %v", cn.name, bp.name, cn.conn)
- if cn.getState() == connectivity.Ready {
+ if cn.getState() == connectivity.Ready {
log.Debugf("connection '%s' on backend '%s' becomes ready", cn.name, bp.name)
cn.setState(connectivity.Ready)
bp.incConn()
if cn.getConn() != nil && cn.getConn().WaitForStateChange(ctx, connectivity.Ready) == false {
// The context was canceled. This is done by the close function
// so just exit the routine
- log.Debugf("Contxt canceled for connection '%s' on backend '%s'",cn.name, bp.name)
+ log.Debugf("Contxt canceled for connection '%s' on backend '%s'", cn.name, bp.name)
return
}
if cs := cn.getConn(); cs != nil {
switch cs := cn.getState(); cs {
- case connectivity.TransientFailure:
- cn.setState(cs)
- bp.decConn()
- log.Infof("Transient failure for connection '%s' on backend '%s'",cn.name, bp.name)
- delay = 100
- case connectivity.Shutdown:
- //The connection was closed. The assumption here is that the closer
- // will manage the connection count and setting the conn to nil.
- // Exit the routine
- log.Infof("Shutdown for connection '%s' on backend '%s'",cn.name, bp.name)
- return
- case connectivity.Idle:
- // This can only happen if the server sends a GoAway. This can
- // only happen if the server has modified MaxConnectionIdle from
- // its default of infinity. The only solution here is to close the
- // connection and keepTrying()?
- //TODO: Read the grpc source code to see if there's a different approach
- log.Errorf("Server sent 'GoAway' on connection '%s' on backend '%s'",cn.name, bp.name)
- cn.close()
- cn.connect()
- return
+ case connectivity.TransientFailure:
+ cn.setState(cs)
+ bp.decConn()
+ log.Infof("Transient failure for connection '%s' on backend '%s'", cn.name, bp.name)
+ delay = 100
+ case connectivity.Shutdown:
+ //The connection was closed. The assumption here is that the closer
+ // will manage the connection count and setting the conn to nil.
+ // Exit the routine
+ log.Infof("Shutdown for connection '%s' on backend '%s'", cn.name, bp.name)
+ return
+ case connectivity.Idle:
+ // This can only happen if the server sends a GoAway. This can
+ // only happen if the server has modified MaxConnectionIdle from
+ // its default of infinity. The only solution here is to close the
+ // connection and keepTrying()?
+ //TODO: Read the grpc source code to see if there's a different approach
+ log.Errorf("Server sent 'GoAway' on connection '%s' on backend '%s'", cn.name, bp.name)
+ cn.close()
+ cn.connect()
+ return
}
} else { // A nil means something went horribly wrong, error and exit.
- log.Errorf("Somthing horrible happned, the connection is nil and shouldn't be for connection %s",cn.name)
+ log.Errorf("Somthing horrible happned, the connection is nil and shouldn't be for connection %s", cn.name)
return
}
} else {
log.Debugf("Waiting for connection '%s' on backend '%s' to become ready", cn.name, bp.name)
- ctxTm, cnclTm := context.WithTimeout(context.Background(), delay * time.Millisecond)
+ ctxTm, cnclTm := context.WithTimeout(context.Background(), delay*time.Millisecond)
if delay < 30000 {
- delay += delay
+ delay += delay
}
select {
- case <-ctxTm.Done():
- cnclTm() // Doubt this is required but it's harmless.
- // Do nothing but let the loop continue
- case <-ctx.Done():
- // Context was closed, close and exit routine
- //cn.close() NO! let the close be managed externally!
- return
+ case <-ctxTm.Done():
+ cnclTm() // Doubt this is required but it's harmless.
+ // Do nothing but let the loop continue
+ case <-ctx.Done():
+ // Context was closed, close and exit routine
+ //cn.close() NO! let the close be managed externally!
+ return
}
}
}
@@ -945,7 +941,6 @@
// Set a callback for connection failure notification
// This is currently not used.
-func (bp * backend) setConnFailCallback(cb func(string, *backend)bool) {
+func (bp *backend) setConnFailCallback(cb func(string, *backend) bool) {
bp.connFailCallback = cb
}
-
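Most of the churn above is indentation, but the core of backendCluster.nextBackend is its BE_SEQ_RR case: starting from the previous backend, advance through the cluster and return the first backend that still has open connections, failing once the scan wraps back to its starting point. A distilled, self-contained sketch of that selection loop, assuming the hypothetical names node and nextRoundRobin:

package main

import (
	"errors"
	"fmt"
)

// node is a stand-in for *backend: only the fields the selection logic needs.
type node struct {
	name     string
	opnConns int
}

// nextRoundRobin distills the BE_SEQ_RR case: starting after position last,
// return the index of the first node with at least one open connection, or an
// error once every node has been inspected.
func nextRoundRobin(nodes []*node, last int) (int, error) {
	if len(nodes) == 0 {
		return 0, errors.New("no backends configured")
	}
	cur := last
	for i := 0; i < len(nodes); i++ {
		cur = (cur + 1) % len(nodes)
		if nodes[cur].opnConns > 0 {
			return cur, nil
		}
	}
	return 0, errors.New("no backend with open connections found")
}

func main() {
	cluster := []*node{{"vcore1", 0}, {"vcore2", 2}, {"vcore3", 1}}
	idx, err := nextRoundRobin(cluster, 0)
	fmt.Println(idx, err) // 1 <nil> -> "vcore2"
}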
diff --git a/afrouter/afrouter/binding-router.go b/afrouter/afrouter/binding-router.go
index 11e852d..7de3ea7 100644
--- a/afrouter/afrouter/binding-router.go
+++ b/afrouter/afrouter/binding-router.go
@@ -18,59 +18,59 @@
package afrouter
import (
- "fmt"
"errors"
+ "fmt"
+ "github.com/opencord/voltha-go/common/log"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
- "github.com/opencord/voltha-go/common/log"
)
type BindingRouter struct {
- name string
- routerType int // TODO: This is probably not needed
+ name string
+ routerType int // TODO: This is probably not needed
association int
//routingField string
grpcService string
//protoDescriptor *pb.FileDescriptorSet
//methodMap map[string]byte
- bkndClstr *backendCluster
- bindings map[string]*backend
- bindingType string
- bindingField string
+ bkndClstr *backendCluster
+ bindings map[string]*backend
+ bindingType string
+ bindingField string
bindingMethod string
- curBknd **backend
+ curBknd **backend
}
-func (br BindingRouter) BackendCluster(s string, metaKey string) (*backendCluster,error) {
+func (br BindingRouter) BackendCluster(s string, metaKey string) (*backendCluster, error) {
return br.bkndClstr, nil
//return nil,errors.New("Not implemented yet")
}
-func (br BindingRouter) Name() (string) {
+func (br BindingRouter) Name() string {
return br.name
}
-func (br BindingRouter) Service() (string) {
+func (br BindingRouter) Service() string {
return br.grpcService
}
-func (br BindingRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string,string,error) {
+func (br BindingRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string, string, error) {
var rtrnK string = ""
var rtrnV string = ""
// Get the metadata from the server stream
- md, ok := metadata.FromIncomingContext(serverStream.Context())
+ md, ok := metadata.FromIncomingContext(serverStream.Context())
if !ok {
- return rtrnK, rtrnV, errors.New("Could not get a server stream metadata")
- }
+ return rtrnK, rtrnV, errors.New("Could not get a server stream metadata")
+ }
// Determine if one of the method routing keys exists in the metadata
- if _,ok := md[br.bindingField]; ok == true {
+ if _, ok := md[br.bindingField]; ok == true {
rtrnV = md[br.bindingField][0]
rtrnK = br.bindingField
}
- return rtrnK,rtrnV,nil
+ return rtrnK, rtrnV, nil
}
-func (br BindingRouter) FindBackendCluster(becName string) (*backendCluster) {
- if becName == br.bkndClstr.name {
+func (br BindingRouter) FindBackendCluster(becName string) *backendCluster {
+ if becName == br.bkndClstr.name {
return br.bkndClstr
}
return nil
@@ -78,56 +78,55 @@
func (br BindingRouter) ReplyHandler(v interface{}) error {
return nil
}
-func (br BindingRouter) Route(sel interface{}) (*backend) {
+func (br BindingRouter) Route(sel interface{}) *backend {
var err error
switch sl := sel.(type) {
- case *nbFrame:
- if b, ok := br.bindings[sl.metaVal]; ok == true { // binding exists, just return it
- return b
- } else { // establish a new binding or error.
- if sl.metaVal != "" {
- err = errors.New(fmt.Sprintf("Attempt to route on non-existent metadata value '%s' in key '%s'",
- sl.metaVal, sl.metaKey))
- log.Error(err)
- sl.err = err
- return nil
- }
- if sl.mthdSlice[REQ_METHOD] != br.bindingMethod {
- err = errors.New(fmt.Sprintf("Binding must occur with method %s but attempted with method %s",
- br.bindingMethod, sl.mthdSlice[REQ_METHOD]))
- log.Error(err)
- sl.err = err
- return nil
- }
- log.Debugf("MUST CREATE A NEW BINDING MAP ENTRY!!")
- if len(br.bindings) < len(br.bkndClstr.backends) {
- if *br.curBknd, err = br.bkndClstr.nextBackend(*br.curBknd,BE_SEQ_RR); err == nil {
- // Use the name of the backend as the metaVal for this new binding
- br.bindings[(*br.curBknd).name] = *br.curBknd
- return *br.curBknd
- } else {
- log.Error(err)
- sl.err = err
- return nil
- }
- } else {
- err = errors.New(fmt.Sprintf("Backends exhausted in attempt to bind for metakey '%s' with value '%s'",
- sl.metaKey, sl.metaVal))
- log.Error(err)
- sl.err = err
- }
+ case *nbFrame:
+ if b, ok := br.bindings[sl.metaVal]; ok == true { // binding exists, just return it
+ return b
+ } else { // establish a new binding or error.
+ if sl.metaVal != "" {
+ err = errors.New(fmt.Sprintf("Attempt to route on non-existent metadata value '%s' in key '%s'",
+ sl.metaVal, sl.metaKey))
+ log.Error(err)
+ sl.err = err
+ return nil
}
- return nil
- default:
- return nil
+ if sl.mthdSlice[REQ_METHOD] != br.bindingMethod {
+ err = errors.New(fmt.Sprintf("Binding must occur with method %s but attempted with method %s",
+ br.bindingMethod, sl.mthdSlice[REQ_METHOD]))
+ log.Error(err)
+ sl.err = err
+ return nil
+ }
+ log.Debugf("MUST CREATE A NEW BINDING MAP ENTRY!!")
+ if len(br.bindings) < len(br.bkndClstr.backends) {
+ if *br.curBknd, err = br.bkndClstr.nextBackend(*br.curBknd, BE_SEQ_RR); err == nil {
+ // Use the name of the backend as the metaVal for this new binding
+ br.bindings[(*br.curBknd).name] = *br.curBknd
+ return *br.curBknd
+ } else {
+ log.Error(err)
+ sl.err = err
+ return nil
+ }
+ } else {
+ err = errors.New(fmt.Sprintf("Backends exhausted in attempt to bind for metakey '%s' with value '%s'",
+ sl.metaKey, sl.metaVal))
+ log.Error(err)
+ sl.err = err
+ }
+ }
+ return nil
+ default:
+ return nil
}
- return nil
}
func newBindingRouter(rconf *RouterConfig, config *RouteConfig) (Router, error) {
var rtrn_err bool = false
var err error = nil
- log.Debugf("Creating binding router %s",config.Name)
+ log.Debugf("Creating binding router %s", config.Name)
// A name must exist
if config.Name == "" {
log.Error("A router 'name' must be specified")
@@ -162,11 +161,11 @@
var bptr *backend
bptr = nil
br := BindingRouter{
- name:config.Name,
- grpcService:rconf.ProtoService,
- bindings:make(map[string]*backend),
+ name: config.Name,
+ grpcService: rconf.ProtoService,
+ bindings: make(map[string]*backend),
//methodMap:make(map[string]byte),
- curBknd:&bptr,
+ curBknd: &bptr,
//serialNo:0,
}
@@ -174,9 +173,9 @@
br.association = strIndex(rAssnNames, config.Binding.Association)
if br.association == 0 {
if config.Binding.Association == "" {
- log.Error("An binding association must be specified")
+ log.Error("An binding association must be specified")
} else {
- log.Errorf("The binding association '%s' is not valid", config.Binding.Association)
+ log.Errorf("The binding association '%s' is not valid", config.Binding.Association)
}
rtrn_err = true
}
@@ -203,7 +202,6 @@
br.bindingField = config.Binding.Field
}
-
// This has already been validated before this function
// is called so just use it.
for idx := range rTypeNames {
@@ -213,7 +211,7 @@
}
}
- // Create the backend cluster or link to an existing one
+ // Create the backend cluster or link to an existing one
ok := true
if br.bkndClstr, ok = bClusters[config.backendCluster.Name]; ok == false {
if br.bkndClstr, err = newBackendCluster(config.backendCluster); err != nil {
@@ -225,9 +223,8 @@
// HERE HERE HERE
if rtrn_err {
- return br,errors.New(fmt.Sprintf("Failed to create a new router '%s'",br.name))
+ return br, errors.New(fmt.Sprintf("Failed to create a new router '%s'", br.name))
}
-
- return br,nil
+ return br, nil
}
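
For context, a minimal illustrative sketch of the binding pattern exercised by Route above: a table that maps a metadata value to a backend and binds unseen values round-robin until the backends are exhausted. The type and field names (bindingTable, backend) are hypothetical and not part of the afrouter package.

package main

import (
	"errors"
	"fmt"
)

type backend struct{ name string }

type bindingTable struct {
	bindings map[string]*backend // metadata value -> bound backend
	backends []*backend          // available backends
	next     int                 // round-robin cursor for new bindings
}

// route returns the backend bound to metaVal, creating a new binding in
// round-robin order when the value has not been seen before.
func (bt *bindingTable) route(metaVal string) (*backend, error) {
	if be, ok := bt.bindings[metaVal]; ok {
		return be, nil
	}
	if len(bt.bindings) >= len(bt.backends) {
		return nil, errors.New("backends exhausted")
	}
	be := bt.backends[bt.next%len(bt.backends)]
	bt.next++
	bt.bindings[metaVal] = be
	return be, nil
}

func main() {
	bt := &bindingTable{
		bindings: make(map[string]*backend),
		backends: []*backend{{name: "vcore11"}, {name: "vcore21"}},
	}
	be, _ := bt.route("session-a")
	fmt.Println(be.name) // vcore11; "session-a" is now pinned to this backend
}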
diff --git a/afrouter/afrouter/codec.go b/afrouter/afrouter/codec.go
index 6090bdc..7147916 100644
--- a/afrouter/afrouter/codec.go
+++ b/afrouter/afrouter/codec.go
@@ -19,10 +19,10 @@
import (
"fmt"
- "sync"
- "google.golang.org/grpc"
"github.com/golang/protobuf/proto"
"github.com/opencord/voltha-go/common/log"
+ "google.golang.org/grpc"
+ "sync"
)
func Codec() grpc.Codec {
@@ -39,53 +39,53 @@
type sbFrame struct {
payload []byte
- router Router
- method string
- be *backend
- lck sync.Mutex
+ router Router
+ method string
+ be *backend
+ lck sync.Mutex
metaKey string
metaVal string
}
type nbFrame struct {
- payload []byte
- router Router
- be *backend
- err error
+ payload []byte
+ router Router
+ be *backend
+ err error
mthdSlice []string
- serNo chan uint64
- metaKey string
- metaVal string
+ serNo chan uint64
+ metaKey string
+ metaVal string
}
func (cdc *transparentRoutingCodec) Marshal(v interface{}) ([]byte, error) {
switch t := v.(type) {
- case *sbFrame:
- return t.payload, nil
- case *nbFrame:
- return t.payload, nil
- default:
- return cdc.parentCodec.Marshal(v)
+ case *sbFrame:
+ return t.payload, nil
+ case *nbFrame:
+ return t.payload, nil
+ default:
+ return cdc.parentCodec.Marshal(v)
}
}
func (cdc *transparentRoutingCodec) Unmarshal(data []byte, v interface{}) error {
switch t := v.(type) {
- case *sbFrame:
- t.payload = data
- // This is where the affinity is established on a northbound response
- t.router.ReplyHandler(v)
- return nil
- case *nbFrame:
- t.payload = data
- // This is were the afinity value is pulled from the payload
- // and the backend selected.
- t.be = t.router.Route(v)
- log.Debugf("Routing returned %v for method %s", t.be, t.mthdSlice[REQ_METHOD])
- return nil
- default:
- return cdc.parentCodec.Unmarshal(data,v)
+ case *sbFrame:
+ t.payload = data
+ // This is where the affinity is established on a northbound response
+ t.router.ReplyHandler(v)
+ return nil
+ case *nbFrame:
+ t.payload = data
+ // This is where the affinity value is pulled from the payload
+ // and the backend selected.
+ t.be = t.router.Route(v)
+ log.Debugf("Routing returned %v for method %s", t.be, t.mthdSlice[REQ_METHOD])
+ return nil
+ default:
+ return cdc.parentCodec.Unmarshal(data, v)
}
}
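
The codec reformatted above is essentially a pass-through wrapper. A minimal sketch of the same idea, assuming a hypothetical rawFrame type and a JSON codec standing in for the real parent proto codec:

package main

import (
	"encoding/json"
	"fmt"
)

// rawFrame carries an already-serialized gRPC message payload.
type rawFrame struct{ payload []byte }

// parentCodec mirrors the grpc.Codec method set (Marshal/Unmarshal/String).
type parentCodec interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(data []byte, v interface{}) error
	String() string
}

// jsonCodec is a stand-in parent codec used only for this example.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) String() string                             { return "json" }

// passthroughCodec forwards rawFrame payloads untouched and delegates every
// other type to its parent, which is the essence of transparent proxying.
type passthroughCodec struct{ parent parentCodec }

func (c passthroughCodec) Marshal(v interface{}) ([]byte, error) {
	if f, ok := v.(*rawFrame); ok {
		return f.payload, nil
	}
	return c.parent.Marshal(v)
}

func (c passthroughCodec) Unmarshal(data []byte, v interface{}) error {
	if f, ok := v.(*rawFrame); ok {
		f.payload = data
		return nil
	}
	return c.parent.Unmarshal(data, v)
}

func (c passthroughCodec) String() string { return "passthrough" }

func main() {
	c := passthroughCodec{parent: jsonCodec{}}
	b, _ := c.Marshal(&rawFrame{payload: []byte{0x0a, 0x03}})
	fmt.Println(len(b)) // 2: the payload passed through untouched
}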
diff --git a/afrouter/afrouter/config.go b/afrouter/afrouter/config.go
index a9a01eb..2cc2976 100644
--- a/afrouter/afrouter/config.go
+++ b/afrouter/afrouter/config.go
@@ -19,27 +19,27 @@
// Command line parameters and parsing
import (
- "os"
- "fmt"
- "flag"
- "path"
- "errors"
- "io/ioutil"
"encoding/json"
+ "errors"
+ "flag"
+ "fmt"
"github.com/opencord/voltha-go/common/log"
+ "io/ioutil"
+ "os"
+ "path"
)
func ParseCmd() (*Configuration, error) {
config := &Configuration{}
- cmdParse := flag.NewFlagSet(path.Base(os.Args[0]), flag.ContinueOnError);
+ cmdParse := flag.NewFlagSet(path.Base(os.Args[0]), flag.ContinueOnError)
config.ConfigFile = cmdParse.String("config", "arouter.json", "The configuration file for the affinity router")
config.LogLevel = cmdParse.Int("logLevel", 0, "The log level for the affinity router")
config.GrpcLog = cmdParse.Bool("grpclog", false, "Enable GRPC logging")
- err := cmdParse.Parse(os.Args[1:]);
+ err := cmdParse.Parse(os.Args[1:])
if err != nil {
//return err
- return nil, errors.New("Error parsing the command line");
+ return nil, errors.New("Error parsing the command line")
}
//if(!cmdParse.Parsed()) {
//}
@@ -48,72 +48,71 @@
// Configuration file loading and parsing
type Configuration struct {
- ConfigFile * string
- LogLevel * int
- GrpcLog * bool
- Servers []ServerConfig `json:"servers"`
- Ports PortConfig `json:"ports"`
- ServerCertificates ServerCertConfig `json:"serverCertificates"`
- ClientCertificates ClientCertConfig `json:"clientCertificates"`
- BackendClusters []BackendClusterConfig `json:"backend_clusters"`
- Routers []RouterConfig `json:"routers"`
- Api ApiConfig
+ ConfigFile *string
+ LogLevel *int
+ GrpcLog *bool
+ Servers []ServerConfig `json:"servers"`
+ Ports PortConfig `json:"ports"`
+ ServerCertificates ServerCertConfig `json:"serverCertificates"`
+ ClientCertificates ClientCertConfig `json:"clientCertificates"`
+ BackendClusters []BackendClusterConfig `json:"backend_clusters"`
+ Routers []RouterConfig `json:"routers"`
+ Api ApiConfig
}
type RouterConfig struct {
- Name string `json:"name"`
- ProtoService string `json:"service"`
- ProtoPackage string `json:"package"`
- Routes []RouteConfig `json:"routes"`
+ Name string `json:"name"`
+ ProtoService string `json:"service"`
+ ProtoPackage string `json:"package"`
+ Routes []RouteConfig `json:"routes"`
}
type RouteConfig struct {
- Name string `json:"name"`
- Type string `json:"type"`
- ProtoFile string `json:"proto_descriptor"`
- Association string `json:"association"`
- RouteField string `json:"routing_field"`
- Methods []string `json:"methods"` // The GRPC methods to route using the route field
- NbBindingMethods []string `json:"nb_binding_methods"`
- BackendCluster string `json:"backend_cluster"`
- Binding BindingConfig `json:"binding"`
- Overrides []OverrideConfig `json:"overrides"`
- backendCluster *BackendClusterConfig
+ Name string `json:"name"`
+ Type string `json:"type"`
+ ProtoFile string `json:"proto_descriptor"`
+ Association string `json:"association"`
+ RouteField string `json:"routing_field"`
+ Methods []string `json:"methods"` // The GRPC methods to route using the route field
+ NbBindingMethods []string `json:"nb_binding_methods"`
+ BackendCluster string `json:"backend_cluster"`
+ Binding BindingConfig `json:"binding"`
+ Overrides []OverrideConfig `json:"overrides"`
+ backendCluster *BackendClusterConfig
}
type BindingConfig struct {
- Type string `json:"type"`
- Field string `json:"field"`
- Method string `json:"method"`
+ Type string `json:"type"`
+ Field string `json:"field"`
+ Method string `json:"method"`
Association string `json:"association"`
-
}
type OverrideConfig struct {
- Methods []string `json:"methods"`
- Method string `json:"method"`
- RouteField string `json:"routing_field"`
+ Methods []string `json:"methods"`
+ Method string `json:"method"`
+ RouteField string `json:"routing_field"`
}
// Backend configuration
type BackendClusterConfig struct {
- Name string `json:"name"`
+ Name string `json:"name"`
Backends []BackendConfig `json:"backends"`
}
type BackendConfig struct {
- Name string `json:"name"`
- Type string `json:"type"`
- Association AssociationConfig `json:"association"`
+ Name string `json:"name"`
+ Type string `json:"type"`
+ Association AssociationConfig `json:"association"`
Connections []ConnectionConfig `json:"connections"`
}
type AssociationConfig struct {
Strategy string `json:"strategy"`
Location string `json:"location"`
- Field string `json:"field"`
- Key string `json:"key"`
+ Field string `json:"field"`
+ Key string `json:"key"`
}
type ConnectionConfig struct {
@@ -125,51 +124,51 @@
// Server configuration
type ServerConfig struct {
- Name string `json:"name"`
- Port uint `json:"port"`
- Addr string `json:"address"`
- Type string `json:"type"`
+ Name string `json:"name"`
+ Port uint `json:"port"`
+ Addr string `json:"address"`
+ Type string `json:"type"`
Routers []RouterPackage `json:"routers"`
routers map[string]*RouterConfig
}
type RouterPackage struct {
- Router string `json:"router"`
+ Router string `json:"router"`
Package string `json:"package"`
}
// Port configuration
type PortConfig struct {
- GrpcPort uint `json:"grpcPort"`
- StreamingGrpcPort uint `json:"streamingGrpcPort"`
- TlsGrpcPort uint `json:"tlsGrpcPort"`
+ GrpcPort uint `json:"grpcPort"`
+ StreamingGrpcPort uint `json:"streamingGrpcPort"`
+ TlsGrpcPort uint `json:"tlsGrpcPort"`
TlsStreamingGrpcPort uint `json:"tlsStreamingGrpcPort"`
- ControlPort uint `json:"controlPort"`
+ ControlPort uint `json:"controlPort"`
}
// Server Certificate configuration
type ServerCertConfig struct {
- GrpcCert string `json:"grpcCertificate"` // File path to the certificate file
- GrpcKey string `json:"grpcKey"` // File path to the key file
- GrpcCsr string `json:"grpcCsr"` // File path to the CSR file
+ GrpcCert string `json:"grpcCertificate"` // File path to the certificate file
+ GrpcKey string `json:"grpcKey"` // File path to the key file
+ GrpcCsr string `json:"grpcCsr"` // File path to the CSR file
}
// Client Certificate configuration
type ClientCertConfig struct {
- GrpcCert string `json:"grpcCertificate"` // File path to the certificate file
- GrpcKey string `json:"grpcKey"` // File path to the key file
- GrpcCsr string `json:"grpcCsr"` // File path to the CSR file
+ GrpcCert string `json:"grpcCertificate"` // File path to the certificate file
+ GrpcKey string `json:"grpcKey"` // File path to the key file
+ GrpcCsr string `json:"grpcCsr"` // File path to the CSR file
}
// Api configuration
type ApiConfig struct {
Addr string `json:"address"`
- Port uint `json:"port"`
+ Port uint `json:"port"`
}
-func (conf * Configuration) LoadConfig() error {
+func (conf *Configuration) LoadConfig() error {
- configF, err := os.Open(*conf.ConfigFile);
+ configF, err := os.Open(*conf.ConfigFile)
log.Info("Loading configuration from: ", *conf.ConfigFile)
if err != nil {
log.Error(err)
@@ -195,25 +194,25 @@
// references to backend_cluster in the routers.
// Resolve router references for the servers
- log.Debug("Resolving references in the config file");
- for k,_ := range conf.Servers {
+ log.Debug("Resolving references in the config file")
+ for k, _ := range conf.Servers {
//s.routers =make(map[string]*RouterConfig)
conf.Servers[k].routers = make(map[string]*RouterConfig)
- for _,rPkg := range conf.Servers[k].Routers {
+ for _, rPkg := range conf.Servers[k].Routers {
var found bool = false
// Locate the router "r" in the top level Routers array
- log.Debugf("Resolving router reference to router '%s' from server '%s'",rPkg.Router, conf.Servers[k].Name)
+ log.Debugf("Resolving router reference to router '%s' from server '%s'", rPkg.Router, conf.Servers[k].Name)
for rk, _ := range conf.Routers {
if conf.Routers[rk].Name == rPkg.Router && !found {
log.Debugf("Reference to router '%s' found for package '%s'", rPkg.Router, rPkg.Package)
conf.Servers[k].routers[rPkg.Package] = &conf.Routers[rk]
found = true
} else if conf.Routers[rk].Name == rPkg.Router && found {
- if _,ok := conf.Servers[k].routers[rPkg.Package]; !ok {
+ if _, ok := conf.Servers[k].routers[rPkg.Package]; !ok {
log.Debugf("Reference to router '%s' found for package '%s'", rPkg.Router, rPkg.Package)
conf.Servers[k].routers[rPkg.Package] = &conf.Routers[rk]
} else {
- err := errors.New(fmt.Sprintf("Duplicate router '%s' defined for package '%s'",rPkg.Router, rPkg.Package))
+ err := errors.New(fmt.Sprintf("Duplicate router '%s' defined for package '%s'", rPkg.Router, rPkg.Package))
log.Error(err)
return err
}
@@ -228,30 +227,29 @@
}
// Resolve backend references for the routers
- for rk,rv := range conf.Routers {
- for rtk,rtv := range rv.Routes {
+ for rk, rv := range conf.Routers {
+ for rtk, rtv := range rv.Routes {
var found bool = false
- log.Debugf("Resolving backend reference to %s from router %s",rtv.BackendCluster, rv.Name)
- for bek,bev := range conf.BackendClusters {
+ log.Debugf("Resolving backend reference to %s from router %s", rtv.BackendCluster, rv.Name)
+ for bek, bev := range conf.BackendClusters {
log.Debugf("Checking cluster %s", conf.BackendClusters[bek].Name)
if rtv.BackendCluster == bev.Name && !found {
conf.Routers[rk].Routes[rtk].backendCluster = &conf.BackendClusters[bek]
found = true
} else if rtv.BackendCluster == bev.Name && found {
- err := errors.New(fmt.Sprintf("Duplicate backend defined, %s",conf.BackendClusters[bek].Name))
+ err := errors.New(fmt.Sprintf("Duplicate backend defined, %s", conf.BackendClusters[bek].Name))
log.Error(err)
return err
}
}
- if !found {
+ if !found {
err := errors.New(fmt.Sprintf("Backend %s for router %s not found in config",
- rtv.BackendCluster, rv.Name))
+ rtv.BackendCluster, rv.Name))
log.Error(err)
return err
}
}
}
-
return nil
}
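
A minimal sketch of the JSON-tag-driven decoding that LoadConfig relies on, using a made-up configuration fragment and simplified struct names:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

type routeConfig struct {
	Type           string `json:"type"`
	BackendCluster string `json:"backend_cluster"`
}

type routerConfig struct {
	Name    string        `json:"name"`
	Service string        `json:"service"`
	Routes  []routeConfig `json:"routes"`
}

func main() {
	// An invented fragment in the same shape as the router section of arouter.json.
	data := []byte(`{
		"name": "vcore",
		"service": "VolthaService",
		"routes": [{"type": "round_robin", "backend_cluster": "vcore"}]
	}`)
	var rc routerConfig
	if err := json.Unmarshal(data, &rc); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("router %s routes to cluster %s\n", rc.Name, rc.Routes[0].BackendCluster)
}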
diff --git a/afrouter/afrouter/helpers.go b/afrouter/afrouter/helpers.go
index 4d7362b..441b4b9 100644
--- a/afrouter/afrouter/helpers.go
+++ b/afrouter/afrouter/helpers.go
@@ -20,7 +20,7 @@
//import "github.com/opencord/voltha-go/common/log"
func strIndex(ar []string, match string) int {
- for idx,v := range ar {
+ for idx, v := range ar {
if v == match {
return idx
}
diff --git a/afrouter/afrouter/method-router.go b/afrouter/afrouter/method-router.go
index 379bf11..5e11fa6 100644
--- a/afrouter/afrouter/method-router.go
+++ b/afrouter/afrouter/method-router.go
@@ -18,38 +18,38 @@
package afrouter
import (
- "fmt"
"errors"
+ "fmt"
+ "github.com/opencord/voltha-go/common/log"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
- "github.com/opencord/voltha-go/common/log"
)
const NoMeta = "nometa"
type MethodRouter struct {
- name string
+ name string
service string
- mthdRt map[string]map[string]Router // map of [metadata][method]
+ mthdRt map[string]map[string]Router // map of [metadata][method]
}
func newMethodRouter(config *RouterConfig) (Router, error) {
- mr := MethodRouter{name:config.Name,service:config.ProtoService,mthdRt:make(map[string]map[string]Router)}
+ mr := MethodRouter{name: config.Name, service: config.ProtoService, mthdRt: make(map[string]map[string]Router)}
mr.mthdRt[NoMeta] = make(map[string]Router) // For routes not needing metadata (all except binding at this time)
log.Debugf("Processing MethodRouter config %v", *config)
if len(config.Routes) == 0 {
return nil, errors.New(fmt.Sprintf("Router %s must have at least one route", config.Name))
}
- for _,rtv := range config.Routes {
+ for _, rtv := range config.Routes {
//log.Debugf("Processing route: %v",rtv)
var idx1 string
- r,err := newSubRouter(config, &rtv)
+ r, err := newSubRouter(config, &rtv)
if err != nil {
return nil, err
}
if rtv.Type == "binding" {
idx1 = rtv.Binding.Field
- if _,ok := mr.mthdRt[idx1]; ok == false { // /First attempt on this key
+ if _, ok := mr.mthdRt[idx1]; ok == false { // First attempt on this key
mr.mthdRt[idx1] = make(map[string]Router)
}
} else {
@@ -62,8 +62,8 @@
if rtv.Methods[0] == "*" {
return r, nil
} else {
- log.Debugf("Setting router '%s' for single method '%s'",r.Name(),rtv.Methods[0])
- if _,ok := mr.mthdRt[idx1][rtv.Methods[0]]; ok == false {
+ log.Debugf("Setting router '%s' for single method '%s'", r.Name(), rtv.Methods[0])
+ if _, ok := mr.mthdRt[idx1][rtv.Methods[0]]; ok == false {
mr.mthdRt[idx1][rtv.Methods[0]] = r
} else {
err := errors.New(fmt.Sprintf("Attempt to define method %s for 2 routes: %s & %s", rtv.Methods[0],
@@ -73,10 +73,10 @@
}
}
default:
- for _,m := range rtv.Methods {
+ for _, m := range rtv.Methods {
log.Debugf("Processing Method %s", m)
- if _,ok := mr.mthdRt[idx1][m]; ok == false {
- log.Debugf("Setting router '%s' for method '%s'",r.Name(),m)
+ if _, ok := mr.mthdRt[idx1][m]; ok == false {
+ log.Debugf("Setting router '%s' for method '%s'", r.Name(), m)
mr.mthdRt[idx1][m] = r
} else {
err := errors.New(fmt.Sprintf("Attempt to define method %s for 2 routes: %s & %s", m, r.Name(), mr.mthdRt[idx1][m].Name()))
@@ -98,69 +98,68 @@
return mr.service
}
-func (mr MethodRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string,string,error) {
+func (mr MethodRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string, string, error) {
var rtrnK string = NoMeta
var rtrnV string = ""
// Get the metadata from the server stream
- md, ok := metadata.FromIncomingContext(serverStream.Context())
+ md, ok := metadata.FromIncomingContext(serverStream.Context())
if !ok {
- return rtrnK, rtrnV, errors.New("Could not get a server stream metadata")
- }
+ return rtrnK, rtrnV, errors.New("Could not get a server stream metadata")
+ }
// Determine if one of the method routing keys exists in the metadata
- for k,_ := range mr.mthdRt {
- if _,ok := md[k]; ok == true {
+ for k, _ := range mr.mthdRt {
+ if _, ok := md[k]; ok == true {
rtrnV = md[k][0]
rtrnK = k
break
}
}
- return rtrnK,rtrnV,nil
+ return rtrnK, rtrnV, nil
}
func (mr MethodRouter) ReplyHandler(sel interface{}) error {
switch sl := sel.(type) {
- case *sbFrame:
- if r,ok := mr.mthdRt[NoMeta][sl.method]; ok == true {
- return r.ReplyHandler(sel)
- }
- // TODO: this case should also be an error
- default: //TODO: This should really be a big error
- // A reply handler should only be called on the sbFrame
- return nil
+ case *sbFrame:
+ if r, ok := mr.mthdRt[NoMeta][sl.method]; ok == true {
+ return r.ReplyHandler(sel)
+ }
+ // TODO: this case should also be an error
+ default: //TODO: This should really be a big error
+ // A reply handler should only be called on the sbFrame
+ return nil
}
return nil
}
func (mr MethodRouter) Route(sel interface{}) *backend {
switch sl := sel.(type) {
- case *nbFrame:
- if r,ok := mr.mthdRt[sl.metaKey][sl.mthdSlice[REQ_METHOD]]; ok == true {
- return r.Route(sel)
- }
- log.Errorf("Attept to route on non-existent method '%s'", sl.mthdSlice[REQ_METHOD])
- return nil
- default:
- return nil
+ case *nbFrame:
+ if r, ok := mr.mthdRt[sl.metaKey][sl.mthdSlice[REQ_METHOD]]; ok == true {
+ return r.Route(sel)
+ }
+ log.Errorf("Attept to route on non-existent method '%s'", sl.mthdSlice[REQ_METHOD])
+ return nil
+ default:
+ return nil
}
- return nil
}
-func (mr MethodRouter) BackendCluster(mthd string, metaKey string) (*backendCluster,error) {
- if r,ok := mr.mthdRt[metaKey][mthd]; ok == true {
+func (mr MethodRouter) BackendCluster(mthd string, metaKey string) (*backendCluster, error) {
+ if r, ok := mr.mthdRt[metaKey][mthd]; ok == true {
return r.BackendCluster(mthd, metaKey)
}
- err := errors.New(fmt.Sprintf("No backend cluster exists for method '%s' using meta key '%s'", mthd,metaKey))
+ err := errors.New(fmt.Sprintf("No backend cluster exists for method '%s' using meta key '%s'", mthd, metaKey))
log.Error(err)
return nil, err
}
func (mr MethodRouter) FindBackendCluster(beName string) *backendCluster {
- for _,meta := range mr.mthdRt {
- for _,r := range meta {
- if rtrn := r.FindBackendCluster(beName); rtrn != nil {
+ for _, meta := range mr.mthdRt {
+ for _, r := range meta {
+ if rtrn := r.FindBackendCluster(beName); rtrn != nil {
return rtrn
}
}
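
A minimal sketch of the two-level [metadata key][method] lookup that MethodRouter keeps in mthdRt; the metadata key and handler labels below are invented for illustration:

package main

import "fmt"

func main() {
	const noMeta = "nometa"
	// metadata key -> gRPC method name -> the router that should handle it
	routes := map[string]map[string]string{
		noMeta:           {"ListDevices": "round-robin", "GetDevice": "affinity"},
		"voltha_backend": {"Subscribe": "binding"},
	}
	lookup := func(metaKey, method string) string {
		if r, ok := routes[metaKey][method]; ok {
			return r
		}
		return "no route"
	}
	fmt.Println(lookup(noMeta, "GetDevice"))           // affinity
	fmt.Println(lookup("voltha_backend", "Subscribe")) // binding
	fmt.Println(lookup(noMeta, "Unknown"))             // no route
}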
diff --git a/afrouter/afrouter/round-robin-router.go b/afrouter/afrouter/round-robin-router.go
index 8201541..65d883a 100644
--- a/afrouter/afrouter/round-robin-router.go
+++ b/afrouter/afrouter/round-robin-router.go
@@ -18,18 +18,18 @@
package afrouter
import (
- "fmt"
"errors"
- "google.golang.org/grpc"
+ "fmt"
"github.com/opencord/voltha-go/common/log"
+ "google.golang.org/grpc"
)
type RoundRobinRouter struct {
- name string
- routerType int // TODO: Likely not needed.
+ name string
+ routerType int // TODO: Likely not needed.
grpcService string
- bkndClstr *backendCluster
- curBknd **backend
+ bkndClstr *backendCluster
+ curBknd **backend
}
func newRoundRobinRouter(rconf *RouterConfig, config *RouteConfig) (Router, error) {
@@ -57,9 +57,9 @@
var bptr *backend
bptr = nil
rr := RoundRobinRouter{
- name:config.Name,
- grpcService:rconf.ProtoService,
- curBknd:&bptr,
+ name: config.Name,
+ grpcService: rconf.ProtoService,
+ curBknd: &bptr,
}
// This has already been validated before this function
@@ -71,7 +71,7 @@
}
}
- // Create the backend cluster or link to an existing one
+ // Create the backend cluster or link to an existing one
ok := true
if rr.bkndClstr, ok = bClusters[config.backendCluster.Name]; ok == false {
if rr.bkndClstr, err = newBackendCluster(config.backendCluster); err != nil {
@@ -81,49 +81,47 @@
}
if rtrn_err {
- return rr,errors.New(fmt.Sprintf("Failed to create a new router '%s'",rr.name))
+ return rr, errors.New(fmt.Sprintf("Failed to create a new router '%s'", rr.name))
}
- return rr,nil
+ return rr, nil
}
-func (rr RoundRobinRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string,string,error) {
- return "","",nil
+func (rr RoundRobinRouter) GetMetaKeyVal(serverStream grpc.ServerStream) (string, string, error) {
+ return "", "", nil
}
-func (rr RoundRobinRouter) BackendCluster(s string, mk string) (*backendCluster,error) {
+func (rr RoundRobinRouter) BackendCluster(s string, mk string) (*backendCluster, error) {
return rr.bkndClstr, nil
}
-func (rr RoundRobinRouter) Name() (string) {
+func (rr RoundRobinRouter) Name() string {
return rr.name
}
-func(rr RoundRobinRouter) Route(sel interface{}) (*backend) {
+func (rr RoundRobinRouter) Route(sel interface{}) *backend {
var err error
switch sl := sel.(type) {
- case *nbFrame:
- // Since this is a round robin router just get the next backend
- if *rr.curBknd, err = rr.bkndClstr.nextBackend(*rr.curBknd,BE_SEQ_RR); err == nil {
- return *rr.curBknd
- } else {
- sl.err = err
- return nil
- }
- default:
- log.Errorf("Internal: invalid data type in Route call %v", sel);
+ case *nbFrame:
+ // Since this is a round robin router just get the next backend
+ if *rr.curBknd, err = rr.bkndClstr.nextBackend(*rr.curBknd, BE_SEQ_RR); err == nil {
+ return *rr.curBknd
+ } else {
+ sl.err = err
return nil
+ }
+ default:
+ log.Errorf("Internal: invalid data type in Route call %v", sel)
+ return nil
}
- log.Errorf("Round robin error %v", err);
- return nil
}
-func (rr RoundRobinRouter) Service() (string) {
+func (rr RoundRobinRouter) Service() string {
return rr.grpcService
}
-func (rr RoundRobinRouter) FindBackendCluster(becName string) (*backendCluster) {
- if becName == rr.bkndClstr.name {
+func (rr RoundRobinRouter) FindBackendCluster(becName string) *backendCluster {
+ if becName == rr.bkndClstr.name {
return rr.bkndClstr
}
return nil
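
A minimal sketch of the sequential round-robin selection this router delegates to its backend cluster, reduced to a wrapping cursor over a slice of made-up backend names:

package main

import "fmt"

type cluster struct {
	backends []string
	cur      int
}

// next returns the following backend in sequence, wrapping at the end.
func (c *cluster) next() string {
	be := c.backends[c.cur]
	c.cur = (c.cur + 1) % len(c.backends)
	return be
}

func main() {
	c := &cluster{backends: []string{"vcore11", "vcore21", "vcore31"}}
	for i := 0; i < 4; i++ {
		fmt.Println(c.next()) // vcore11, vcore21, vcore31, vcore11
	}
}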
diff --git a/afrouter/afrouter/router.go b/afrouter/afrouter/router.go
index f1e729f..f71bd30 100644
--- a/afrouter/afrouter/router.go
+++ b/afrouter/afrouter/router.go
@@ -18,38 +18,38 @@
package afrouter
import (
- "fmt"
"errors"
+ "fmt"
"google.golang.org/grpc"
)
const (
- RT_RPC_AFFINITY_MESSAGE = iota+1
- RT_RPC_AFFINITY_HEADER = iota+1
- RT_BINDING = iota+1
- RT_ROUND_ROBIN = iota+1
+ RT_RPC_AFFINITY_MESSAGE = iota + 1
+ RT_RPC_AFFINITY_HEADER = iota + 1
+ RT_BINDING = iota + 1
+ RT_ROUND_ROBIN = iota + 1
)
// String names for display in error messages.
-var rTypeNames = []string{"","rpc_affinity_message","rpc_affinity_header","binding", "round_robin"}
-var rAssnNames = []string{"","round_robin"}
+var rTypeNames = []string{"", "rpc_affinity_message", "rpc_affinity_header", "binding", "round_robin"}
+var rAssnNames = []string{"", "round_robin"}
var allRouters map[string]Router = make(map[string]Router)
// The router interface
type Router interface {
- Name() (string)
+ Name() string
Route(interface{}) *backend
- Service() (string)
+ Service() string
BackendCluster(string, string) (*backendCluster, error)
- FindBackendCluster(string) (*backendCluster)
+ FindBackendCluster(string) *backendCluster
ReplyHandler(interface{}) error
- GetMetaKeyVal(serverStream grpc.ServerStream) (string,string,error)
+ GetMetaKeyVal(serverStream grpc.ServerStream) (string, string, error)
}
func newRouter(config *RouterConfig) (Router, error) {
- r,err := newMethodRouter(config)
- if err == nil {
+ r, err := newMethodRouter(config)
+ if err == nil {
allRouters[r.Name()] = r
}
return r, err
@@ -59,19 +59,19 @@
idx := strIndex(rTypeNames, config.Type)
switch idx {
case RT_RPC_AFFINITY_MESSAGE:
- r,err := newAffinityRouter(rconf, config)
+ r, err := newAffinityRouter(rconf, config)
if err == nil {
allRouters[rconf.Name+config.Name] = r
}
return r, err
case RT_BINDING:
- r,err := newBindingRouter(rconf, config)
+ r, err := newBindingRouter(rconf, config)
if err == nil {
allRouters[rconf.Name+config.Name] = r
}
return r, err
case RT_ROUND_ROBIN:
- r,err := newRoundRobinRouter(rconf, config)
+ r, err := newRoundRobinRouter(rconf, config)
if err == nil {
allRouters[rconf.Name+config.Name] = r
}
@@ -79,6 +79,4 @@
default:
return nil, errors.New(fmt.Sprintf("Internal error, undefined router type: %s", config.Type))
}
-
- return nil, errors.New(fmt.Sprintf("Unrecognized router type '%s'",config.Type))
}
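
A minimal sketch of the name-to-constant resolution behind rTypeNames and strIndex; it assumes, as the callers above do, that index 0 serves as the "not found" sentinel:

package main

import "fmt"

const (
	rtAffinityMessage = iota + 1
	rtAffinityHeader
	rtBinding
	rtRoundRobin
)

var typeNames = []string{"", "rpc_affinity_message", "rpc_affinity_header", "binding", "round_robin"}

// strIndex returns the position of match in ar, or 0 when it is absent
// (index 0 holds the empty sentinel entry).
func strIndex(ar []string, match string) int {
	for idx, v := range ar {
		if v == match {
			return idx
		}
	}
	return 0
}

func main() {
	fmt.Println(strIndex(typeNames, "binding") == rtBinding) // true
	fmt.Println(strIndex(typeNames, "no_such_type"))         // 0 -> invalid type
}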
diff --git a/afrouter/afrouter/server.go b/afrouter/afrouter/server.go
index 17c3b4f..0691da1 100644
--- a/afrouter/afrouter/server.go
+++ b/afrouter/afrouter/server.go
@@ -18,14 +18,14 @@
package afrouter
import (
- "fmt"
- "net"
- "regexp"
"errors"
- "strconv"
+ "fmt"
+ "github.com/opencord/voltha-go/common/log"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
- "github.com/opencord/voltha-go/common/log"
+ "net"
+ "regexp"
+ "strconv"
)
var (
@@ -34,36 +34,36 @@
ClientStreams: true,
}
)
+
const (
- REQ_ALL = 0
+ REQ_ALL = 0
REQ_PACKAGE = 1
REQ_SERVICE = 2
- REQ_METHOD = 3
+ REQ_METHOD = 3
)
type server struct {
- running bool
- name string
- stype nbi
+ running bool
+ name string
+ stype nbi
proxyListener net.Listener
- routers map[string]map[string]Router
- proxyServer *grpc.Server
+ routers map[string]map[string]Router
+ proxyServer *grpc.Server
}
type nbRequest struct {
- srv interface{}
+ srv interface{}
serverStream grpc.ServerStream
- r Router
- mthdSlice []string
- metaKey string // There should be at most one key specified. More than one is an error.
- metaVal string // This is the value extracted from the meta key if it exists or "" otherwise
+ r Router
+ mthdSlice []string
+ metaKey string // There should be at most one key specified. More than one is an error.
+ metaVal string // This is the value extracted from the meta key if it exists or "" otherwise
}
var mthdSlicerExp string = `^/([a-zA-Z][a-zA-Z0-9]+)\.([a-zA-Z][a-zA-Z0-9]+)/([a-zA-Z][a-zA-Z0-9]+)`
var mthdSlicer *regexp.Regexp // The compiled regex to extract the package/service/method
-
-func newServer(config *ServerConfig) (*server,error) {
+func newServer(config *ServerConfig) (*server, error) {
var err error = nil
var rtrn_err bool = false
var srvr *server
@@ -71,7 +71,7 @@
// Validate the configuration
// There should be a name
if config.Name == "" {
- log.Error("A server has been defined with no name")
+ log.Error("A server has been defined with no name")
rtrn_err = true
}
// Validate that there's a port specified
@@ -86,15 +86,15 @@
}
if config.Type != "grpc" && config.Type != "streaming_grpc" {
if config.Type == "" {
- log.Errorf("A server 'type' must be defined for server %s",config.Name)
+ log.Errorf("A server 'type' must be defined for server %s", config.Name)
} else {
- log.Errorf("The server type must be either 'grpc' or 'streaming_grpc' "+
- "but '%s' was found for server '%s'", config.Type, config.Name)
+ log.Errorf("The server type must be either 'grpc' or 'streaming_grpc' "+
+ "but '%s' was found for server '%s'", config.Type, config.Name)
}
rtrn_err = true
}
if len(config.Routers) == 0 {
- log.Errorf("At least one router must be specified for server '%s'", config.Name)
+ log.Errorf("At least one router must be specified for server '%s'", config.Name)
rtrn_err = true
}
@@ -102,25 +102,25 @@
return nil, errors.New("Server configuration failed")
} else {
// The configuration is valid, create a server and configure it.
- srvr = &server{name:config.Name,routers:make(map[string]map[string]Router)}
+ srvr = &server{name: config.Name, routers: make(map[string]map[string]Router)}
// The listener
if srvr.proxyListener, err =
- net.Listen("tcp", config.Addr + ":"+
- strconv.Itoa(int(config.Port))); err != nil {
+ net.Listen("tcp", config.Addr+":"+
+ strconv.Itoa(int(config.Port))); err != nil {
log.Error(err)
return nil, err
}
// Create the routers
log.Debugf("Configuring the routers for server %s", srvr.name)
- for p,r := range config.routers {
- log.Debugf("Processing router %s for package %s", r.Name,p)
- if dr,err := newRouter(r); err != nil {
- log.Error(err)
- return nil, err
- } else {
+ for p, r := range config.routers {
+ log.Debugf("Processing router %s for package %s", r.Name, p)
+ if dr, err := newRouter(r); err != nil {
+ log.Error(err)
+ return nil, err
+ } else {
log.Debugf("Adding router %s to the server %s for package %s and service %s",
- dr.Name(), srvr.name, p, dr.Service())
- if _,ok := srvr.routers[p]; ok {
+ dr.Name(), srvr.name, p, dr.Service())
+ if _, ok := srvr.routers[p]; ok {
srvr.routers[p][dr.Service()] = dr
} else {
srvr.routers[p] = make(map[string]Router)
@@ -140,7 +140,7 @@
return srvr, nil
}
-func (s *server) Name() (string) {
+func (s *server) Name() string {
return s.name
}
@@ -148,8 +148,7 @@
return s.handler
}
-
-func (s *server) getRouter(pkg *string, service *string) (Router,bool) {
+func (s *server) getRouter(pkg *string, service *string) (Router, bool) {
if fn, ok := s.routers[*pkg][*service]; ok { // Both specified
return fn, ok
} else if fn, ok = s.routers["*"][*service]; ok { // Package wild card
@@ -159,18 +158,17 @@
} else if fn, ok = s.routers["*"]["*"]; ok { // Both Wildcarded
return fn, ok
} else {
- return nil,false
+ return nil, false
}
}
-
func (s *server) handler(srv interface{}, serverStream grpc.ServerStream) error {
// Determine what router is intended to handle this request
fullMethodName, ok := grpc.MethodFromServerStream(serverStream)
if !ok {
return grpc.Errorf(codes.Internal, "lowLevelServerStream doesn't exist in context")
}
- log.Debugf("Processing grpc request %s on server %s",fullMethodName,s.name)
+ log.Debugf("Processing grpc request %s on server %s", fullMethodName, s.name)
// The full method name is structured as follows:
// <package name>.<service>/<method>
mthdSlice := mthdSlicer.FindStringSubmatch(fullMethodName)
@@ -179,7 +177,7 @@
} else {
log.Debugf("Sliced full method %s: %v", fullMethodName, mthdSlice)
}
- r, ok := s.getRouter(&mthdSlice[REQ_PACKAGE],&mthdSlice[REQ_SERVICE])
+ r, ok := s.getRouter(&mthdSlice[REQ_PACKAGE], &mthdSlice[REQ_SERVICE])
//fn, ok := s.routers[mthdSlice[REQ_PACKAGE]][mthdSlice[REQ_SERVICE]]
if !ok {
// TODO: Should this be punted to a default transparent router??
@@ -193,7 +191,7 @@
}
log.Debugf("Selected router %s\n", r.Name())
- mk,mv,err := r.GetMetaKeyVal(serverStream)
+ mk, mv, err := r.GetMetaKeyVal(serverStream)
if err != nil {
log.Error(err)
return err
@@ -202,13 +200,10 @@
//nbR := &nbRequest(srv:srv,serverStream:serverStream,r:r,mthdSlice:mthdSlice,metaKey:mk,metaVal:mv)
// Extract the cluster from the selected router and use it to manage the transfer
- if bkndClstr,err := r.BackendCluster(mthdSlice[REQ_METHOD], mk); err != nil {
+ if bkndClstr, err := r.BackendCluster(mthdSlice[REQ_METHOD], mk); err != nil {
return err
} else {
//return bkndClstr.handler(nbR)
return bkndClstr.handler(srv, serverStream, r, mthdSlice, mk, mv)
}
-
- return grpc.Errorf(codes.Internal, "gRPC proxying should never reach this stage.")
}
-
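
A minimal sketch of slicing a full gRPC method name with the same style of regular expression the handler uses; the service name in the example is a placeholder:

package main

import (
	"fmt"
	"regexp"
)

// Full method names arrive as /<package>.<service>/<method>.
var mthdSlicer = regexp.MustCompile(`^/([a-zA-Z][a-zA-Z0-9]+)\.([a-zA-Z][a-zA-Z0-9]+)/([a-zA-Z][a-zA-Z0-9]+)`)

func main() {
	full := "/voltha.VolthaService/ListDevices"
	parts := mthdSlicer.FindStringSubmatch(full)
	// parts[0] is the whole match; 1..3 are package, service and method.
	fmt.Println(parts[1], parts[2], parts[3]) // voltha VolthaService ListDevices
}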
diff --git a/afrouter/afrouter/signals.go b/afrouter/afrouter/signals.go
index 6cc32ad..5416fda 100644
--- a/afrouter/afrouter/signals.go
+++ b/afrouter/afrouter/signals.go
@@ -22,17 +22,16 @@
package afrouter
import (
- "os"
- "syscall"
- "os/signal"
"github.com/opencord/voltha-go/common/log"
+ "os"
+ "os/signal"
+ "syscall"
)
var errChan = make(chan error)
var doneChan = make(chan error)
var holdChan = make(chan int)
-
func InitExitHandler() error {
// Start the signal handler
@@ -70,16 +69,16 @@
if arProxy != nil {
for _, srvr := range arProxy.servers {
if srvr.running {
- log.With(log.Fields{"server":srvr.name}).Debug("Closing server")
- srvr.proxyServer.GracefulStop();
- srvr.proxyListener.Close();
+ log.With(log.Fields{"server": srvr.name}).Debug("Closing server")
+ srvr.proxyServer.GracefulStop()
+ srvr.proxyListener.Close()
}
}
}
- for _,cl := range bClusters {
+ for _, cl := range bClusters {
for _, bknd := range cl.backends {
log.Debugf("Closing backend %s", bknd.name)
- for _,conn := range bknd.connections {
+ for _, conn := range bknd.connections {
log.Debugf("Closing connection %s", conn.name)
conn.close()
}
@@ -88,4 +87,3 @@
doneChan <- err
//os.Exit(0)
}
-
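
A minimal sketch of the os/signal pattern an exit handler like this one is built around:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigChan := make(chan os.Signal, 1)
	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)

	fmt.Println("waiting for a termination signal...")
	s := <-sigChan
	fmt.Printf("caught %v, cleaning up\n", s)
	// This is the point at which the router calls GracefulStop() on each
	// gRPC server and closes every backend connection before exiting.
}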
diff --git a/afrouter/arouter.go b/afrouter/arouter.go
index dfa9fc8..65bb7df 100644
--- a/afrouter/arouter.go
+++ b/afrouter/arouter.go
@@ -16,23 +16,22 @@
// gRPC affinity router with active/active backends
package main
+
/* package main // import "github.com/opencord/voltha-go/arouter" */
/* package main // import "github.com/opencord/voltha-go" */
-
import (
- "os"
"fmt"
- slog "log"
- "google.golang.org/grpc/grpclog"
- "github.com/opencord/voltha-go/common/log"
"github.com/opencord/voltha-go/afrouter/afrouter"
+ "github.com/opencord/voltha-go/common/log"
+ "google.golang.org/grpc/grpclog"
+ slog "log"
+ "os"
)
func main() {
-
- conf,err := afrouter.ParseCmd()
+ conf, err := afrouter.ParseCmd()
if err != nil {
fmt.Printf("Error: %v\n", err)
return
@@ -51,7 +50,7 @@
log.Error(err)
return
}
- log.With(log.Fields{"config":*conf}).Debug("Configuration loaded")
+ log.With(log.Fields{"config": *conf}).Debug("Configuration loaded")
// Enable grpc logging
if *conf.GrpcLog {
@@ -62,14 +61,13 @@
// Install the signal and error handlers.
afrouter.InitExitHandler()
-
// Create the affinity router proxy...
- if ap,err := afrouter.NewArouterProxy(conf); err != nil {
- log.Errorf("Failed to create the arouter proxy, exiting:%v",err)
- return
- // and start it.
- // This function never returns unless an error
- // occurs or a signal is caught.
+ if ap, err := afrouter.NewArouterProxy(conf); err != nil {
+ log.Errorf("Failed to create the arouter proxy, exiting:%v", err)
+ return
+ // and start it.
+ // This function never returns unless an error
+ // occurs or a signal is caught.
} else if err := ap.ListenAndServe(); err != nil {
log.Errorf("Exiting on error %v", err)
}
diff --git a/arouterd/arouterd.go b/arouterd/arouterd.go
index d8d7cff..44b8f53 100644
--- a/arouterd/arouterd.go
+++ b/arouterd/arouterd.go
@@ -17,30 +17,30 @@
package main
import (
- "time"
- "regexp"
"errors"
+ "regexp"
"strconv"
+ "time"
- "k8s.io/client-go/rest"
- "google.golang.org/grpc"
- "golang.org/x/net/context"
- "k8s.io/client-go/kubernetes"
"github.com/golang/protobuf/ptypes"
+ empty "github.com/golang/protobuf/ptypes/empty"
"github.com/opencord/voltha-go/common/log"
kafka "github.com/opencord/voltha-go/kafka"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- empty "github.com/golang/protobuf/ptypes/empty"
- vpb "github.com/opencord/voltha-protos/go/voltha"
- cmn "github.com/opencord/voltha-protos/go/common"
pb "github.com/opencord/voltha-protos/go/afrouter"
+ cmn "github.com/opencord/voltha-protos/go/common"
ic "github.com/opencord/voltha-protos/go/inter_container"
+ vpb "github.com/opencord/voltha-protos/go/voltha"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
)
type configConn struct {
- Server string `json:"Server"`
- Cluster string `json:"Cluster"`
- Backend string `json:"Backend"`
+ Server string `json:"Server"`
+ Cluster string `json:"Cluster"`
+ Backend string `json:"Backend"`
connections map[string]connection
}
@@ -51,18 +51,18 @@
}
type volthaPod struct {
- name string
- ipAddr string
- node string
- devIds map[string]struct{}
- cluster string
- backend string
+ name string
+ ipAddr string
+ node string
+ devIds map[string]struct{}
+ cluster string
+ backend string
connection string
}
type podTrack struct {
- pod *volthaPod
- dn bool
+ pod *volthaPod
+ dn bool
}
var nPods int = 6
@@ -92,7 +92,6 @@
return nil, errors.New("unsupported-client-type")
}
-
func k8sClientSet() *kubernetes.Clientset {
// creates the in-cluster config
config, err := rest.InClusterConfig()
@@ -108,24 +107,23 @@
return clientset
}
-
func connect(addr string) (*grpc.ClientConn, error) {
- for ctr :=0 ; ctr < 100; ctr++ {
+ for ctr := 0; ctr < 100; ctr++ {
log.Debugf("Trying to connect to %s", addr)
conn, err := grpc.Dial(addr, grpc.WithInsecure())
if err != nil {
log.Debugf("Attempt to connect failed, retrying %v:", err)
} else {
log.Debugf("Connection succeeded")
- return conn,err
+ return conn, err
}
time.Sleep(10 * time.Second)
}
log.Debugf("Too many connection attempts, giving up!")
- return nil,errors.New("Timeout attempting to conect")
+ return nil, errors.New("Timeout attempting to conect")
}
-func getVolthaPods(cs *kubernetes.Clientset, coreFilter * regexp.Regexp) []*volthaPod {
+func getVolthaPods(cs *kubernetes.Clientset, coreFilter *regexp.Regexp) []*volthaPod {
var rtrn []*volthaPod
pods, err := cs.CoreV1().Pods("").List(metav1.ListOptions{})
@@ -134,34 +132,34 @@
}
//log.Debugf("There are a total of %d pods in the cluster\n", len(pods.Items))
- for _,v := range pods.Items {
+ for _, v := range pods.Items {
if v.Namespace == "voltha" && coreFilter.MatchString(v.Name) {
log.Debugf("Namespace: %s, PodName: %s, PodIP: %s, Host: %s\n", v.Namespace, v.Name,
- v.Status.PodIP, v.Spec.NodeName)
+ v.Status.PodIP, v.Spec.NodeName)
// Only add the pod if it has an IP address. If it doesn't then it likely crashed
// and is still in the process of getting re-started.
if v.Status.PodIP != "" {
- rtrn = append(rtrn, &volthaPod{name:v.Name,ipAddr:v.Status.PodIP,node:v.Spec.NodeName,
- devIds:make(map[string]struct{}), backend:"", connection:""})
+ rtrn = append(rtrn, &volthaPod{name: v.Name, ipAddr: v.Status.PodIP, node: v.Spec.NodeName,
+ devIds: make(map[string]struct{}), backend: "", connection: ""})
}
}
}
return rtrn
}
-func reconcilePodDeviceIds(pod * volthaPod, ids map[string]struct{}) bool {
+func reconcilePodDeviceIds(pod *volthaPod, ids map[string]struct{}) bool {
var idList cmn.IDs
- for k,_ := range ids {
- idList.Items = append(idList.Items, &cmn.ID{Id:k})
+ for k, _ := range ids {
+ idList.Items = append(idList.Items, &cmn.ID{Id: k})
}
- conn,err := connect(pod.ipAddr+":50057")
+ conn, err := connect(pod.ipAddr + ":50057")
defer conn.Close()
if err != nil {
log.Debugf("Could not query devices from %s, could not connect", pod.name)
return false
}
client := vpb.NewVolthaServiceClient(conn)
- _,err = client.ReconcileDevices(context.Background(), &idList)
+ _, err = client.ReconcileDevices(context.Background(), &idList)
if err != nil {
log.Error(err)
return false
@@ -170,31 +168,31 @@
return true
}
-func queryPodDeviceIds(pod * volthaPod) map[string]struct{} {
+func queryPodDeviceIds(pod *volthaPod) map[string]struct{} {
var rtrn map[string]struct{} = make(map[string]struct{})
// Open a connection to the pod
// port 50057
- conn, err := connect(pod.ipAddr+":50057")
+ conn, err := connect(pod.ipAddr + ":50057")
if err != nil {
log.Debugf("Could not query devices from %s, could not connect", pod.name)
return rtrn
}
defer conn.Close()
client := vpb.NewVolthaServiceClient(conn)
- devs,err := client.ListDeviceIds(context.Background(), &empty.Empty{})
+ devs, err := client.ListDeviceIds(context.Background(), &empty.Empty{})
if err != nil {
log.Error(err)
return rtrn
}
- for _,dv := range devs.Items {
- rtrn[dv.Id]=struct{}{}
+ for _, dv := range devs.Items {
+ rtrn[dv.Id] = struct{}{}
}
return rtrn
}
func queryDeviceIds(pods []*volthaPod) {
- for pk,_ := range pods {
+ for pk, _ := range pods {
// Keep the old Id list if a new list is not returned
if idList := queryPodDeviceIds(pods[pk]); len(idList) != 0 {
pods[pk].devIds = idList
@@ -203,7 +201,7 @@
}
func allEmpty(pods []*volthaPod) bool {
- for k,_ := range pods {
+ for k, _ := range pods {
if len(pods[k].devIds) != 0 {
return false
}
@@ -212,10 +210,10 @@
}
func rmPod(pods []*volthaPod, idx int) []*volthaPod {
- return append(pods[:idx],pods[idx+1:]...)
+ return append(pods[:idx], pods[idx+1:]...)
}
-func groupIntersectingPods1(pods []*volthaPod, podCt int) ([][]*volthaPod,[]*volthaPod) {
+func groupIntersectingPods1(pods []*volthaPod, podCt int) ([][]*volthaPod, []*volthaPod) {
var rtrn [][]*volthaPod
var out []*volthaPod
@@ -232,11 +230,11 @@
// Start a pod group with this pod
var grp []*volthaPod
grp = append(grp, pods[0])
- pods = rmPod(pods,0)
+ pods = rmPod(pods, 0)
//log.Debugf("Creating new group %s", pd[k].pod.name)
// Find the peer pod based on device overlap
// It's ok if one isn't found, an empty one will be used instead
- for k,_ := range pods {
+ for k, _ := range pods {
if len(pods[k].devIds) == 0 { // Skip pods with no devices
//log.Debugf("%s empty pod", pd[k1].pod.name)
continue
@@ -246,7 +244,7 @@
if grp[0].node == pods[k].node {
// This should never happen
log.Errorf("pods %s and %s intersect and are on the same server!! Not pairing",
- grp[0].name, pods[k].name)
+ grp[0].name, pods[k].name)
continue
}
grp = append(grp, pods[k])
@@ -258,18 +256,18 @@
rtrn = append(rtrn, grp)
//log.Debugf("Added group %s", grp[0].name)
// Check if the number of groups = half the pods, if so all groups are started.
- if len(rtrn) == podCt >> 1 {
+ if len(rtrn) == podCt>>1 {
// Append any remaining pods to out
- out = append(out,pods[0:]...)
+ out = append(out, pods[0:]...)
break
}
}
- return rtrn,out
+ return rtrn, out
}
func unallocPodCount(pd []*podTrack) int {
var rtrn int = 0
- for _,v := range pd {
+ for _, v := range pd {
if v.dn == false {
rtrn++
}
@@ -277,9 +275,8 @@
return rtrn
}
-
func sameNode(pod *volthaPod, grps [][]*volthaPod) bool {
- for _,v := range grps {
+ for _, v := range grps {
if v[0].node == pod.node {
return true
}
@@ -293,7 +290,7 @@
func startRemainingGroups1(grps [][]*volthaPod, pods []*volthaPod, podCt int) ([][]*volthaPod, []*volthaPod) {
var grp []*volthaPod
- for k,_ := range pods {
+ for k, _ := range pods {
if sameNode(pods[k], grps) {
continue
}
@@ -301,7 +298,7 @@
grp = append(grp, pods[k])
pods = rmPod(pods, k)
grps = append(grps, grp)
- if len(grps) == podCt >> 1 {
+ if len(grps) == podCt>>1 {
break
}
}
@@ -310,7 +307,7 @@
func hasSingleSecondNode(grp []*volthaPod) bool {
var srvrs map[string]struct{} = make(map[string]struct{})
- for k,_ := range grp {
+ for k, _ := range grp {
if k == 0 {
continue // Ignore the first item
}
@@ -323,7 +320,7 @@
}
func addNode(grps [][]*volthaPod, idx *volthaPod, item *volthaPod) [][]*volthaPod {
- for k,_ := range grps {
+ for k, _ := range grps {
if grps[k][0].name == idx.name {
grps[k] = append(grps[k], item)
return grps
@@ -334,10 +331,10 @@
}
func removeNode(grps [][]*volthaPod, item *volthaPod) [][]*volthaPod {
- for k,_ := range grps {
- for k1,_ := range grps[k] {
+ for k, _ := range grps {
+ for k1, _ := range grps[k] {
if grps[k][k1].name == item.name {
- grps[k] = append(grps[k][:k1],grps[k][k1+1:]...)
+ grps[k] = append(grps[k][:k1], grps[k][k1+1:]...)
break
}
}
@@ -349,15 +346,15 @@
var lgrps [][]*volthaPod
// All groups must be started when this function is called.
// Copy incomplete groups
- for k,_ := range grps {
+ for k, _ := range grps {
if len(grps[k]) != 2 {
lgrps = append(lgrps, grps[k])
}
}
// Add all pairing candidates to each started group.
- for k,_ := range pods {
- for k2,_ := range lgrps {
+ for k, _ := range pods {
+ for k2, _ := range lgrps {
if lgrps[k2][0].node != pods[k].node {
lgrps[k2] = append(lgrps[k2], pods[k])
}
@@ -371,12 +368,12 @@
for { // Address groups with only a single server choice
var ssn bool = false
- for k,_ := range lgrps {
+ for k, _ := range lgrps {
// Now if any of the groups only have a single
// node as the choice for the second member
// address that one first.
if hasSingleSecondNode(lgrps[k]) == true {
- ssn = true
+ ssn = true
// Add this pairing to the groups
grps = addNode(grps, lgrps[k][0], lgrps[k][1])
// Since this node is now used, remove it from all
@@ -384,7 +381,7 @@
lgrps = removeNode(lgrps, lgrps[k][1])
// Now remove this group completely since
// it's been addressed
- lgrps = append(lgrps[:k],lgrps[k+1:]...)
+ lgrps = append(lgrps[:k], lgrps[k+1:]...)
break
}
}
@@ -398,7 +395,7 @@
}
grps = addNode(grps, lgrps[0][0], lgrps[0][1])
lgrps = removeNode(lgrps, lgrps[0][1])
- lgrps = append(lgrps[:0],lgrps[1:]...)
+ lgrps = append(lgrps[:0], lgrps[1:]...)
}
return grps
}
@@ -407,15 +404,15 @@
var rtrn [][]*volthaPod
var podCt int = len(pods)
- rtrn,pods = groupIntersectingPods1(pods, podCt)
- // There are several outcomes here
+ rtrn, pods = groupIntersectingPods1(pods, podCt)
+ // There are several outcomes here
// 1) All pods have been paired and we're done
// 2) Some un-allocated pods remain
// 2.a) All groups have been started
// 2.b) Not all groups have been started
if len(pods) == 0 {
return rtrn
- } else if len(rtrn) == podCt >> 1 { // All groupings started
+ } else if len(rtrn) == podCt>>1 { // All groupings started
// Allocate the remaining (presumably empty) pods to the started groups
return groupRemainingPods1(rtrn, pods)
} else { // Some groupings started
@@ -428,8 +425,8 @@
}
func intersect(d1 map[string]struct{}, d2 map[string]struct{}) bool {
- for k,_ := range d1 {
- if _,ok := d2[k]; ok == true {
+ for k, _ := range d1 {
+ if _, ok := d2[k]; ok == true {
return true
}
}
@@ -438,10 +435,10 @@
func setConnection(client pb.ConfigurationClient, cluster string, backend string, connection string, addr string, port uint64) {
log.Debugf("Configuring backend %s : connection %s in cluster %s\n\n",
- backend, connection, cluster)
- cnf := &pb.Conn{Server:"grpc_command",Cluster:cluster, Backend:backend,
- Connection:connection,Addr:addr,
- Port:port}
+ backend, connection, cluster)
+ cnf := &pb.Conn{Server: "grpc_command", Cluster: cluster, Backend: backend,
+ Connection: connection, Addr: addr,
+ Port: port}
if res, err := client.SetConnection(context.Background(), cnf); err != nil {
log.Debugf("failed SetConnection RPC call: %s", err)
} else {
@@ -451,8 +448,8 @@
func setAffinity(client pb.ConfigurationClient, ids map[string]struct{}, backend string) {
log.Debugf("Configuring backend %s : affinities \n", backend)
- aff := &pb.Affinity{Router:"vcore",Route:"dev_manager",Cluster:"vcore",Backend:backend}
- for k,_ := range ids {
+ aff := &pb.Affinity{Router: "vcore", Route: "dev_manager", Cluster: "vcore", Backend: backend}
+ for k, _ := range ids {
log.Debugf("Setting affinity for id %s", k)
aff.Id = k
if res, err := client.SetAffinity(context.Background(), aff); err != nil {
@@ -464,8 +461,8 @@
}
func getBackendForCore(coreId string, coreGroups [][]*volthaPod) string {
- for _,v := range coreGroups {
- for _,v2 := range v {
+ for _, v := range coreGroups {
+ for _, v2 := range v {
if v2.name == coreId {
return v2.backend
}
@@ -476,8 +473,8 @@
}
func monitorDiscovery(client pb.ConfigurationClient,
- ch <-chan *ic.InterContainerMessage,
- coreGroups [][]*volthaPod) {
+ ch <-chan *ic.InterContainerMessage,
+ coreGroups [][]*volthaPod) {
var id map[string]struct{} = make(map[string]struct{})
select {
@@ -489,7 +486,7 @@
} else {
// Set the affinity of the discovered device.
if be := getBackendForCore(device.Id, coreGroups); be != "" {
- id[device.Id]=struct{}{}
+ id[device.Id] = struct{}{}
setAffinity(client, id, be)
} else {
log.Error("Cant use an empty string as a backend name")
@@ -500,11 +497,11 @@
}
func startDiscoveryMonitor(client pb.ConfigurationClient,
- coreGroups [][]*volthaPod) error {
+ coreGroups [][]*volthaPod) error {
var ch <-chan *ic.InterContainerMessage
// Connect to kafka for discovery events
topic := &kafka.Topic{Name: "AffinityRouter"}
- kc,err := newKafkaClient("sarama", "kafka", 9092, "arouterd")
+ kc, err := newKafkaClient("sarama", "kafka", 9092, "arouterd")
kc.Start()
if ch, err = kc.Subscribe(topic); err != nil {
@@ -527,12 +524,12 @@
log.Debug("Get addr diffs")
// Start with an empty array
- for k,_ := range rtrn {
+ for k, _ := range rtrn {
rtrn[k] = make([]*volthaPod, 2)
}
// Build a list with only the new items
- for _,v := range rwPods {
+ for _, v := range rwPods {
if hasIpAddr(coreGroups, v.ipAddr) == false {
nList = append(nList, v)
}
@@ -540,9 +537,9 @@
}
// Now build the coreGroups with only the changed items
- for k1,v1 := range coreGroups {
- for k2,v2 := range v1 {
- if _,ok := ipAddrs[v2.ipAddr]; ok == false {
+ for k1, v1 := range coreGroups {
+ for k2, v2 := range v1 {
+ if _, ok := ipAddrs[v2.ipAddr]; ok == false {
rtrn[k1][k2] = v2
}
}
@@ -555,24 +552,24 @@
// pods being replaced. The criterion is that
// the new pod be on the same server as the
// old pod was.
-func reconcileAddrDiffs(coreGroupDiffs [][]*volthaPod, rwPodDiffs []*volthaPod) ([][]*volthaPod) {
+func reconcileAddrDiffs(coreGroupDiffs [][]*volthaPod, rwPodDiffs []*volthaPod) [][]*volthaPod {
var srvrs map[string][]*volthaPod = make(map[string][]*volthaPod)
log.Debug("Reconciling diffs")
log.Debug("Building server list")
- for _,v := range rwPodDiffs {
+ for _, v := range rwPodDiffs {
log.Debugf("Adding %v to the server list", *v)
srvrs[v.node] = append(srvrs[v.node], v)
}
- for k1,v1 := range coreGroupDiffs {
- log.Debugf("k1:%v, v1:%v", k1,v1)
- for k2,v2 := range v1 {
- log.Debugf("k2:%v, v2:%v", k2,v2)
+ for k1, v1 := range coreGroupDiffs {
+ log.Debugf("k1:%v, v1:%v", k1, v1)
+ for k2, v2 := range v1 {
+ log.Debugf("k2:%v, v2:%v", k2, v2)
if v2 == nil { // Nothing to do here
continue
}
- if _,ok := srvrs[v2.node]; ok == true {
+ if _, ok := srvrs[v2.node]; ok == true {
coreGroupDiffs[k1][k2] = srvrs[v2.node][0]
if len(srvrs[v2.node]) > 1 { // remove one entry from the list
srvrs[v2.node] = append(srvrs[v2.node][:0], srvrs[v2.node][1:]...)
@@ -601,8 +598,8 @@
// entries and then reconcile the device ids on the core
// that's in the new entry with the device ids of its
// active-active peer.
- for k1,v1 := range cores {
- for k2,v2 := range v1 {
+ for k1, v1 := range cores {
+ for k2, v2 := range v1 {
if newEntries[k1][k2] != nil {
// TODO: Missing is the case where both the primary
// and the secondary core crash and come back.
@@ -610,12 +607,12 @@
ids := queryPodDeviceIds(cores[k1][k2^1])
if len(ids) != 0 {
if reconcilePodDeviceIds(newEntries[k1][k2], ids) == false {
- log.Errorf("Attempt to reconcile ids on pod %v failed",newEntries[k1][k2])
+ log.Errorf("Attempt to reconcile ids on pod %v failed", newEntries[k1][k2])
}
}
// Send the affinity router new connection information
setConnection(client, "vcore", v2.backend, v2.connection, newEntries[k1][k2].ipAddr, 50057)
- // Copy the new entry information over
+ // Copy the new entry information over
cores[k1][k2].ipAddr = newEntries[k1][k2].ipAddr
cores[k1][k2].name = newEntries[k1][k2].name
cores[k1][k2].devIds = ids
@@ -628,7 +625,7 @@
// TODO: Break this using functions to simplify
// reading of the code.
// Find the core(s) that have changed addresses
- for k1,v1 := range cores {
+ for k1, v1 := range cores {
found = false
for _, v2 := range nPods {
if v1.ipAddr == v2.ipAddr {
@@ -641,9 +638,9 @@
}
}
// Now plug in the new addresses and set the connection
- for _,v1 := range nPods {
+ for _, v1 := range nPods {
found = false
- for _,v2 := range cores {
+ for _, v2 := range cores {
if v1.ipAddr == v2.ipAddr {
found = true
break
@@ -656,10 +653,10 @@
mia[0].name = v1.name
setConnection(client, "ro_vcore", mia[0].backend, mia[0].connection, v1.ipAddr, 50057)
// Now get rid of the mia entry just processed
- mia = append(mia[:0],mia[1:]...)
+ mia = append(mia[:0], mia[1:]...)
}
default:
- log.Error("Internal: Unexpected type in call to applyAddrDiffs");
+ log.Error("Internal: Unexpected type in call to applyAddrDiffs")
}
}
@@ -667,23 +664,23 @@
var byName map[string]*volthaPod = make(map[string]*volthaPod)
// Convenience
- for _,v := range rwPods {
+ for _, v := range rwPods {
byName[v.name] = v
}
- for k1,v1 := range coreGroups {
- for k2,_ := range v1 {
+ for k1, v1 := range coreGroups {
+ for k2, _ := range v1 {
coreGroups[k1][k2].devIds = byName[v1[k2].name].devIds
}
}
}
func startCoreMonitor(client pb.ConfigurationClient,
- clientset *kubernetes.Clientset,
- rwCoreFltr *regexp.Regexp,
- roCoreFltr *regexp.Regexp,
- coreGroups [][]*volthaPod,
- oRoPods []*volthaPod) error {
+ clientset *kubernetes.Clientset,
+ rwCoreFltr *regexp.Regexp,
+ roCoreFltr *regexp.Regexp,
+ coreGroups [][]*volthaPod,
+ oRoPods []*volthaPod) error {
// Now that initial allocation has been completed, monitor the pods
// for IP changes
// The main loop needs to do the following:
@@ -714,7 +711,7 @@
}
// We have all pods, check if any IP addresses
// have changed.
- for _,v := range rwPods {
+ for _, v := range rwPods {
if hasIpAddr(coreGroups, v.ipAddr) == false {
log.Debug("Address has changed...")
applyAddrDiffs(client, coreGroups, rwPods)
@@ -727,7 +724,7 @@
if len(roPods) != 3 {
continue
}
- for _,v := range roPods {
+ for _, v := range roPods {
if hasIpAddr(oRoPods, v.ipAddr) == false {
applyAddrDiffs(client, oRoPods, roPods)
break
@@ -740,14 +737,14 @@
func hasIpAddr(coreList interface{}, ipAddr string) bool {
switch cores := coreList.(type) {
case []*volthaPod:
- for _,v := range cores {
+ for _, v := range cores {
if v.ipAddr == ipAddr {
return true
}
}
case [][]*volthaPod:
- for _,v1 := range cores {
- for _,v2 := range v1 {
+ for _, v1 := range cores {
+ for _, v2 := range v1 {
if v2.ipAddr == ipAddr {
return true
}
@@ -759,7 +756,6 @@
return false
}
-
func main() {
// This is currently hard coded to a cluster with 3 servers
//var connections map[string]configConn = make(map[string]configConn)
@@ -767,7 +763,6 @@
var err error
var conn *grpc.ClientConn
-
// Set up the regular expression to identify the voltha cores
rwCoreFltr := regexp.MustCompile(`rw-core[0-9]-`)
roCoreFltr := regexp.MustCompile(`ro-core-`)
@@ -795,50 +790,50 @@
queryDeviceIds(rwPods)
// For debugging... comment out later
- for _,v := range rwPods {
+ for _, v := range rwPods {
log.Debugf("Pod list %v", *v)
}
coreGroups := groupPods1(rwPods)
// Assign the groupings to the backends and connections
- for k,_ := range coreGroups {
- for k1,_ := range coreGroups[k] {
+ for k, _ := range coreGroups {
+ for k1, _ := range coreGroups[k] {
coreGroups[k][k1].cluster = "vcore"
- coreGroups[k][k1].backend = "vcore"+strconv.Itoa(k+1)
- coreGroups[k][k1].connection = "vcore"+strconv.Itoa(k+1)+strconv.Itoa(k1+1)
+ coreGroups[k][k1].backend = "vcore" + strconv.Itoa(k+1)
+ coreGroups[k][k1].connection = "vcore" + strconv.Itoa(k+1) + strconv.Itoa(k1+1)
}
}
log.Info("Core gouping completed")
// TODO: Debugging code, comment out for production
- for k,v := range coreGroups {
- for k2,v2 := range v {
+ for k, v := range coreGroups {
+ for k2, v2 := range v {
log.Debugf("Core group %d,%d: %v", k, k2, v2)
}
}
log.Info("Setting affinities")
// Now set the affinities for existing devices in the cores
- for _,v := range coreGroups {
+ for _, v := range coreGroups {
setAffinity(client, v[0].devIds, v[0].backend)
setAffinity(client, v[1].devIds, v[1].backend)
}
log.Info("Setting connections")
// Configure the backends based on the calculated core groups
- for _,v := range coreGroups {
+ for _, v := range coreGroups {
setConnection(client, "vcore", v[0].backend, v[0].connection, v[0].ipAddr, 50057)
setConnection(client, "vcore", v[1].backend, v[1].connection, v[1].ipAddr, 50057)
}
// Process the read only pods
roPods := getVolthaPods(clientset, roCoreFltr)
- for k,v := range roPods {
+ for k, v := range roPods {
log.Debugf("Processing ro_pod %v", v)
- vN := "ro_vcore"+strconv.Itoa(k+1)
+ vN := "ro_vcore" + strconv.Itoa(k+1)
log.Debugf("Setting connection %s, %s, %s", vN, vN+"1", v.ipAddr)
roPods[k].cluster = "ro_core"
roPods[k].backend = vN
- roPods[k].connection = vN+"1"
+ roPods[k].connection = vN + "1"
setConnection(client, "ro_vcore", v.backend, v.connection, v.ipAddr, 50057)
}
@@ -847,7 +842,6 @@
log.Info("Starting core monitoring")
startCoreMonitor(client, clientset, rwCoreFltr,
- roCoreFltr, coreGroups, roPods) // Never returns
+ roCoreFltr, coreGroups, roPods) // Never returns
return
}
-
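For reference, the pod-grouping and IP-lookup logic that the gofmt pass above reformats follows two small patterns: backend and connection names are derived from the group and member indices with strconv.Itoa ("vcore<group+1>" / "vcore<group+1><member+1>"), and hasIpAddr searches either a flat or a grouped pod list through a type switch. The sketch below is illustrative only; the simplified pod struct and the sample data are assumptions for demonstration, not the actual voltha-go types.

// Illustrative sketch, not part of the change. Assumes a simplified pod
// struct and made-up sample data; mirrors the naming loop and the
// hasIpAddr type switch touched by the reformatting above.
package main

import (
	"fmt"
	"strconv"
)

type pod struct {
	name       string
	ipAddr     string
	backend    string
	connection string
}

// nameGroups assigns names the same way the core-grouping loop does:
// backend "vcore<group+1>", connection "vcore<group+1><member+1>".
func nameGroups(groups [][]*pod) {
	for k := range groups {
		for k1 := range groups[k] {
			groups[k][k1].backend = "vcore" + strconv.Itoa(k+1)
			groups[k][k1].connection = "vcore" + strconv.Itoa(k+1) + strconv.Itoa(k1+1)
		}
	}
}

// hasIP accepts either []*pod or [][]*pod, like hasIpAddr.
func hasIP(coreList interface{}, ipAddr string) bool {
	switch cores := coreList.(type) {
	case []*pod:
		for _, v := range cores {
			if v.ipAddr == ipAddr {
				return true
			}
		}
	case [][]*pod:
		for _, group := range cores {
			for _, v := range group {
				if v.ipAddr == ipAddr {
					return true
				}
			}
		}
	}
	return false
}

func main() {
	groups := [][]*pod{
		{{name: "rw-core11", ipAddr: "10.0.0.1"}, {name: "rw-core12", ipAddr: "10.0.0.2"}},
		{{name: "rw-core21", ipAddr: "10.0.0.3"}, {name: "rw-core22", ipAddr: "10.0.0.4"}},
	}
	nameGroups(groups)
	fmt.Println(groups[1][0].backend, groups[1][0].connection) // vcore2 vcore21
	fmt.Println(hasIP(groups, "10.0.0.3"))                     // true
}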
diff --git a/cli/main.go b/cli/main.go
index a20fbf4..9a9ebd5 100644
--- a/cli/main.go
+++ b/cli/main.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package main
diff --git a/cli/menu/devicemenu/deviceMenu.go b/cli/menu/devicemenu/deviceMenu.go
index a435f16..134c46d 100644
--- a/cli/menu/devicemenu/deviceMenu.go
+++ b/cli/menu/devicemenu/deviceMenu.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/edit.go b/cli/menu/devicemenu/edit.go
index c5c1e8e..137ec10 100644
--- a/cli/menu/devicemenu/edit.go
+++ b/cli/menu/devicemenu/edit.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/eof.go b/cli/menu/devicemenu/eof.go
index 4d752fb..e4309bf 100644
--- a/cli/menu/devicemenu/eof.go
+++ b/cli/menu/devicemenu/eof.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/flows.go b/cli/menu/devicemenu/flows.go
index 9a648b2..b9c3add 100644
--- a/cli/menu/devicemenu/flows.go
+++ b/cli/menu/devicemenu/flows.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/help.go b/cli/menu/devicemenu/help.go
index 340817d..6ebe8d9 100644
--- a/cli/menu/devicemenu/help.go
+++ b/cli/menu/devicemenu/help.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/history.go b/cli/menu/devicemenu/history.go
index b4d2423..009fb16 100644
--- a/cli/menu/devicemenu/history.go
+++ b/cli/menu/devicemenu/history.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/images.go b/cli/menu/devicemenu/images.go
index dc2105e..b859af2 100644
--- a/cli/menu/devicemenu/images.go
+++ b/cli/menu/devicemenu/images.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_activate.go b/cli/menu/devicemenu/img_activate.go
index b615c1b..d900401 100644
--- a/cli/menu/devicemenu/img_activate.go
+++ b/cli/menu/devicemenu/img_activate.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_dnld_cancel.go b/cli/menu/devicemenu/img_dnld_cancel.go
index cc9881d..7dbffd8 100644
--- a/cli/menu/devicemenu/img_dnld_cancel.go
+++ b/cli/menu/devicemenu/img_dnld_cancel.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_dnld_list.go b/cli/menu/devicemenu/img_dnld_list.go
index ea6d31f..06b0e48 100644
--- a/cli/menu/devicemenu/img_dnld_list.go
+++ b/cli/menu/devicemenu/img_dnld_list.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_dnld_request.go b/cli/menu/devicemenu/img_dnld_request.go
index 1a0ee98..c2cb739 100644
--- a/cli/menu/devicemenu/img_dnld_request.go
+++ b/cli/menu/devicemenu/img_dnld_request.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_dnld_status.go b/cli/menu/devicemenu/img_dnld_status.go
index 2177996..c338524 100644
--- a/cli/menu/devicemenu/img_dnld_status.go
+++ b/cli/menu/devicemenu/img_dnld_status.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/img_revert.go b/cli/menu/devicemenu/img_revert.go
index bac7b37..4d0aaa5 100644
--- a/cli/menu/devicemenu/img_revert.go
+++ b/cli/menu/devicemenu/img_revert.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/list.go b/cli/menu/devicemenu/list.go
index 053fec9..31804e5 100644
--- a/cli/menu/devicemenu/list.go
+++ b/cli/menu/devicemenu/list.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/pause.go b/cli/menu/devicemenu/pause.go
index 6fec137..3c77bfb 100644
--- a/cli/menu/devicemenu/pause.go
+++ b/cli/menu/devicemenu/pause.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/perf_config.go b/cli/menu/devicemenu/perf_config.go
index bdc9ab4..178a17e 100644
--- a/cli/menu/devicemenu/perf_config.go
+++ b/cli/menu/devicemenu/perf_config.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/ports.go b/cli/menu/devicemenu/ports.go
index 4b1aefb..2d9f276 100644
--- a/cli/menu/devicemenu/ports.go
+++ b/cli/menu/devicemenu/ports.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/py.go b/cli/menu/devicemenu/py.go
index 8a489fd..8667ed3 100644
--- a/cli/menu/devicemenu/py.go
+++ b/cli/menu/devicemenu/py.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/run.go b/cli/menu/devicemenu/run.go
index 081fddb..b8b780b 100644
--- a/cli/menu/devicemenu/run.go
+++ b/cli/menu/devicemenu/run.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/save.go b/cli/menu/devicemenu/save.go
index ba4ef89..dd531da 100644
--- a/cli/menu/devicemenu/save.go
+++ b/cli/menu/devicemenu/save.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/set.go b/cli/menu/devicemenu/set.go
index c8d8a15..c44e164 100644
--- a/cli/menu/devicemenu/set.go
+++ b/cli/menu/devicemenu/set.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/shell.go b/cli/menu/devicemenu/shell.go
index cffbcbd..ddfd8f8 100644
--- a/cli/menu/devicemenu/shell.go
+++ b/cli/menu/devicemenu/shell.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/shortcuts.go b/cli/menu/devicemenu/shortcuts.go
index 14596d0..0e3d120 100644
--- a/cli/menu/devicemenu/shortcuts.go
+++ b/cli/menu/devicemenu/shortcuts.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/devicemenu/show.go b/cli/menu/devicemenu/show.go
index b9e22bd..2e71fe0 100644
--- a/cli/menu/devicemenu/show.go
+++ b/cli/menu/devicemenu/show.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package devicemenu
diff --git a/cli/menu/mainmenu/alarmFilters.go b/cli/menu/mainmenu/alarmFilters.go
index 4a1b274..4473a87 100644
--- a/cli/menu/mainmenu/alarmFilters.go
+++ b/cli/menu/mainmenu/alarmFilters.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/arriveOnus.go b/cli/menu/mainmenu/arriveOnus.go
index 75e6263..06b699d 100644
--- a/cli/menu/mainmenu/arriveOnus.go
+++ b/cli/menu/mainmenu/arriveOnus.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/cmdEnvironment.go b/cli/menu/mainmenu/cmdEnvironment.go
index 0342e87..d3a54fe 100644
--- a/cli/menu/mainmenu/cmdEnvironment.go
+++ b/cli/menu/mainmenu/cmdEnvironment.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/delete.go b/cli/menu/mainmenu/delete.go
index 9b9f49b..c8d4226 100644
--- a/cli/menu/mainmenu/delete.go
+++ b/cli/menu/mainmenu/delete.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/deleteAllFlows.go b/cli/menu/mainmenu/deleteAllFlows.go
index 9888fa9..bf13cfb 100644
--- a/cli/menu/mainmenu/deleteAllFlows.go
+++ b/cli/menu/mainmenu/deleteAllFlows.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/device.go b/cli/menu/mainmenu/device.go
index 45b35a9..89a7b24 100644
--- a/cli/menu/mainmenu/device.go
+++ b/cli/menu/mainmenu/device.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/devices.go b/cli/menu/mainmenu/devices.go
index f5ef716..d40de7d 100644
--- a/cli/menu/mainmenu/devices.go
+++ b/cli/menu/mainmenu/devices.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/disable.go b/cli/menu/mainmenu/disable.go
index 45d86af..f0bfb0f 100644
--- a/cli/menu/mainmenu/disable.go
+++ b/cli/menu/mainmenu/disable.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/enable.go b/cli/menu/mainmenu/enable.go
index 3068c72..691407d 100644
--- a/cli/menu/mainmenu/enable.go
+++ b/cli/menu/mainmenu/enable.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/health.go b/cli/menu/mainmenu/health.go
index 562ff9b..b6ca362 100644
--- a/cli/menu/mainmenu/health.go
+++ b/cli/menu/mainmenu/health.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/injectEapolStart.go b/cli/menu/mainmenu/injectEapolStart.go
index 8015013..c71e958 100644
--- a/cli/menu/mainmenu/injectEapolStart.go
+++ b/cli/menu/mainmenu/injectEapolStart.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/installAllControllerBoundFlows.go b/cli/menu/mainmenu/installAllControllerBoundFlows.go
index c994831..6f394d7 100644
--- a/cli/menu/mainmenu/installAllControllerBoundFlows.go
+++ b/cli/menu/mainmenu/installAllControllerBoundFlows.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/installAllSampleFlows.go b/cli/menu/mainmenu/installAllSampleFlows.go
index a9d08e4..fedb7e3 100644
--- a/cli/menu/mainmenu/installAllSampleFlows.go
+++ b/cli/menu/mainmenu/installAllSampleFlows.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/installDhcpFlows.go b/cli/menu/mainmenu/installDhcpFlows.go
index 799867d..3b71b35 100644
--- a/cli/menu/mainmenu/installDhcpFlows.go
+++ b/cli/menu/mainmenu/installDhcpFlows.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/installEapolFlow.go b/cli/menu/mainmenu/installEapolFlow.go
index 2a76bae..5c68d98 100644
--- a/cli/menu/mainmenu/installEapolFlow.go
+++ b/cli/menu/mainmenu/installEapolFlow.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/launch.go b/cli/menu/mainmenu/launch.go
index 818e5a4..ec5a40b 100644
--- a/cli/menu/mainmenu/launch.go
+++ b/cli/menu/mainmenu/launch.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/load.go b/cli/menu/mainmenu/load.go
index 20fd7c7..8b462b0 100644
--- a/cli/menu/mainmenu/load.go
+++ b/cli/menu/mainmenu/load.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/log.go b/cli/menu/mainmenu/log.go
index d94c4cf..7f5a9bd 100644
--- a/cli/menu/mainmenu/log.go
+++ b/cli/menu/mainmenu/log.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/logicalDevice.go b/cli/menu/mainmenu/logicalDevice.go
index 9e306ec..2ba2874 100644
--- a/cli/menu/mainmenu/logicalDevice.go
+++ b/cli/menu/mainmenu/logicalDevice.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/logicalDevices.go b/cli/menu/mainmenu/logicalDevices.go
index 5d6f2fc..a178366 100644
--- a/cli/menu/mainmenu/logicalDevices.go
+++ b/cli/menu/mainmenu/logicalDevices.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/mainMenu.go b/cli/menu/mainmenu/mainMenu.go
index 3d57892..8d45917 100644
--- a/cli/menu/mainmenu/mainMenu.go
+++ b/cli/menu/mainmenu/mainMenu.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/omci.go b/cli/menu/mainmenu/omci.go
index 98d3328..d648960 100644
--- a/cli/menu/mainmenu/omci.go
+++ b/cli/menu/mainmenu/omci.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/pdb.go b/cli/menu/mainmenu/pdb.go
index 32b9035..ec44436 100644
--- a/cli/menu/mainmenu/pdb.go
+++ b/cli/menu/mainmenu/pdb.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/preprovisionOlt.go b/cli/menu/mainmenu/preprovisionOlt.go
index f185b14..effa8ed 100644
--- a/cli/menu/mainmenu/preprovisionOlt.go
+++ b/cli/menu/mainmenu/preprovisionOlt.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/reboot.go b/cli/menu/mainmenu/reboot.go
index a0a4b12..7dcbd6e 100644
--- a/cli/menu/mainmenu/reboot.go
+++ b/cli/menu/mainmenu/reboot.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/relativeLoad.go b/cli/menu/mainmenu/relativeLoad.go
index ab492b4..1416e26 100644
--- a/cli/menu/mainmenu/relativeLoad.go
+++ b/cli/menu/mainmenu/relativeLoad.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/resetHistory.go b/cli/menu/mainmenu/resetHistory.go
index 61187a5..3fb86cd 100644
--- a/cli/menu/mainmenu/resetHistory.go
+++ b/cli/menu/mainmenu/resetHistory.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/restart.go b/cli/menu/mainmenu/restart.go
index 62ce7e1..a014f55 100644
--- a/cli/menu/mainmenu/restart.go
+++ b/cli/menu/mainmenu/restart.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/selfTest.go b/cli/menu/mainmenu/selfTest.go
index f25e6fb..3ef6675 100644
--- a/cli/menu/mainmenu/selfTest.go
+++ b/cli/menu/mainmenu/selfTest.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/sendSimulatedUpstreamEapol.go b/cli/menu/mainmenu/sendSimulatedUpstreamEapol.go
index c82b0df..22b1e47 100644
--- a/cli/menu/mainmenu/sendSimulatedUpstreamEapol.go
+++ b/cli/menu/mainmenu/sendSimulatedUpstreamEapol.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/test.go b/cli/menu/mainmenu/test.go
index 96e3f0a..768b0c2 100644
--- a/cli/menu/mainmenu/test.go
+++ b/cli/menu/mainmenu/test.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/menu/mainmenu/version.go b/cli/menu/mainmenu/version.go
index 7464c1b..20c30a4 100644
--- a/cli/menu/mainmenu/version.go
+++ b/cli/menu/mainmenu/version.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package mainmenu
diff --git a/cli/util/menuProcessor.go b/cli/util/menuProcessor.go
index c9ba837..52bf6bb 100644
--- a/cli/util/menuProcessor.go
+++ b/cli/util/menuProcessor.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package util
diff --git a/cli/util/parseCmd.go b/cli/util/parseCmd.go
index 06ad8b8..8f9d13d 100644
--- a/cli/util/parseCmd.go
+++ b/cli/util/parseCmd.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package util
diff --git a/cli/util/tableGen.go b/cli/util/tableGen.go
index 3b07ef0..8ead5fd 100644
--- a/cli/util/tableGen.go
+++ b/cli/util/tableGen.go
@@ -1,18 +1,18 @@
/*
- * Copyright 2018-present Open Networking Foundation
+ * Copyright 2018-present Open Networking Foundation
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
-*/
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
package util
diff --git a/common/log/log.go b/common/log/log.go
index 16fed74..33100dc 100644
--- a/common/log/log.go
+++ b/common/log/log.go
@@ -179,7 +179,6 @@
return ErrorLevel
}
-
func getDefaultConfig(outputType string, level int, defaultFields Fields) zp.Config {
return zp.Config{
Level: intToAtomicLevel(level),
diff --git a/common/ponresourcemanager/ponresourcemanager.go b/common/ponresourcemanager/ponresourcemanager.go
index 2a3cae6..c37307b 100755
--- a/common/ponresourcemanager/ponresourcemanager.go
+++ b/common/ponresourcemanager/ponresourcemanager.go
@@ -17,184 +17,184 @@
package ponresourcemanager
import (
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
- bitmap "github.com/boljen/go-bitmap"
- "github.com/opencord/voltha-go/common/log"
- "github.com/opencord/voltha-go/db/kvstore"
- "github.com/opencord/voltha-go/db/model"
- tp "github.com/opencord/voltha-go/common/techprofile"
+ bitmap "github.com/boljen/go-bitmap"
+ "github.com/opencord/voltha-go/common/log"
+ tp "github.com/opencord/voltha-go/common/techprofile"
+ "github.com/opencord/voltha-go/db/kvstore"
+ "github.com/opencord/voltha-go/db/model"
)
const (
- //Constants to identify resource pool
- UNI_ID = "UNI_ID"
- ONU_ID = "ONU_ID"
- ALLOC_ID = "ALLOC_ID"
- GEMPORT_ID = "GEMPORT_ID"
- FLOW_ID = "FLOW_ID"
+ //Constants to identify resource pool
+ UNI_ID = "UNI_ID"
+ ONU_ID = "ONU_ID"
+ ALLOC_ID = "ALLOC_ID"
+ GEMPORT_ID = "GEMPORT_ID"
+ FLOW_ID = "FLOW_ID"
- //Constants for passing command line arugments
- OLT_MODEL_ARG = "--olt_model"
- PATH_PREFIX = "service/voltha/resource_manager/{%s}"
- /*The resource ranges for a given device model should be placed
- at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
- path on the KV store.
- If Resource Range parameters are to be read from the external KV store,
- they are expected to be stored in the following format.
- Note: All parameters are MANDATORY for now.
- constants used as keys to reference the resource range parameters from
- and external KV store.
- */
- UNI_ID_START_IDX = "uni_id_start"
- UNI_ID_END_IDX = "uni_id_end"
- ONU_ID_START_IDX = "onu_id_start"
- ONU_ID_END_IDX = "onu_id_end"
- ONU_ID_SHARED_IDX = "onu_id_shared"
- ALLOC_ID_START_IDX = "alloc_id_start"
- ALLOC_ID_END_IDX = "alloc_id_end"
- ALLOC_ID_SHARED_IDX = "alloc_id_shared"
- GEMPORT_ID_START_IDX = "gemport_id_start"
- GEMPORT_ID_END_IDX = "gemport_id_end"
- GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
- FLOW_ID_START_IDX = "flow_id_start"
- FLOW_ID_END_IDX = "flow_id_end"
- FLOW_ID_SHARED_IDX = "flow_id_shared"
- NUM_OF_PON_PORT = "pon_ports"
+ //Constants for passing command line arguments
+ OLT_MODEL_ARG = "--olt_model"
+ PATH_PREFIX = "service/voltha/resource_manager/{%s}"
+ /*The resource ranges for a given device model should be placed
+ at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+ path on the KV store.
+ If Resource Range parameters are to be read from the external KV store,
+ they are expected to be stored in the following format.
+ Note: All parameters are MANDATORY for now.
+ constants used as keys to reference the resource range parameters from
+ an external KV store.
+ */
+ UNI_ID_START_IDX = "uni_id_start"
+ UNI_ID_END_IDX = "uni_id_end"
+ ONU_ID_START_IDX = "onu_id_start"
+ ONU_ID_END_IDX = "onu_id_end"
+ ONU_ID_SHARED_IDX = "onu_id_shared"
+ ALLOC_ID_START_IDX = "alloc_id_start"
+ ALLOC_ID_END_IDX = "alloc_id_end"
+ ALLOC_ID_SHARED_IDX = "alloc_id_shared"
+ GEMPORT_ID_START_IDX = "gemport_id_start"
+ GEMPORT_ID_END_IDX = "gemport_id_end"
+ GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+ FLOW_ID_START_IDX = "flow_id_start"
+ FLOW_ID_END_IDX = "flow_id_end"
+ FLOW_ID_SHARED_IDX = "flow_id_shared"
+ NUM_OF_PON_PORT = "pon_ports"
- /*
- The KV store backend is initialized with a path prefix and we need to
- provide only the suffix.
- */
- PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+ /*
+ The KV store backend is initialized with a path prefix and we need to
+ provide only the suffix.
+ */
+ PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
- //resource path suffix
- //Path on the KV store for storing alloc id ranges and resource pool for a given interface
- //Format: <device_id>/alloc_id_pool/<pon_intf_id>
- ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
- //Path on the KV store for storing gemport id ranges and resource pool for a given interface
- //Format: <device_id>/gemport_id_pool/<pon_intf_id>
- GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
- //Path on the KV store for storing onu id ranges and resource pool for a given interface
- //Format: <device_id>/onu_id_pool/<pon_intf_id>
- ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
- //Path on the KV store for storing flow id ranges and resource pool for a given interface
- //Format: <device_id>/flow_id_pool/<pon_intf_id>
- FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+ //resource path suffix
+ //Path on the KV store for storing alloc id ranges and resource pool for a given interface
+ //Format: <device_id>/alloc_id_pool/<pon_intf_id>
+ ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+ //Path on the KV store for storing gemport id ranges and resource pool for a given interface
+ //Format: <device_id>/gemport_id_pool/<pon_intf_id>
+ GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+ //Path on the KV store for storing onu id ranges and resource pool for a given interface
+ //Format: <device_id>/onu_id_pool/<pon_intf_id>
+ ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+ //Path on the KV store for storing flow id ranges and resource pool for a given interface
+ //Format: <device_id>/flow_id_pool/<pon_intf_id>
+ FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
- //Path on the KV store for storing list of alloc IDs for a given ONU
- //Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
- ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+ //Path on the KV store for storing list of alloc IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+ ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
- //Path on the KV store for storing list of gemport IDs for a given ONU
- //Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
- GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+ //Path on the KV store for storing list of gemport IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+ GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
- //Path on the KV store for storing list of Flow IDs for a given ONU
- //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
- FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+ //Path on the KV store for storing list of Flow IDs for a given ONU
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+ FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
- //Flow Id info: Use to store more metadata associated with the flow_id
- //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
- FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
+ //Flow Id info: Use to store more metadata associated with the flow_id
+ //Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+ FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
- //Constants for internal usage.
- PON_INTF_ID = "pon_intf_id"
- START_IDX = "start_idx"
- END_IDX = "end_idx"
- POOL = "pool"
- NUM_OF_PON_INTF = 16
+ //Constants for internal usage.
+ PON_INTF_ID = "pon_intf_id"
+ START_IDX = "start_idx"
+ END_IDX = "end_idx"
+ POOL = "pool"
+ NUM_OF_PON_INTF = 16
- KVSTORE_RETRY_TIMEOUT = 5
+ KVSTORE_RETRY_TIMEOUT = 5
)
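For orientation, the sketch below shows how the path templates above compose into a full KV key: the backend is created with PATH_PREFIX expanded for the technology, and the per-pool suffixes are expanded with the device ID and PON interface ID. The technology string, the device ID, and the "/" join are illustrative assumptions; the actual joining is done by model.Backend and the GetPath helper, which are outside this hunk.

package main

import "fmt"

const (
	pathPrefix      = "service/voltha/resource_manager/{%s}" // PATH_PREFIX above
	allocIDPoolPath = "{%s}/alloc_id_pool/{%d}"              // ALLOC_ID_POOL_PATH above
)

func main() {
	prefix := fmt.Sprintf(pathPrefix, "xgspon")                   // hypothetical technology
	suffix := fmt.Sprintf(allocIDPoolPath, "0001941bd45e71d8", 0) // hypothetical device ID, PON intf 0
	fmt.Println(prefix + "/" + suffix)
	// service/voltha/resource_manager/{xgspon}/{0001941bd45e71d8}/alloc_id_pool/{0}
}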
//type ResourceTypeIndex string
//type ResourceType string
type PONResourceManager struct {
- //Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
- Technology string
- DeviceType string
- DeviceID string
- Backend string // ETCD, or consul
- Host string // host ip of the KV store
- Port int // port number for the KV store
- OLTModel string
- KVStore *model.Backend
- TechProfileMgr *tp.TechProfileMgr
+ //Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
+ Technology string
+ DeviceType string
+ DeviceID string
+ Backend string // ETCD, or consul
+ Host string // host ip of the KV store
+ Port int // port number for the KV store
+ OLTModel string
+ KVStore *model.Backend
+ TechProfileMgr *tp.TechProfileMgr
- // Below attribute, pon_resource_ranges, should be initialized
- // by reading from KV store.
- PonResourceRanges map[string]interface{}
- SharedResourceMgrs map[string]*PONResourceManager
- SharedIdxByType map[string]string
- IntfIDs []uint32 // list of pon interface IDs
+ // Below attribute, pon_resource_ranges, should be initialized
+ // by reading from KV store.
+ PonResourceRanges map[string]interface{}
+ SharedResourceMgrs map[string]*PONResourceManager
+ SharedIdxByType map[string]string
+ IntfIDs []uint32 // list of pon interface IDs
}
func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
- log.Infow("kv-store-type", log.Fields{"store": storeType})
- switch storeType {
- case "consul":
- return kvstore.NewConsulClient(address, timeout)
- case "etcd":
- return kvstore.NewEtcdClient(address, timeout)
- }
- return nil, errors.New("unsupported-kv-store")
+ log.Infow("kv-store-type", log.Fields{"store": storeType})
+ switch storeType {
+ case "consul":
+ return kvstore.NewConsulClient(address, timeout)
+ case "etcd":
+ return kvstore.NewEtcdClient(address, timeout)
+ }
+ return nil, errors.New("unsupported-kv-store")
}
func SetKVClient(Technology string, Backend string, Host string, Port int) *model.Backend {
- addr := Host + ":" + strconv.Itoa(Port)
- // TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
- // issue between kv store and backend , core is not calling NewBackend directly
- kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
- if err != nil {
- log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
- return nil
- }
- kvbackend := &model.Backend{
- Client: kvClient,
- StoreType: Backend,
- Host: Host,
- Port: Port,
- Timeout: KVSTORE_RETRY_TIMEOUT,
- PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
+ addr := Host + ":" + strconv.Itoa(Port)
+ // TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
+ // issue between kv store and backend , core is not calling NewBackend directly
+ kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
+ if err != nil {
+ log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+ return nil
+ }
+ kvbackend := &model.Backend{
+ Client: kvClient,
+ StoreType: Backend,
+ Host: Host,
+ Port: Port,
+ Timeout: KVSTORE_RETRY_TIMEOUT,
+ PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
- return kvbackend
+ return kvbackend
}
// NewPONResourceManager creates a new PON resource manager.
func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
- var PONMgr PONResourceManager
- PONMgr.Technology = Technology
- PONMgr.DeviceType = DeviceType
- PONMgr.DeviceID = DeviceID
- PONMgr.Backend = Backend
- PONMgr.Host = Host
- PONMgr.Port = Port
- PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
- if PONMgr.KVStore == nil {
- log.Error("KV Client initilization failed")
- return nil, errors.New("Failed to init KV client")
- }
- // Initialize techprofile for this technology
- if PONMgr.TechProfileMgr,_ = tp.NewTechProfile(&PONMgr);PONMgr.TechProfileMgr == nil{
- log.Error("Techprofile initialization failed")
- return nil,errors.New("Failed to init tech profile")
- }
- PONMgr.PonResourceRanges = make(map[string]interface{})
- PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
- PONMgr.SharedIdxByType = make(map[string]string)
- PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
- PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
- PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
- PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
- PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
- PONMgr.OLTModel = DeviceType
- return &PONMgr, nil
+ var PONMgr PONResourceManager
+ PONMgr.Technology = Technology
+ PONMgr.DeviceType = DeviceType
+ PONMgr.DeviceID = DeviceID
+ PONMgr.Backend = Backend
+ PONMgr.Host = Host
+ PONMgr.Port = Port
+ PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
+ if PONMgr.KVStore == nil {
+ log.Error("KV Client initilization failed")
+ return nil, errors.New("Failed to init KV client")
+ }
+ // Initialize techprofile for this technology
+ if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr); PONMgr.TechProfileMgr == nil {
+ log.Error("Techprofile initialization failed")
+ return nil, errors.New("Failed to init tech profile")
+ }
+ PONMgr.PonResourceRanges = make(map[string]interface{})
+ PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+ PONMgr.SharedIdxByType = make(map[string]string)
+ PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+ PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+ PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+ PONMgr.OLTModel = DeviceType
+ return &PONMgr, nil
}
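A minimal usage sketch for the constructor above, assuming a reachable etcd instance; the technology, device type, device ID, and address are placeholders. Note that the device type is also recorded as the OLT model key used later for the resource-range lookup.

package main

import (
	"fmt"

	ponrmgr "github.com/opencord/voltha-go/common/ponresourcemanager"
)

func main() {
	// All arguments below are hypothetical values for illustration only.
	mgr, err := ponrmgr.NewPONResourceManager(
		"xgspon",           // technology
		"example_olt",      // device type (also stored as OLTModel)
		"0001941bd45e71d8", // device ID
		"etcd",             // backend: "etcd" or "consul"
		"127.0.0.1", 2379)  // KV store host and port
	if err != nil {
		fmt.Println("PON resource manager init failed:", err)
		return
	}
	fmt.Println("PON resource manager ready for device", mgr.DeviceID)
}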
/*
@@ -205,909 +205,907 @@
*/
func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore() bool {
- //Initialize PON resource ranges with config fetched from kv store.
- //:return boolean: True if PON resource ranges initialized else false
- // Try to initialize the PON Resource Ranges from KV store based on the
- // OLT model key, if available
- if PONRMgr.OLTModel == "" {
- log.Error("Failed to get OLT model")
- return false
- }
- Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
- //get resource from kv store
- Result, err := PONRMgr.KVStore.Get(Path)
- if err != nil {
- log.Debugf("Error in fetching resource %s from KV strore", Path)
- return false
- }
- if Result == nil {
- log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
- return false
- }
- //update internal ranges from kv ranges. If there are missing
- // values in the KV profile, continue to use the defaults
- Value, err := ToByte(Result.Value)
- if err != nil {
- log.Error("Failed to convert kvpair to byte string")
- return false
- }
- if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
- log.Error("Failed to Unmarshal json byte")
- return false
- }
- log.Debug("Init resource ranges from kvstore success")
- return true
+ //Initialize PON resource ranges with config fetched from kv store.
+ //:return boolean: True if PON resource ranges initialized else false
+ // Try to initialize the PON Resource Ranges from KV store based on the
+ // OLT model key, if available
+ if PONRMgr.OLTModel == "" {
+ log.Error("Failed to get OLT model")
+ return false
+ }
+ Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+ //get resource from kv store
+ Result, err := PONRMgr.KVStore.Get(Path)
+ if err != nil {
+ log.Debugf("Error in fetching resource %s from KV strore", Path)
+ return false
+ }
+ if Result == nil {
+ log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+ return false
+ }
+ //update internal ranges from kv ranges. If there are missing
+ // values in the KV profile, continue to use the defaults
+ Value, err := ToByte(Result.Value)
+ if err != nil {
+ log.Error("Failed to convert kvpair to byte string")
+ return false
+ }
+ if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+ log.Error("Failed to Unmarshal json byte")
+ return false
+ }
+ log.Debug("Init resource ranges from kvstore success")
+ return true
}
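The constants block says the ranges "are expected to be stored in the following format" without showing one; below is a hypothetical JSON document of the shape this function unmarshals from resource_ranges/<olt_model_type>. The key names come from the *_start/*_end/*_shared constants above; every numeric value is a placeholder, not a recommendation for any real OLT model.

package main

import (
	"encoding/json"
	"fmt"
)

// sampleRanges is an illustrative resource-range document; only the key names
// are significant.
const sampleRanges = `{
	"uni_id_start": 0,
	"uni_id_end": 0,
	"onu_id_start": 1,
	"onu_id_end": 127,
	"onu_id_shared": 0,
	"alloc_id_start": 1024,
	"alloc_id_end": 2816,
	"alloc_id_shared": 0,
	"gemport_id_start": 1024,
	"gemport_id_end": 8960,
	"gemport_id_shared": 0,
	"flow_id_start": 1,
	"flow_id_end": 16383,
	"flow_id_shared": 0,
	"pon_ports": 16
}`

func main() {
	ranges := make(map[string]interface{})
	if err := json.Unmarshal([]byte(sampleRanges), &ranges); err != nil {
		panic(err)
	}
	fmt.Println(ranges["onu_id_start"], ranges["onu_id_end"]) // 1 127
}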
func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
- SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
- /*
- Update the ranges for all reosurce type in the intermnal maps
- param: resource type start index
- param: start ID
- param: resource type end index
- param: end ID
- param: resource type shared index
- param: shared pool id
- param: global resource manager
- */
- log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+ SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+ /*
+ Update the ranges for all resource types in the internal maps
+ param: resource type start index
+ param: start ID
+ param: resource type end index
+ param: end ID
+ param: resource type shared index
+ param: shared pool id
+ param: global resource manager
+ */
+ log.Debugf("update ranges for %s, %d", StartIDx, StartID)
- if StartID != 0 {
- PONRMgr.PonResourceRanges[StartIDx] = StartID
- }
- if EndID != 0 {
- PONRMgr.PonResourceRanges[EndIDx] = EndID
- }
- //if SharedPoolID != 0 {
- PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
- //}
- if RMgr != nil {
- PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
- }
+ if StartID != 0 {
+ PONRMgr.PonResourceRanges[StartIDx] = StartID
+ }
+ if EndID != 0 {
+ PONRMgr.PonResourceRanges[EndIDx] = EndID
+ }
+ //if SharedPoolID != 0 {
+ PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+ //}
+ if RMgr != nil {
+ PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+ }
}
func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
- ONUIDEnd uint32,
- ONUIDSharedPoolID uint32,
- AllocIDStart uint32,
- AllocIDEnd uint32,
- AllocIDSharedPoolID uint32,
- GEMPortIDStart uint32,
- GEMPortIDEnd uint32,
- GEMPortIDSharedPoolID uint32,
- FlowIDStart uint32,
- FlowIDEnd uint32,
- FlowIDSharedPoolID uint32,
- UNIIDStart uint32,
- UNIIDEnd uint32,
- NoOfPONPorts uint32,
- IntfIDs []uint32) bool {
+ ONUIDEnd uint32,
+ ONUIDSharedPoolID uint32,
+ AllocIDStart uint32,
+ AllocIDEnd uint32,
+ AllocIDSharedPoolID uint32,
+ GEMPortIDStart uint32,
+ GEMPortIDEnd uint32,
+ GEMPortIDSharedPoolID uint32,
+ FlowIDStart uint32,
+ FlowIDEnd uint32,
+ FlowIDSharedPoolID uint32,
+ UNIIDStart uint32,
+ UNIIDEnd uint32,
+ NoOfPONPorts uint32,
+ IntfIDs []uint32) bool {
- /*Initialize default PON resource ranges
+ /*Initialize default PON resource ranges
- :param onu_id_start_idx: onu id start index
- :param onu_id_end_idx: onu id end index
- :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
- :param alloc_id_start_idx: alloc id start index
- :param alloc_id_end_idx: alloc id end index
- :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
- :param gemport_id_start_idx: gemport id start index
- :param gemport_id_end_idx: gemport id end index
- :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
- :param flow_id_start_idx: flow id start index
- :param flow_id_end_idx: flow id end index
- :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
- :param num_of_pon_ports: number of PON ports
- :param intf_ids: interfaces serviced by this manager
- */
- PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
- PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
- PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
- PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
- PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
- log.Debug("Initialize default range values")
- var i uint32
- if IntfIDs == nil {
- for i = 0; i < NoOfPONPorts; i++ {
- PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
- }
- } else {
- PONRMgr.IntfIDs = IntfIDs
- }
- return true
+ :param onu_id_start_idx: onu id start index
+ :param onu_id_end_idx: onu id end index
+ :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+ :param alloc_id_start_idx: alloc id start index
+ :param alloc_id_end_idx: alloc id end index
+ :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+ :param gemport_id_start_idx: gemport id start index
+ :param gemport_id_end_idx: gemport id end index
+ :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+ :param flow_id_start_idx: flow id start index
+ :param flow_id_end_idx: flow id end index
+ :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+ :param num_of_pon_ports: number of PON ports
+ :param intf_ids: interfaces serviced by this manager
+ */
+ PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+ PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+ log.Debug("Initialize default range values")
+ var i uint32
+ if IntfIDs == nil {
+ for i = 0; i < NoOfPONPorts; i++ {
+ PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+ }
+ } else {
+ PONRMgr.IntfIDs = IntfIDs
+ }
+ return true
}
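At adapter start-up the range and pool initializers are typically used back to back: seed the in-memory ranges, then carve the per-interface pools in the KV store. A minimal sketch, assuming the manager was created as in the earlier example and that no pools are shared across interfaces; the numeric ranges are placeholders.

package example

import ponrmgr "github.com/opencord/voltha-go/common/ponresourcemanager"

// initPools seeds default ranges and then builds the ONU/alloc/GEM port/flow
// ID pools for every PON interface. All numbers are illustrative.
func initPools(mgr *ponrmgr.PONResourceManager) error {
	mgr.InitDefaultPONResourceRanges(
		1, 127, 0, // ONU IDs: start, end, shared pool (0 = per-interface pools)
		1024, 2816, 0, // alloc IDs
		1024, 8960, 0, // GEM port IDs
		1, 16383, 0, // flow IDs
		0, 0, // UNI IDs
		16,  // number of PON ports
		nil) // nil: interface IDs are derived from the port count
	return mgr.InitDeviceResourcePool()
}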
func (PONRMgr *PONResourceManager) InitDeviceResourcePool() error {
- //Initialize resource pool for all PON ports.
+ //Initialize resource pool for all PON ports.
- log.Debug("Init resource ranges")
+ log.Debug("Init resource ranges")
- var err error
- for _, Intf := range PONRMgr.IntfIDs {
- SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
- if SharedPoolID != 0 {
- Intf = SharedPoolID
- }
- if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
- PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
- PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
- log.Error("Failed to init ONU ID resource pool")
- return err
- }
- if SharedPoolID != 0 {
- break
- }
- }
+ var err error
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
+ PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init ONU ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
- for _, Intf := range PONRMgr.IntfIDs {
- SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
- if SharedPoolID != 0 {
- Intf = SharedPoolID
- }
- if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
- PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
- PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
- log.Error("Failed to init ALLOC ID resource pool ")
- return err
- }
- if SharedPoolID != 0 {
- break
- }
- }
- for _, Intf := range PONRMgr.IntfIDs {
- SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
- if SharedPoolID != 0 {
- Intf = SharedPoolID
- }
- if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
- PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
- PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
- log.Error("Failed to init GEMPORT ID resource pool")
- return err
- }
- if SharedPoolID != 0 {
- break
- }
- }
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
+ PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init ALLOC ID resource pool ")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
+ PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init GEMPORT ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
- for _, Intf := range PONRMgr.IntfIDs {
- SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
- if SharedPoolID != 0 {
- Intf = SharedPoolID
- }
- if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
- PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
- PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
- log.Error("Failed to init FLOW ID resource pool")
- return err
- }
- if SharedPoolID != 0 {
- break
- }
- }
- return err
+ for _, Intf := range PONRMgr.IntfIDs {
+ SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+ if SharedPoolID != 0 {
+ Intf = SharedPoolID
+ }
+ if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
+ PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+ log.Error("Failed to init FLOW ID resource pool")
+ return err
+ }
+ if SharedPoolID != 0 {
+ break
+ }
+ }
+ return err
}
func (PONRMgr *PONResourceManager) InitResourceIDPool(Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
- /*Initialize Resource ID pool for a given Resource Type on a given PON Port
+ /*Initialize Resource ID pool for a given Resource Type on a given PON Port
- :param pon_intf_id: OLT PON interface id
- :param resource_type: String to identify type of resource
- :param start_idx: start index for onu id pool
- :param end_idx: end index for onu id pool
- :return boolean: True if resource id pool initialized else false
- */
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param start_idx: start index for onu id pool
+ :param end_idx: end index for onu id pool
+ :return boolean: True if resource id pool initialized else false
+ */
- // delegate to the master instance if sharing enabled across instances
- SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
- if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
- return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
- }
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
+ }
- Path := PONRMgr.GetPath(Intf, ResourceType)
- if Path == "" {
- log.Errorf("Failed to get path for resource type %s", ResourceType)
- return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
- }
+ Path := PONRMgr.GetPath(Intf, ResourceType)
+ if Path == "" {
+ log.Errorf("Failed to get path for resource type %s", ResourceType)
+ return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+ }
- //In case of adapter reboot and reconciliation resource in kv store
- //checked for its presence if not kv store update happens
- Res, err := PONRMgr.GetResource(Path)
- if (err == nil) && (Res != nil) {
- log.Debugf("Resource %s already present in store ", Path)
- return nil
- } else {
- FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
- if err != nil {
- log.Errorf("Failed to format resource")
- return err
- }
- // Add resource as json in kv store.
- err = PONRMgr.KVStore.Put(Path, FormatResult)
- if err == nil {
- log.Debug("Successfuly posted to kv store")
- return err
- }
- }
+ // In case of adapter reboot and reconciliation, check whether the resource is
+ // already present in the KV store; if it is not, update the KV store.
+ Res, err := PONRMgr.GetResource(Path)
+ if (err == nil) && (Res != nil) {
+ log.Debugf("Resource %s already present in store ", Path)
+ return nil
+ } else {
+ FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
+ if err != nil {
+ log.Errorf("Failed to format resource")
+ return err
+ }
+ // Add resource as json in kv store.
+ err = PONRMgr.KVStore.Put(Path, FormatResult)
+ if err == nil {
+ log.Debug("Successfuly posted to kv store")
+ return err
+ }
+ }
- log.Debug("Error initializing pool")
+ log.Debug("Error initializing pool")
- return err
+ return err
}
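For orientation, the init path above is deliberately idempotent: it only writes a pool when the key is absent, so adapter reboots and reconciliation do not clobber existing state. A minimal stdlib-only sketch of that check-then-put pattern, using a hypothetical map-backed store (mapStore, initPool and the path string are illustrative, not part of this package):

package main

import (
    "errors"
    "fmt"
)

// mapStore is a stand-in for the KV backend used above (illustrative only).
type mapStore map[string][]byte

func (s mapStore) Get(path string) ([]byte, error) {
    if v, ok := s[path]; ok {
        return v, nil
    }
    return nil, nil // an absent key is not an error here
}

func (s mapStore) Put(path string, value []byte) error {
    if path == "" {
        return errors.New("empty path")
    }
    s[path] = value
    return nil
}

// initPool writes an initial pool only if the path is not already populated,
// mirroring the reboot/reconciliation check in InitResourceIDPool.
func initPool(s mapStore, path string, pool []byte) error {
    if existing, err := s.Get(path); err == nil && existing != nil {
        fmt.Printf("pool %s already present, leaving it untouched\n", path)
        return nil
    }
    return s.Put(path, pool)
}

func main() {
    store := mapStore{}
    _ = initPool(store, "olt-0001/alloc_id_pool/0", []byte(`{"pool":""}`))
    // The second call is a no-op because the key now exists.
    _ = initPool(store, "olt-0001/alloc_id_pool/0", []byte(`{"pool":"x"}`))
}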
func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32) ([]byte, error) {
- /*
- Format resource as json.
- :param pon_intf_id: OLT PON interface id
- :param start_idx: start index for id pool
- :param end_idx: end index for id pool
- :return dictionary: resource formatted as map
- */
- // Format resource as json to be stored in backend store
- Resource := make(map[string]interface{})
- Resource[PON_INTF_ID] = IntfID
- Resource[START_IDX] = StartIDx
- Resource[END_IDX] = EndIDx
- /*
- Resource pool stored in backend store as binary string.
- Tracking the resource allocation will be done by setting the bits \
- in the byte array. The index set will be the resource number allocated.
- */
- var TSData *bitmap.Threadsafe
- if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
- log.Error("Failed to create a bitmap")
- return nil, errors.New("Failed to create bitmap")
- }
- Resource[POOL] = TSData.Data(false) //we pass false so as the TSData lib api does not do a copy of the data and return
+ /*
+ Format resource as json.
+ :param pon_intf_id: OLT PON interface id
+ :param start_idx: start index for id pool
+ :param end_idx: end index for id pool
+ :return dictionary: resource formatted as map
+ */
+ // Format resource as json to be stored in backend store
+ Resource := make(map[string]interface{})
+ Resource[PON_INTF_ID] = IntfID
+ Resource[START_IDX] = StartIDx
+ Resource[END_IDX] = EndIDx
+ /*
+ The resource pool is stored in the backend store as a binary string.
+ Resource allocation is tracked by setting bits in the byte array;
+ the index of each set bit is the allocated resource number.
+ */
+ var TSData *bitmap.Threadsafe
+ if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+ log.Error("Failed to create a bitmap")
+ return nil, errors.New("Failed to create bitmap")
+ }
+ Resource[POOL] = TSData.Data(false) // pass false so the TSData library returns the underlying data without making a copy
- Value, err := json.Marshal(Resource)
- if err != nil {
- log.Errorf("Failed to marshall resource")
- return nil, err
- }
- return Value, err
+ Value, err := json.Marshal(Resource)
+ if err != nil {
+ log.Errorf("Failed to marshall resource")
+ return nil, err
+ }
+ return Value, err
}
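As a rough illustration of what FormatResource produces, the sketch below builds the same kind of map and marshals it to JSON. The key strings are illustrative stand-ins for the PON_INTF_ID/START_IDX/END_IDX/POOL constants, and a plain []byte stands in for the bitmap.Threadsafe pool:

package main

import (
    "encoding/json"
    "fmt"
)

// formatResource mirrors the shape of FormatResource above: interface id,
// start/end indices, and a bit pool sized to hold endIdx bits.
func formatResource(intfID, startIdx, endIdx uint32) ([]byte, error) {
    resource := map[string]interface{}{
        "pon_intf_id": intfID,   // illustrative key; the real code uses the PON_INTF_ID constant
        "start_idx":   startIdx, // START_IDX
        "end_idx":     endIdx,   // END_IDX
        "pool":        make([]byte, int((endIdx+7)/8)), // POOL: one bit per allocatable ID
    }
    return json.Marshal(resource)
}

func main() {
    value, err := formatResource(0, 1024, 2048)
    if err != nil {
        panic(err)
    }
    // encoding/json base64-encodes the []byte pool, which is why GetResource
    // below has to base64-decode the POOL field when reading it back.
    fmt.Println(string(value))
}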
func (PONRMgr *PONResourceManager) GetResource(Path string) (map[string]interface{}, error) {
- /*
- Get resource from kv store.
+ /*
+ Get resource from kv store.
- :param path: path to get resource
- :return: resource if resource present in kv store else None
- */
- //get resource from kv store
+ :param path: path to get resource
+ :return: resource if resource present in kv store else None
+ */
+ //get resource from kv store
- var Value []byte
- Result := make(map[string]interface{})
- var Str string
+ var Value []byte
+ Result := make(map[string]interface{})
+ var Str string
- Resource, err := PONRMgr.KVStore.Get(Path)
- if (err != nil) || (Resource == nil) {
- log.Debugf("Resource unavailable at %s", Path)
- return nil, err
- }
+ Resource, err := PONRMgr.KVStore.Get(Path)
+ if (err != nil) || (Resource == nil) {
+ log.Debugf("Resource unavailable at %s", Path)
+ return nil, err
+ }
- Value, err = ToByte(Resource.Value)
+ Value, err = ToByte(Resource.Value)
- // decode resource fetched from backend store to dictionary
- err = json.Unmarshal(Value, &Result)
- if err != nil {
- log.Error("Failed to decode resource")
- return Result, err
- }
- /*
- resource pool in backend store stored as binary string whereas to
- access the pool to generate/release IDs it need to be converted
- as BitArray
- */
- Str, err = ToString(Result[POOL])
- if err != nil {
- log.Error("Failed to conver to kv pair to string")
- return Result, err
- }
- Decode64, _ := base64.StdEncoding.DecodeString(Str)
- Result[POOL], err = ToByte(Decode64)
- if err != nil {
- log.Error("Failed to convert resource pool to byte")
- return Result, err
- }
+ // decode resource fetched from backend store to dictionary
+ err = json.Unmarshal(Value, &Result)
+ if err != nil {
+ log.Error("Failed to decode resource")
+ return Result, err
+ }
+ /*
+ The resource pool is stored in the backend store as a binary string;
+ to access the pool for generating/releasing IDs it needs to be
+ converted to a bit array.
+ */
+ Str, err = ToString(Result[POOL])
+ if err != nil {
+ log.Error("Failed to conver to kv pair to string")
+ return Result, err
+ }
+ Decode64, _ := base64.StdEncoding.DecodeString(Str)
+ Result[POOL], err = ToByte(Decode64)
+ if err != nil {
+ log.Error("Failed to convert resource pool to byte")
+ return Result, err
+ }
- return Result, err
+ return Result, err
}
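The read path reverses that encoding: the pool comes back as a base64 string inside the JSON document and has to be decoded before it can be used as a bit array, which is what the ToString and base64.StdEncoding.DecodeString steps above do. A small stdlib-only sketch of just that decode step (the key names are illustrative):

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

func main() {
    // What a stored resource looks like after json.Marshal of a map with a []byte pool.
    stored := []byte(`{"start_idx":1024,"end_idx":2048,"pool":"AAAAAA=="}`)

    result := make(map[string]interface{})
    if err := json.Unmarshal(stored, &result); err != nil {
        panic(err)
    }

    // The pool arrives as a string; decode it back into bytes,
    // mirroring the ToString + base64 decode above.
    poolStr, ok := result["pool"].(string)
    if !ok {
        panic("pool is not a string")
    }
    poolBytes, err := base64.StdEncoding.DecodeString(poolStr)
    if err != nil {
        panic(err)
    }
    fmt.Printf("decoded pool: %d bytes\n", len(poolBytes))
}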
func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
- /*
- Get path for given resource type.
- :param pon_intf_id: OLT PON interface id
- :param resource_type: String to identify type of resource
- :return: path for given resource type
- */
+ /*
+ Get path for given resource type.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :return: path for given resource type
+ */
- /*
- Get the shared pool for the given resource type.
- all the resource ranges and the shared resource maps are initialized during the init.
- */
- SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
- if SharedPoolID != 0 {
- IntfID = SharedPoolID
- }
- var Path string
- if ResourceType == ONU_ID {
- Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
- } else if ResourceType == ALLOC_ID {
- Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
- } else if ResourceType == GEMPORT_ID {
- Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
- } else if ResourceType == FLOW_ID {
- Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
- } else {
- log.Error("Invalid resource pool identifier")
- }
- return Path
+ /*
+ Get the shared pool for the given resource type.
+ all the resource ranges and the shared resource maps are initialized during the init.
+ */
+ SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+ if SharedPoolID != 0 {
+ IntfID = SharedPoolID
+ }
+ var Path string
+ if ResourceType == ONU_ID {
+ Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == ALLOC_ID {
+ Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == GEMPORT_ID {
+ Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else if ResourceType == FLOW_ID {
+ Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+ } else {
+ log.Error("Invalid resource pool identifier")
+ }
+ return Path
}
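GetPath simply selects a per-resource-type format string and substitutes the device ID and the (possibly shared) interface ID. A sketch of the same dispatch, with made-up template strings and literal type names standing in for the real *_POOL_PATH and ONU_ID/ALLOC_ID/GEMPORT_ID/FLOW_ID constants; a switch is used here only because it reads a little tighter than the if/else chain:

package main

import "fmt"

// Illustrative templates only; the real path constants are defined elsewhere in this file.
const (
    onuIDPoolPath     = "%s/onu_id_pool/%d"
    allocIDPoolPath   = "%s/alloc_id_pool/%d"
    gemportIDPoolPath = "%s/gemport_id_pool/%d"
    flowIDPoolPath    = "%s/flow_id_pool/%d"
)

func getPath(deviceID string, intfID uint32, resourceType string) string {
    switch resourceType {
    case "ONU_ID":
        return fmt.Sprintf(onuIDPoolPath, deviceID, intfID)
    case "ALLOC_ID":
        return fmt.Sprintf(allocIDPoolPath, deviceID, intfID)
    case "GEMPORT_ID":
        return fmt.Sprintf(gemportIDPoolPath, deviceID, intfID)
    case "FLOW_ID":
        return fmt.Sprintf(flowIDPoolPath, deviceID, intfID)
    default:
        return ""
    }
}

func main() {
    fmt.Println(getPath("olt-0001", 2, "ALLOC_ID")) // olt-0001/alloc_id_pool/2
}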
func (PONRMgr *PONResourceManager) GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
- /*
- Create alloc/gemport/onu/flow id for given OLT PON interface.
- :param pon_intf_id: OLT PON interface id
- :param resource_type: String to identify type of resource
- :param num_of_id: required number of ids
- :return list/uint32/None: list, uint32 or None if resource type is
- alloc_id/gemport_id, onu_id or invalid type respectively
- */
- if NumIDs < 1 {
- log.Error("Invalid number of resources requested")
- return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
- }
- // delegate to the master instance if sharing enabled across instances
+ /*
+ Create alloc/gemport/onu/flow id for given OLT PON interface.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param num_of_id: required number of ids
+ :return list/uint32/None: list, uint32 or None if resource type is
+ alloc_id/gemport_id, onu_id or invalid type respectively
+ */
+ if NumIDs < 1 {
+ log.Error("Invalid number of resources requested")
+ return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
+ }
+ // delegate to the master instance if sharing enabled across instances
- SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
- if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
- return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
- }
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
+ }
- Path := PONRMgr.GetPath(IntfID, ResourceType)
- if Path == "" {
- log.Errorf("Failed to get path for resource type %s", ResourceType)
- return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
- }
- log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
- var Result []uint32
- var NextID uint32
- Resource, err := PONRMgr.GetResource(Path)
- if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
- if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
- log.Error("Failed to Generate ID")
- return Result, err
- }
- Result = append(Result, NextID)
- } else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
- if NumIDs == 1 {
- if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
- log.Error("Failed to Generate ID")
- return Result, err
- }
- Result = append(Result, NextID)
- } else {
- for NumIDs > 0 {
- if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
- log.Error("Failed to Generate ID")
- return Result, err
- }
- Result = append(Result, NextID)
- NumIDs--
- }
- }
- } else {
- log.Error("get resource failed")
- return Result, err
- }
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Errorf("Failed to get path for resource type %s", ResourceType)
+ return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
+ }
+ log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+ var Result []uint32
+ var NextID uint32
+ Resource, err := PONRMgr.GetResource(Path)
+ if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ } else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+ if NumIDs == 1 {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ } else {
+ for NumIDs > 0 {
+ if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+ log.Error("Failed to Generate ID")
+ return Result, err
+ }
+ Result = append(Result, NextID)
+ NumIDs--
+ }
+ }
+ } else {
+ log.Error("get resource failed")
+ return Result, err
+ }
- //Update resource in kv store
- if PONRMgr.UpdateResource(Path, Resource) != nil {
- log.Errorf("Failed to update resource %s", Path)
- return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
- }
- return Result, nil
+ //Update resource in kv store
+ if PONRMgr.UpdateResource(Path, Resource) != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
+ }
+ return Result, nil
}
func checkValidResourceType(ResourceType string) bool {
- KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+ KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
- for _, v := range KnownResourceTypes {
- if v == ResourceType {
- return true
- }
- }
- return false
+ for _, v := range KnownResourceTypes {
+ if v == ResourceType {
+ return true
+ }
+ }
+ return false
}
func (PONRMgr *PONResourceManager) FreeResourceID(IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
- /*
- Release alloc/gemport/onu/flow id for given OLT PON interface.
- :param pon_intf_id: OLT PON interface id
- :param resource_type: String to identify type of resource
- :param release_content: required number of ids
- :return boolean: True if all IDs in given release_content release else False
- */
- if checkValidResourceType(ResourceType) == false {
- log.Error("Invalid resource type")
- return false
- }
- if ReleaseContent == nil {
- log.Debug("Nothing to release")
- return true
- }
- // delegate to the master instance if sharing enabled across instances
- SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
- if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
- return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
- }
- Path := PONRMgr.GetPath(IntfID, ResourceType)
- if Path == "" {
- log.Error("Failed to get path")
- return false
- }
- Resource, err := PONRMgr.GetResource(Path)
- if err != nil {
- log.Error("Failed to get resource")
- return false
- }
- for _, Val := range ReleaseContent {
- PONRMgr.ReleaseID(Resource, Val)
- }
- if PONRMgr.UpdateResource(Path, Resource) != nil {
- log.Errorf("Free resource for %s failed", Path)
- return false
- }
- return true
+ /*
+ Release alloc/gemport/onu/flow id for given OLT PON interface.
+ :param pon_intf_id: OLT PON interface id
+ :param resource_type: String to identify type of resource
+ :param release_content: required number of ids
+ :return boolean: True if all IDs in the given release_content were released, else False
+ */
+ if checkValidResourceType(ResourceType) == false {
+ log.Error("Invalid resource type")
+ return false
+ }
+ if ReleaseContent == nil {
+ log.Debug("Nothing to release")
+ return true
+ }
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
+ }
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Error("Failed to get path")
+ return false
+ }
+ Resource, err := PONRMgr.GetResource(Path)
+ if err != nil {
+ log.Error("Failed to get resource")
+ return false
+ }
+ for _, Val := range ReleaseContent {
+ PONRMgr.ReleaseID(Resource, Val)
+ }
+ if PONRMgr.UpdateResource(Path, Resource) != nil {
+ log.Errorf("Free resource for %s failed", Path)
+ return false
+ }
+ return true
}
func (PONRMgr *PONResourceManager) UpdateResource(Path string, Resource map[string]interface{}) error {
- /*
- Update resource in resource kv store.
- :param path: path to update resource
- :param resource: resource need to be updated
- :return boolean: True if resource updated in kv store else False
- */
- // TODO resource[POOL] = resource[POOL].bin
- Value, err := json.Marshal(Resource)
- if err != nil {
- log.Error("failed to Marshal")
- return err
- }
- err = PONRMgr.KVStore.Put(Path, Value)
- if err != nil {
- log.Error("failed to put data to kv store %s", Path)
- return err
- }
- return nil
+ /*
+ Update resource in resource kv store.
+ :param path: path to update resource
+ :param resource: resource need to be updated
+ :return error: nil if the resource was updated in the KV store, error otherwise
+ */
+ // TODO resource[POOL] = resource[POOL].bin
+ Value, err := json.Marshal(Resource)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
+ err = PONRMgr.KVStore.Put(Path, Value)
+ if err != nil {
+ log.Error("failed to put data to kv store %s", Path)
+ return err
+ }
+ return nil
}
func (PONRMgr *PONResourceManager) ClearResourceIDPool(IntfID uint32, ResourceType string) bool {
- /*
- Clear Resource Pool for a given Resource Type on a given PON Port.
- :return boolean: True if removed else False
- */
+ /*
+ Clear Resource Pool for a given Resource Type on a given PON Port.
+ :return boolean: True if removed else False
+ */
- // delegate to the master instance if sharing enabled across instances
- SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
- if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
- return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
- }
- Path := PONRMgr.GetPath(IntfID, ResourceType)
- if Path == "" {
- log.Error("Failed to get path")
- return false
- }
+ // delegate to the master instance if sharing enabled across instances
+ SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+ if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+ return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
+ }
+ Path := PONRMgr.GetPath(IntfID, ResourceType)
+ if Path == "" {
+ log.Error("Failed to get path")
+ return false
+ }
- if err := PONRMgr.KVStore.Delete(Path); err != nil {
- log.Errorf("Failed to delete resource %s", Path)
- return false
- }
- log.Debugf("Cleared resource %s", Path)
- return true
+ if err := PONRMgr.KVStore.Delete(Path); err != nil {
+ log.Errorf("Failed to delete resource %s", Path)
+ return false
+ }
+ log.Debugf("Cleared resource %s", Path)
+ return true
}
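Several methods in this file open with the same delegation check: if the resource type is shared and owned by another manager instance, the call is forwarded to that owner. A compact, simplified sketch of the pattern (manager and clearPool are illustrative; the real code resolves the owner through SharedIdxByType and SharedResourceMgrs):

package main

import "fmt"

// manager is a toy stand-in for PONResourceManager, keeping only the pieces
// needed to show the shared-pool delegation used throughout this file.
type manager struct {
    name       string
    sharedMgrs map[string]*manager // resource type -> owning manager, if shared
}

func (m *manager) clearPool(resourceType string) bool {
    // Delegate to the master instance if sharing is enabled for this type.
    if owner, ok := m.sharedMgrs[resourceType]; ok && owner != m {
        return owner.clearPool(resourceType)
    }
    fmt.Printf("%s clearing %s pool locally\n", m.name, resourceType)
    return true
}

func main() {
    master := &manager{name: "master", sharedMgrs: map[string]*manager{}}
    local := &manager{name: "local", sharedMgrs: map[string]*manager{"ALLOC_ID": master}}
    local.clearPool("ALLOC_ID")   // forwarded to the shared-pool owner
    local.clearPool("GEMPORT_ID") // handled locally
}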
func (PONRMgr PONResourceManager) InitResourceMap(PONIntfONUID string) {
- /*
- Initialize resource map
- :param pon_intf_onu_id: reference of PON interface id and onu id
- */
- // initialize pon_intf_onu_id tuple to alloc_ids map
- AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
- var AllocIDs []byte
- Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
- if Result != nil {
- log.Error("Failed to update the KV store")
- return
- }
- // initialize pon_intf_onu_id tuple to gemport_ids map
- GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
- var GEMPortIDs []byte
- Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
- if Result != nil {
- log.Error("Failed to update the KV store")
- return
- }
+ /*
+ Initialize resource map
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ */
+ // initialize pon_intf_onu_id tuple to alloc_ids map
+ AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ var AllocIDs []byte
+ Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
+ if Result != nil {
+ log.Error("Failed to update the KV store")
+ return
+ }
+ // initialize pon_intf_onu_id tuple to gemport_ids map
+ GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ var GEMPortIDs []byte
+ Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
+ if Result != nil {
+ log.Error("Failed to update the KV store")
+ return
+ }
}
func (PONRMgr PONResourceManager) RemoveResourceMap(PONIntfONUID string) bool {
- /*
- Remove resource map
- :param pon_intf_onu_id: reference of PON interface id and onu id
- */
- // remove pon_intf_onu_id tuple to alloc_ids map
- var err error
- AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
- if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
- log.Errorf("Failed to remove resource %s", AllocIDPath)
- return false
- }
- // remove pon_intf_onu_id tuple to gemport_ids map
- GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
- err = PONRMgr.KVStore.Delete(GEMPortIDPath)
- if err != nil {
- log.Errorf("Failed to remove resource %s", GEMPortIDPath)
- return false
- }
+ /*
+ Remove resource map
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ */
+ // remove pon_intf_onu_id tuple to alloc_ids map
+ var err error
+ AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
+ log.Errorf("Failed to remove resource %s", AllocIDPath)
+ return false
+ }
+ // remove pon_intf_onu_id tuple to gemport_ids map
+ GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ err = PONRMgr.KVStore.Delete(GEMPortIDPath)
+ if err != nil {
+ log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+ return false
+ }
- FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
- if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err != nil {
- for _, Flow := range FlowIDs {
- FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow)
- if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
- log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
- return false
- }
- }
- }
+ FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+ if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err == nil {
+ for _, Flow := range FlowIDs {
+ FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
+ if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
+ log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+ return false
+ }
+ }
+ }
- if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
- log.Errorf("Failed to remove resource %s", FlowIDPath)
- return false
- }
+ if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
+ log.Errorf("Failed to remove resource %s", FlowIDPath)
+ return false
+ }
- return true
+ return true
}
func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(IntfONUID string) []uint32 {
- /*
- Get currently configured alloc ids for given pon_intf_onu_id
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :return list: List of alloc_ids if available, else None
- */
- Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ /*
+ Get currently configured alloc ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of alloc_ids if available, else None
+ */
+ Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- var Data []uint32
- Value, err := PONRMgr.KVStore.Get(Path)
- if err == nil {
- if Value != nil {
- Val,err := ToByte(Value.Value)
- if err != nil{
- log.Errorw("Failed to convert into byte array",log.Fields{"error":err})
- return Data
- }
- if err = json.Unmarshal(Val, &Data); err != nil {
- log.Error("Failed to unmarshal",log.Fields{"error":err})
- return Data
- }
- }
- }
- return Data
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, err := ToByte(Value.Value)
+ if err != nil {
+ log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+ return Data
+ }
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Error("Failed to unmarshal", log.Fields{"error": err})
+ return Data
+ }
+ }
+ }
+ return Data
}
func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(IntfONUID string) []uint32 {
- /*
- Get currently configured gemport ids for given pon_intf_onu_id
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :return list: List of gemport IDs if available, else None
- */
+ /*
+ Get currently configured gemport ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of gemport IDs if available, else None
+ */
- Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- log.Debugf("Getting current gemports for %s", Path)
- var Data []uint32
- Value, err := PONRMgr.KVStore.Get(Path)
- if err == nil {
- if Value != nil {
- Val, _ := ToByte(Value.Value)
- if err = json.Unmarshal(Val, &Data); err != nil {
- log.Errorw("Failed to unmarshal",log.Fields{"error":err})
- return Data
- }
- }
- } else {
- log.Errorf("Failed to get data from kvstore for %s", Path)
- }
- return Data
+ Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ log.Debugf("Getting current gemports for %s", Path)
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, _ := ToByte(Value.Value)
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+ return Data
+ }
+ }
+ } else {
+ log.Errorf("Failed to get data from kvstore for %s", Path)
+ }
+ return Data
}
func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(IntfONUID string) []uint32 {
- /*
- Get currently configured flow ids for given pon_intf_onu_id
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :return list: List of Flow IDs if available, else None
- */
+ /*
+ Get currently configured flow ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :return list: List of Flow IDs if available, else None
+ */
- Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- var Data []uint32
- Value, err := PONRMgr.KVStore.Get(Path)
- if err == nil {
- if Value != nil {
- Val, _ := ToByte(Value.Value)
- if err = json.Unmarshal(Val, &Data); err != nil {
- log.Error("Failed to unmarshal")
- return Data
- }
- }
- }
- return Data
+ var Data []uint32
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, _ := ToByte(Value.Value)
+ if err = json.Unmarshal(Val, &Data); err != nil {
+ log.Error("Failed to unmarshal")
+ return Data
+ }
+ }
+ }
+ return Data
}
-func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{})error{
- /*
- Get flow details configured for the ONU.
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param flow_id: Flow Id reference
- :param Data: Result
- :return error: nil if no error in getting from KV store
- */
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{}) error {
+ /*
+ Get flow details configured for the ONU.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow Id reference
+ :param Data: Result
+ :return error: nil if no error in getting from KV store
+ */
- Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
- Value, err := PONRMgr.KVStore.Get(Path)
- if err == nil {
- if Value != nil {
- Val,err := ToByte(Value.Value)
- if err != nil{
- log.Errorw("Failed to convert flowinfo into byte array",log.Fields{"error":err})
- return err
- }
- if err = json.Unmarshal(Val, Data); err != nil {
- log.Errorw("Failed to unmarshal",log.Fields{"error":err})
- return err
- }
- }
- }
- return err
+ Value, err := PONRMgr.KVStore.Get(Path)
+ if err == nil {
+ if Value != nil {
+ Val, err := ToByte(Value.Value)
+ if err != nil {
+ log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+ return err
+ }
+ if err = json.Unmarshal(Val, Data); err != nil {
+ log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+ return err
+ }
+ }
+ }
+ return err
}
func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(IntfONUID string, FlowID uint32) bool {
- /*
- Get flow_id details configured for the ONU.
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param flow_id: Flow Id reference
- */
- Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+ /*
+ Get flow_id details configured for the ONU.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow Id reference
+ */
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
- if err := PONRMgr.KVStore.Delete(Path); err != nil {
- log.Errorf("Falied to remove resource %s", Path)
- return false
- }
- return true
+ if err := PONRMgr.KVStore.Delete(Path); err != nil {
+ log.Errorf("Falied to remove resource %s", Path)
+ return false
+ }
+ return true
}
func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(IntfONUID string, AllocIDs []uint32) error {
- /*
- Update currently configured alloc ids for given pon_intf_onu_id
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param alloc_ids: list of alloc ids
- */
- var Value []byte
- var err error
- Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- Value, err = json.Marshal(AllocIDs)
- if err != nil {
- log.Error("failed to Marshal")
- return err
- }
+ /*
+ Update currently configured alloc ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param alloc_ids: list of alloc ids
+ */
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ Value, err = json.Marshal(AllocIDs)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
- if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
- log.Errorf("Failed to update resource %s", Path)
- return err
- }
- return err
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
}
func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(IntfONUID string, GEMPortIDs []uint32) error {
- /*
- Update currently configured gemport ids for given pon_intf_onu_id
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param gemport_ids: list of gem port ids
- */
+ /*
+ Update currently configured gemport ids for given pon_intf_onu_id
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param gemport_ids: list of gem port ids
+ */
- var Value []byte
- var err error
- Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- log.Debugf("Updating gemport ids for %s", Path)
- Value, err = json.Marshal(GEMPortIDs)
- if err != nil {
- log.Error("failed to Marshal")
- return err
- }
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ log.Debugf("Updating gemport ids for %s", Path)
+ Value, err = json.Marshal(GEMPortIDs)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
- if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
- log.Errorf("Failed to update resource %s", Path)
- return err
- }
- return err
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
}
func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
- /*
- Check for a flow id in a given list of flow IDs.
- :param FLowIDList: List of Flow IDs
- :param FlowID: Flowd to check in the list
- : return true and the index if present false otherwise.
- */
+ /*
+ Check for a flow id in a given list of flow IDs.
+ :param FlowIDList: List of Flow IDs
+ :param FlowID: Flow ID to check for in the list
+ :return: true and the index if present, false otherwise.
+ */
- for idx, _ := range FlowIDList {
- if FlowID == FlowIDList[idx] {
- return true, uint32(idx)
- }
- }
- return false, 0
+ for idx, _ := range FlowIDList {
+ if FlowID == FlowIDList[idx] {
+ return true, uint32(idx)
+ }
+ }
+ return false, 0
}
func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(IntfONUID string, FlowID uint32, Add bool) error {
- /*
- Update the flow_id list of the ONU (add or remove flow_id from the list)
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param flow_id: flow ID
- :param add: Boolean flag to indicate whether the flow_id should be
- added or removed from the list. Defaults to adding the flow.
- */
- var Value []byte
- var err error
- var RetVal bool
- var IDx uint32
- Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
- FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
+ /*
+ Update the flow_id list of the ONU (add or remove flow_id from the list)
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: flow ID
+ :param add: Boolean flag to indicate whether the flow_id should be
+ added or removed from the list. Defaults to adding the flow.
+ */
+ var Value []byte
+ var err error
+ var RetVal bool
+ var IDx uint32
+ Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+ FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
- if Add {
- if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == true {
- return err
- }
- FlowIDs = append(FlowIDs, FlowID)
- } else {
- if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == false {
- return err
- }
- // delete the index and shift
- FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
- }
- Value, err = json.Marshal(FlowIDs)
- if err != nil {
- log.Error("Failed to Marshal")
- return err
- }
+ if Add {
+ if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == true {
+ return err
+ }
+ FlowIDs = append(FlowIDs, FlowID)
+ } else {
+ if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == false {
+ return err
+ }
+ // delete the index and shift
+ FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+ }
+ Value, err = json.Marshal(FlowIDs)
+ if err != nil {
+ log.Error("Failed to Marshal")
+ return err
+ }
- if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
- log.Errorf("Failed to update resource %s", Path)
- return err
- }
- return err
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
}
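The add/remove logic above relies on two standard slice idioms: a linear scan for membership and append(s[:i], s[i+1:]...) to delete by index. A self-contained sketch of exactly that (contains and updateFlowIDs are illustrative helpers, not part of this package):

package main

import "fmt"

// contains reports whether id is in ids, and at which index.
func contains(ids []uint32, id uint32) (bool, int) {
    for i := range ids {
        if ids[i] == id {
            return true, i
        }
    }
    return false, 0
}

// updateFlowIDs adds or removes id, mirroring UpdateFlowIDForOnu's list handling.
func updateFlowIDs(ids []uint32, id uint32, add bool) []uint32 {
    found, idx := contains(ids, id)
    if add {
        if found {
            return ids // already present, nothing to do
        }
        return append(ids, id)
    }
    if !found {
        return ids // not present, nothing to remove
    }
    // Delete the element at idx by shifting the tail left.
    return append(ids[:idx], ids[idx+1:]...)
}

func main() {
    flows := []uint32{10, 11, 12}
    flows = updateFlowIDs(flows, 13, true)  // add
    flows = updateFlowIDs(flows, 11, false) // remove
    fmt.Println(flows)                      // [10 12 13]
}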
-func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface {}) error {
- /*
- Update any metadata associated with the flow_id. The flow_data could be json
- or any of other data structure. The resource manager doesnt care
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param flow_id: Flow ID
- :param flow_data: Flow data blob
- */
- var Value []byte
- var err error
- Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
- Value, err = json.Marshal(FlowData)
- if err != nil {
- log.Error("failed to Marshal")
- return err
- }
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface{}) error {
+ /*
+ Update any metadata associated with the flow_id. The flow_data could be JSON
+ or any other data structure; the resource manager doesn't care.
+ :param pon_intf_onu_id: reference of PON interface id and onu id
+ :param flow_id: Flow ID
+ :param flow_data: Flow data blob
+ */
+ var Value []byte
+ var err error
+ Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+ Value, err = json.Marshal(FlowData)
+ if err != nil {
+ log.Error("failed to Marshal")
+ return err
+ }
- if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
- log.Errorf("Failed to update resource %s", Path)
- return err
- }
- return err
+ if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
+ log.Errorf("Failed to update resource %s", Path)
+ return err
+ }
+ return err
}
func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
- /*
- Generate unique id having OFFSET as start
- :param resource: resource used to generate ID
- :return uint32: generated id
- */
- ByteArray, err := ToByte(Resource[POOL])
- if err != nil {
- log.Error("Failed to convert resource to byte array")
- return 0, err
- }
- Data := bitmap.TSFromData(ByteArray, false)
- if Data == nil {
- log.Error("Failed to get data from byte array")
- return 0, errors.New("Failed to get data from byte array")
- }
+ /*
+ Generate a unique ID, offset by the pool's start index (START_IDX)
+ :param resource: resource used to generate ID
+ :return uint32: generated id
+ */
+ ByteArray, err := ToByte(Resource[POOL])
+ if err != nil {
+ log.Error("Failed to convert resource to byte array")
+ return 0, err
+ }
+ Data := bitmap.TSFromData(ByteArray, false)
+ if Data == nil {
+ log.Error("Failed to get data from byte array")
+ return 0, errors.New("Failed to get data from byte array")
+ }
- Len := Data.Len()
- var Idx int
- for Idx = 0; Idx < Len; Idx++ {
- Val := Data.Get(Idx)
- if Val == false {
- break
- }
- }
- Data.Set(Idx, true)
- res := uint32(Resource[START_IDX].(float64))
- Resource[POOL] = Data.Data(false)
- log.Debugf("Generated ID for %d", (uint32(Idx) + res))
- return (uint32(Idx) + res), err
+ Len := Data.Len()
+ var Idx int
+ for Idx = 0; Idx < Len; Idx++ {
+ Val := Data.Get(Idx)
+ if Val == false {
+ break
+ }
+ }
+ Data.Set(Idx, true)
+ res := uint32(Resource[START_IDX].(float64))
+ Resource[POOL] = Data.Data(false)
+ log.Debugf("Generated ID for %d", (uint32(Idx) + res))
+ return (uint32(Idx) + res), err
}
func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
- /*
- Release unique id having OFFSET as start index.
- :param resource: resource used to release ID
- :param unique_id: id need to be released
- */
- ByteArray, err := ToByte(Resource[POOL])
- if err != nil {
- log.Error("Failed to convert resource to byte array")
- return false
- }
- Data := bitmap.TSFromData(ByteArray, false)
- if Data == nil {
- log.Error("Failed to get resource pool")
- return false
- }
- var Idx uint32
- Idx = Id - uint32(Resource[START_IDX].(float64))
- Data.Set(int(Idx), false)
- Resource[POOL] = Data.Data(false)
+ /*
+ Release unique id having OFFSET as start index.
+ :param resource: resource used to release ID
+ :param unique_id: id need to be released
+ */
+ ByteArray, err := ToByte(Resource[POOL])
+ if err != nil {
+ log.Error("Failed to convert resource to byte array")
+ return false
+ }
+ Data := bitmap.TSFromData(ByteArray, false)
+ if Data == nil {
+ log.Error("Failed to get resource pool")
+ return false
+ }
+ var Idx uint32
+ Idx = Id - uint32(Resource[START_IDX].(float64))
+ Data.Set(int(Idx), false)
+ Resource[POOL] = Data.Data(false)
- return true
+ return true
}
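Taken together, GenerateNextID and ReleaseID implement a free-list over a bitmap: allocation finds the first clear bit, sets it and adds the pool's start offset; release clears the corresponding bit. A stdlib-only sketch of the same idea over a raw []byte (nextID/releaseID are illustrative; the bit layout here is LSB-first per byte, chosen only for the sketch, and need not match the bitmap library's layout):

package main

import (
    "errors"
    "fmt"
)

// nextID finds the first clear bit in pool, sets it, and returns bitIndex+startIdx,
// mirroring GenerateNextID.
func nextID(pool []byte, startIdx uint32) (uint32, error) {
    for i := 0; i < len(pool)*8; i++ {
        byteIdx, bit := i/8, uint(i%8)
        if pool[byteIdx]&(1<<bit) == 0 {
            pool[byteIdx] |= 1 << bit // mark as allocated
            return uint32(i) + startIdx, nil
        }
    }
    return 0, errors.New("pool exhausted")
}

// releaseID clears the bit for id, mirroring ReleaseID.
func releaseID(pool []byte, startIdx, id uint32) {
    i := id - startIdx
    pool[i/8] &^= 1 << (i % 8)
}

func main() {
    pool := make([]byte, 4) // room for 32 IDs
    a, _ := nextID(pool, 1024)
    b, _ := nextID(pool, 1024)
    fmt.Println(a, b) // 1024 1025
    releaseID(pool, 1024, a)
    c, _ := nextID(pool, 1024)
    fmt.Println(c) // 1024 again, because the bit was released
}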
-func (PONRMgr *PONResourceManager) GetTechnology()string{
- return PONRMgr.Technology
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+ return PONRMgr.Technology
}
-func (PONRMgr *PONResourceManager) GetResourceTypeAllocID()string{
- return ALLOC_ID
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+ return ALLOC_ID
}
-func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID()string{
- return GEMPORT_ID
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+ return GEMPORT_ID
}
-
-
// ToByte converts an interface value to a []byte. The interface should either be of
// a string type or []byte. Otherwise, an error is returned.
func ToByte(value interface{}) ([]byte, error) {
- switch t := value.(type) {
- case []byte:
- return value.([]byte), nil
- case string:
- return []byte(value.(string)), nil
- default:
- return nil, fmt.Errorf("unexpected-type-%T", t)
- }
+ switch t := value.(type) {
+ case []byte:
+ return value.([]byte), nil
+ case string:
+ return []byte(value.(string)), nil
+ default:
+ return nil, fmt.Errorf("unexpected-type-%T", t)
+ }
}
// ToString converts an interface value to a string. The interface should either be of
// a string type or []byte. Otherwise, an error is returned.
func ToString(value interface{}) (string, error) {
- switch t := value.(type) {
- case []byte:
- return string(value.([]byte)), nil
- case string:
- return value.(string), nil
- default:
- return "", fmt.Errorf("unexpected-type-%T", t)
- }
+ switch t := value.(type) {
+ case []byte:
+ return string(value.([]byte)), nil
+ case string:
+ return value.(string), nil
+ default:
+ return "", fmt.Errorf("unexpected-type-%T", t)
+ }
}
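A quick usage sketch for the conversion helpers above, using a local copy (toByte) so the example is runnable on its own; it shows the two supported input types and the error case for an unsupported one:

package main

import "fmt"

// toByte is a local copy of the ToByte helper above, for a runnable example.
func toByte(value interface{}) ([]byte, error) {
    switch t := value.(type) {
    case []byte:
        return t, nil
    case string:
        return []byte(t), nil
    default:
        return nil, fmt.Errorf("unexpected-type-%T", t)
    }
}

func main() {
    b, _ := toByte("hello")        // string -> []byte
    fmt.Println(b)                 // [104 101 108 108 111]
    b, _ = toByte([]byte{1, 2, 3}) // []byte passes through unchanged
    fmt.Println(b)
    if _, err := toByte(42); err != nil {
        fmt.Println(err) // unexpected-type-int
    }
}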
diff --git a/common/techprofile/tech_profile.go b/common/techprofile/tech_profile.go
index 2a256e9..9f7bebf 100644
--- a/common/techprofile/tech_profile.go
+++ b/common/techprofile/tech_profile.go
@@ -122,7 +122,7 @@
const MAX_GEM_PAYLOAD = "max_gem_payload_size"
type InstanceControl struct {
- Onu string `json:ONU"`
+ Onu string `json:"ONU"`
Uni string `json:"uni"`
MaxGemPayloadSize string `json:"max_gem_payload_size"`
}
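The tag fix above matters because a struct tag that does not parse (the old json:ONU" is missing its opening quote) is effectively ignored by encoding/json, so the field would marshal under its Go field name rather than "ONU". A tiny check of the corrected tag, with illustrative values:

package main

import (
    "encoding/json"
    "fmt"
)

// Same shape as InstanceControl above, with the corrected tag on Onu.
type instanceControl struct {
    Onu               string `json:"ONU"`
    Uni               string `json:"uni"`
    MaxGemPayloadSize string `json:"max_gem_payload_size"`
}

func main() {
    out, _ := json.Marshal(instanceControl{Onu: "multi-instance", Uni: "single-instance", MaxGemPayloadSize: "auto"})
    fmt.Println(string(out)) // {"ONU":"multi-instance","uni":"single-instance","max_gem_payload_size":"auto"}
}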
diff --git a/db/kvstore/etcdclient.go b/db/kvstore/etcdclient.go
index 0b97039..6935296 100644
--- a/db/kvstore/etcdclient.go
+++ b/db/kvstore/etcdclient.go
@@ -455,6 +455,7 @@
func (c *EtcdClient) AcquireLock(lockName string, timeout int) error {
duration := GetDuration(timeout)
ctx, cancel := context.WithTimeout(context.Background(), duration)
+ defer cancel()
session, _ := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
if err := mu.Lock(context.Background()); err != nil {
@@ -462,7 +463,6 @@
return err
}
c.addLockName(lockName, mu, session)
- cancel()
return nil
}
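Moving cancel into a defer guarantees that the context's resources are released on every return path, including the early error return that previously skipped the cancel call. The general shape, with a placeholder body standing in for the real locking work:

package main

import (
    "context"
    "fmt"
    "time"
)

func doWithTimeout(timeout time.Duration) error {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    // defer ensures cancel runs on every return path; calling cancel only
    // before the final return would leak the context's timer whenever an
    // earlier branch returned first.
    defer cancel()

    select {
    case <-time.After(10 * time.Millisecond): // placeholder for the real work
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

func main() {
    fmt.Println(doWithTimeout(time.Second))
}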
diff --git a/db/model/base_test.go b/db/model/base_test.go
index 9d8a1a9..623d24b 100644
--- a/db/model/base_test.go
+++ b/db/model/base_test.go
@@ -33,28 +33,32 @@
DbTimeout int
}
-func commonCallback(args ...interface{}) interface{} {
- log.Infof("Running common callback - arg count: %s", len(args))
+var callbackMutex sync.Mutex
+
+func commonChanCallback(args ...interface{}) interface{} {
+ log.Infof("Running common callback - arg count: %d", len(args))
//for i := 0; i < len(args); i++ {
// log.Infof("ARG %d : %+v", i, args[i])
//}
- mutex := sync.Mutex{}
- mutex.Lock()
- defer mutex.Unlock()
+ callbackMutex.Lock()
+ defer callbackMutex.Unlock()
- execStatus := args[1].(*bool)
+ execDoneChan := args[1].(*chan struct{})
// Inform the caller that the callback was executed
- *execStatus = true
- log.Infof("Changed value of exec status to true - stack:%s", string(debug.Stack()))
+ if *execDoneChan != nil {
+ log.Infof("Sending completion indication - stack:%s", string(debug.Stack()))
+ close(*execDoneChan)
+ *execDoneChan = nil
+ }
return nil
}
func commonCallback2(args ...interface{}) interface{} {
- log.Infof("Running common2 callback - arg count: %s %+v", len(args), args)
+ log.Infof("Running common2 callback - arg count: %d %+v", len(args), args)
return nil
}
diff --git a/db/model/node.go b/db/model/node.go
index 3908c4e..7bfdca0 100644
--- a/db/model/node.go
+++ b/db/model/node.go
@@ -309,8 +309,7 @@
// If there is not request to reconcile, try to get it from memory
if !reconcile {
- if result = n.getPath(rev.GetBranch().GetLatest(), path, depth);
- result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
+ if result = n.getPath(rev.GetBranch().GetLatest(), path, depth); result != nil && reflect.ValueOf(result).IsValid() && !reflect.ValueOf(result).IsNil() {
return result
}
}
@@ -577,7 +576,6 @@
return rev
}
-
return branch.GetLatest()
}
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index d501c66..0ccc58e 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -440,4 +440,3 @@
func (pr *NonPersistedRevision) StorageDrop(txid string, includeConfig bool) {
// stub ... required by interface
}
-
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index cf7ff9e..a56b776 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -33,9 +33,9 @@
Revision
Compress bool
- events chan *kvstore.Event `json:"-"`
- kvStore *Backend `json:"-"`
- mutex sync.RWMutex `json:"-"`
+ events chan *kvstore.Event
+ kvStore *Backend
+ mutex sync.RWMutex
isStored bool
isWatched bool
}
@@ -472,4 +472,4 @@
}
return response
-}
\ No newline at end of file
+}
diff --git a/db/model/proxy.go b/db/model/proxy.go
index eb3cb71..b45fb1d 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -248,7 +248,7 @@
pac.getProxy().Operation = op
}(PROXY_GET)
- log.Debugw("proxy-operation--update", log.Fields{"operation":p.Operation})
+ log.Debugw("proxy-operation--update", log.Fields{"operation": p.Operation})
return pac.Update(fullPath, data, strict, txid, controlled)
}
@@ -285,7 +285,7 @@
pac.SetProxy(p)
- log.Debugw("proxy-operation--add", log.Fields{"operation":p.Operation})
+ log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
return pac.Add(fullPath, data, txid, controlled)
}
@@ -319,7 +319,7 @@
p.Operation = PROXY_GET
}()
- log.Debugw("proxy-operation--add", log.Fields{"operation":p.Operation})
+ log.Debugw("proxy-operation--add", log.Fields{"operation": p.Operation})
return pac.Add(fullPath, data, txid, controlled)
}
@@ -353,7 +353,7 @@
p.Operation = PROXY_GET
}()
- log.Debugw("proxy-operation--remove", log.Fields{"operation":p.Operation})
+ log.Debugw("proxy-operation--remove", log.Fields{"operation": p.Operation})
return pac.Remove(fullPath, txid, controlled)
}
@@ -388,7 +388,7 @@
p.Operation = PROXY_GET
}()
- log.Debugw("proxy-operation--create-proxy", log.Fields{"operation":p.Operation})
+ log.Debugw("proxy-operation--create-proxy", log.Fields{"operation": p.Operation})
return pac.CreateProxy(fullPath, exclusive, controlled)
}
diff --git a/db/model/proxy_load_test.go b/db/model/proxy_load_test.go
index 47df98c..f44a6ae 100644
--- a/db/model/proxy_load_test.go
+++ b/db/model/proxy_load_test.go
@@ -43,7 +43,7 @@
}
type proxyLoadTest struct {
sync.RWMutex
- addedDevices [] string
+ addedDevices []string
updatedFirmwares []proxyLoadChanges
updatedFlows []proxyLoadChanges
preAddExecuted bool
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 1e93243..f583b99 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -27,6 +27,7 @@
"reflect"
"strconv"
"testing"
+ "time"
)
var (
@@ -110,24 +111,31 @@
TestProxy_DeviceId = "0001" + hex.EncodeToString(devIDBin)[:12]
TestProxy_Device.Id = TestProxy_DeviceId
- preAddExecuted := false
- postAddExecuted := false
+ preAddExecuted := make(chan struct{})
+ postAddExecuted := make(chan struct{})
+ preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
devicesProxy := TestProxy_Root.node.CreateProxy("/devices", false)
devicesProxy.RegisterCallback(PRE_ADD, commonCallback2, "PRE_ADD Device container changes")
devicesProxy.RegisterCallback(POST_ADD, commonCallback2, "POST_ADD Device container changes")
// Register ADD instructions callbacks
- TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonCallback, "PRE_ADD instructions", &preAddExecuted)
- TestProxy_Root_Device.RegisterCallback(POST_ADD, commonCallback, "POST_ADD instructions", &postAddExecuted)
+ TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
+ TestProxy_Root_Device.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
- // Add the device
if added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, ""); added == nil {
t.Error("Failed to add device")
} else {
t.Logf("Added device : %+v", added)
}
+ if !verifyGotResponse(preAddExecuted) {
+ t.Error("PRE_ADD callback was not executed")
+ }
+ if !verifyGotResponse(postAddExecuted) {
+ t.Error("POST_ADD callback was not executed")
+ }
+
// Verify that the added device can now be retrieved
if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find added device")
@@ -135,33 +143,41 @@
djson, _ := json.Marshal(d)
t.Logf("Found device: %s", string(djson))
}
-
- if !preAddExecuted {
- t.Error("PRE_ADD callback was not executed")
- }
- if !postAddExecuted {
- t.Error("POST_ADD callback was not executed")
- }
}
func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
TestProxy_Device.Id = TestProxy_DeviceId
- added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, "");
+ added := TestProxy_Root_Device.Add("/devices", TestProxy_Device, "")
if added.(proto.Message).String() != reflect.ValueOf(TestProxy_Device).Interface().(proto.Message).String() {
t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
}
}
+func verifyGotResponse(callbackIndicator <-chan struct{}) bool {
+ timeout := time.After(1 * time.Second)
+ // Wait until the channel closes, or we time out
+ select {
+ case <-callbackIndicator:
+ // Received response successfully
+ return true
+
+ case <-timeout:
+ // Got a timeout! fail with a timeout error
+ return false
+ }
+}
+
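The refactor replaces boolean "callback executed" flags with channels that the callback closes; the test then waits on the channel with a timeout, exactly as verifyGotResponse does. A standalone sketch of that close-to-signal pattern (waitOrTimeout is an illustrative stand-in):

package main

import (
    "fmt"
    "time"
)

// waitOrTimeout mirrors verifyGotResponse: true if done closes first, false on timeout.
func waitOrTimeout(done <-chan struct{}, d time.Duration) bool {
    select {
    case <-done:
        return true
    case <-time.After(d):
        return false
    }
}

func main() {
    done := make(chan struct{})
    go func() {
        // Simulate the asynchronous callback; closing the channel signals
        // completion to every waiter, which is why the tests close() the
        // channel rather than sending a value on it.
        time.Sleep(50 * time.Millisecond)
        close(done)
    }()
    fmt.Println(waitOrTimeout(done, time.Second)) // true
    fmt.Println(waitOrTimeout(done, time.Second)) // still true: a closed channel never blocks
}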
func TestProxy_1_1_3_Add_NewAdapter(t *testing.T) {
TestProxy_AdapterId = "test-adapter"
TestProxy_Adapter.Id = TestProxy_AdapterId
- preAddExecuted := false
- postAddExecuted := false
+ preAddExecuted := make(chan struct{})
+ postAddExecuted := make(chan struct{})
+ preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
// Register ADD instructions callbacks
- TestProxy_Root_Adapter.RegisterCallback(PRE_ADD, commonCallback, "PRE_ADD instructions for adapters", &preAddExecuted)
- TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonCallback, "POST_ADD instructions for adapters", &postAddExecuted)
+ TestProxy_Root_Adapter.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions for adapters", &preAddExecutedPtr)
+ TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions for adapters", &postAddExecutedPtr)
// Add the adapter
if added := TestProxy_Root_Adapter.Add("/adapters", TestProxy_Adapter, ""); added == nil {
@@ -170,6 +186,8 @@
t.Logf("Added adapter : %+v", added)
}
+ verifyGotResponse(postAddExecuted)
+
// Verify that the added device can now be retrieved
if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find added adapter")
@@ -178,10 +196,10 @@
t.Logf("Found adapter: %s", string(djson))
}
- if !preAddExecuted {
+ if !verifyGotResponse(preAddExecuted) {
t.Error("PRE_ADD callback was not executed")
}
- if !postAddExecuted {
+ if !verifyGotResponse(postAddExecuted) {
t.Error("POST_ADD callback was not executed")
}
}
@@ -209,8 +227,10 @@
func TestProxy_1_3_1_Update_Device(t *testing.T) {
var fwVersion int
- preUpdateExecuted := false
- postUpdateExecuted := false
+
+ preUpdateExecuted := make(chan struct{})
+ postUpdateExecuted := make(chan struct{})
+ preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
if retrieved := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
t.Error("Failed to get device")
@@ -228,13 +248,13 @@
TestProxy_Root_Device.RegisterCallback(
PRE_UPDATE,
- commonCallback,
- "PRE_UPDATE instructions (root proxy)", &preUpdateExecuted,
+ commonChanCallback,
+ "PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
)
TestProxy_Root_Device.RegisterCallback(
POST_UPDATE,
- commonCallback,
- "POST_UPDATE instructions (root proxy)", &postUpdateExecuted,
+ commonChanCallback,
+ "POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
)
if afterUpdate := TestProxy_Root_Device.Update("/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
@@ -243,19 +263,19 @@
t.Logf("Updated device : %+v", afterUpdate)
}
+ if !verifyGotResponse(preUpdateExecuted) {
+ t.Error("PRE_UPDATE callback was not executed")
+ }
+ if !verifyGotResponse(postUpdateExecuted) {
+ t.Error("POST_UPDATE callback was not executed")
+ }
+
if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated device (root proxy)")
} else {
djson, _ := json.Marshal(d)
t.Logf("Found device (root proxy): %s raw: %+v", string(djson), d)
}
-
- if !preUpdateExecuted {
- t.Error("PRE_UPDATE callback was not executed")
- }
- if !postUpdateExecuted {
- t.Error("POST_UPDATE callback was not executed")
- }
}
}
@@ -265,18 +285,19 @@
flows := devFlowsProxy.Get("/", 0, false, "")
flows.(*openflow_13.Flows).Items[0].TableId = 2244
- preUpdateExecuted := false
- postUpdateExecuted := false
+ preUpdateExecuted := make(chan struct{})
+ postUpdateExecuted := make(chan struct{})
+ preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
devFlowsProxy.RegisterCallback(
PRE_UPDATE,
- commonCallback,
- "PRE_UPDATE instructions (flows proxy)", &preUpdateExecuted,
+ commonChanCallback,
+ "PRE_UPDATE instructions (flows proxy)", &preUpdateExecutedPtr,
)
devFlowsProxy.RegisterCallback(
POST_UPDATE,
- commonCallback,
- "POST_UPDATE instructions (flows proxy)", &postUpdateExecuted,
+ commonChanCallback,
+ "POST_UPDATE instructions (flows proxy)", &postUpdateExecutedPtr,
)
kvFlows := devFlowsProxy.Get("/", 0, false, "")
@@ -291,6 +312,13 @@
t.Logf("Updated flows : %+v", updated)
}
+ if !verifyGotResponse(preUpdateExecuted) {
+ t.Error("PRE_UPDATE callback was not executed")
+ }
+ if !verifyGotResponse(postUpdateExecuted) {
+ t.Error("POST_UPDATE callback was not executed")
+ }
+
if d := devFlowsProxy.Get("/", 0, false, ""); d == nil {
t.Error("Failed to find updated flows (flows proxy)")
} else {
@@ -304,18 +332,12 @@
djson, _ := json.Marshal(d)
t.Logf("Found flows (root proxy): %s", string(djson))
}
-
- if !preUpdateExecuted {
- t.Error("PRE_UPDATE callback was not executed")
- }
- if !postUpdateExecuted {
- t.Error("POST_UPDATE callback was not executed")
- }
}
func TestProxy_1_3_3_Update_Adapter(t *testing.T) {
- preUpdateExecuted := false
- postUpdateExecuted := false
+ preUpdateExecuted := make(chan struct{})
+ postUpdateExecuted := make(chan struct{})
+ preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
adaptersProxy := TestProxy_Root.node.CreateProxy("/adapters", false)
@@ -328,13 +350,13 @@
adaptersProxy.RegisterCallback(
PRE_UPDATE,
- commonCallback,
- "PRE_UPDATE instructions for adapters", &preUpdateExecuted,
+ commonChanCallback,
+ "PRE_UPDATE instructions for adapters", &preUpdateExecutedPtr,
)
adaptersProxy.RegisterCallback(
POST_UPDATE,
- commonCallback,
- "POST_UPDATE instructions for adapters", &postUpdateExecuted,
+ commonChanCallback,
+ "POST_UPDATE instructions for adapters", &postUpdateExecutedPtr,
)
if afterUpdate := adaptersProxy.Update("/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
@@ -343,35 +365,36 @@
t.Logf("Updated adapter : %+v", afterUpdate)
}
+ if !verifyGotResponse(preUpdateExecuted) {
+ t.Error("PRE_UPDATE callback for adapter was not executed")
+ }
+ if !verifyGotResponse(postUpdateExecuted) {
+ t.Error("POST_UPDATE callback for adapter was not executed")
+ }
+
if d := TestProxy_Root_Adapter.Get("/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated adapter (root proxy)")
} else {
djson, _ := json.Marshal(d)
t.Logf("Found adapter (root proxy): %s raw: %+v", string(djson), d)
}
-
- if !preUpdateExecuted {
- t.Error("PRE_UPDATE callback for adapter was not executed")
- }
- if !postUpdateExecuted {
- t.Error("POST_UPDATE callback for adapter was not executed")
- }
}
}
func TestProxy_1_4_1_Remove_Device(t *testing.T) {
- preRemoveExecuted := false
- postRemoveExecuted := false
+ preRemoveExecuted := make(chan struct{})
+ postRemoveExecuted := make(chan struct{})
+ preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
TestProxy_Root_Device.RegisterCallback(
PRE_REMOVE,
- commonCallback,
- "PRE_REMOVE instructions (root proxy)", &preRemoveExecuted,
+ commonChanCallback,
+ "PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
)
TestProxy_Root_Device.RegisterCallback(
POST_REMOVE,
- commonCallback,
- "POST_REMOVE instructions (root proxy)", &postRemoveExecuted,
+ commonChanCallback,
+ "POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
)
if removed := TestProxy_Root_Device.Remove("/devices/"+TestProxy_DeviceId, ""); removed == nil {
@@ -379,19 +402,20 @@
} else {
t.Logf("Removed device : %+v", removed)
}
+
+ if !verifyGotResponse(preRemoveExecuted) {
+ t.Error("PRE_REMOVE callback was not executed")
+ }
+ if !verifyGotResponse(postRemoveExecuted) {
+ t.Error("POST_REMOVE callback was not executed")
+ }
+
if d := TestProxy_Root_Device.Get("/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
t.Logf("Device was removed: %s", TestProxy_DeviceId)
}
-
- if !preRemoveExecuted {
- t.Error("PRE_REMOVE callback was not executed")
- }
- if !postRemoveExecuted {
- t.Error("POST_REMOVE callback was not executed")
- }
}
func TestProxy_2_1_1_Add_NewLogicalDevice(t *testing.T) {
@@ -400,12 +424,13 @@
TestProxy_LogicalDeviceId = "0001" + hex.EncodeToString(ldIDBin)[:12]
TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
- preAddExecuted := false
- postAddExecuted := false
+ preAddExecuted := make(chan struct{})
+ postAddExecuted := make(chan struct{})
+ preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
// Register
- TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonCallback, "PRE_ADD instructions", &preAddExecuted)
- TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonCallback, "POST_ADD instructions", &postAddExecuted)
+ TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
+ TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
if added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
t.Error("Failed to add logical device")
@@ -413,6 +438,8 @@
t.Logf("Added logical device : %+v", added)
}
+ verifyGotResponse(postAddExecuted)
+
if ld := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
t.Error("Failed to find added logical device")
} else {
@@ -420,10 +447,10 @@
t.Logf("Found logical device: %s", string(ldJSON))
}
- if !preAddExecuted {
+ if !verifyGotResponse(preAddExecuted) {
t.Error("PRE_ADD callback was not executed")
}
- if !postAddExecuted {
+ if !verifyGotResponse(postAddExecuted) {
t.Error("POST_ADD callback was not executed")
}
}
@@ -431,7 +458,7 @@
func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
- added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, "");
+ added := TestProxy_Root_LogicalDevice.Add("/logical_devices", TestProxy_LogicalDevice, "")
if added.(proto.Message).String() != reflect.ValueOf(TestProxy_LogicalDevice).Interface().(proto.Message).String() {
t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
}
@@ -461,8 +488,9 @@
func TestProxy_2_3_1_Update_LogicalDevice(t *testing.T) {
var fwVersion int
- preUpdateExecuted := false
- postUpdateExecuted := false
+ preUpdateExecuted := make(chan struct{})
+ postUpdateExecuted := make(chan struct{})
+ preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
if retrieved := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
t.Error("Failed to get logical device")
@@ -478,13 +506,13 @@
TestProxy_Root_LogicalDevice.RegisterCallback(
PRE_UPDATE,
- commonCallback,
- "PRE_UPDATE instructions (root proxy)", &preUpdateExecuted,
+ commonChanCallback,
+ "PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
)
TestProxy_Root_LogicalDevice.RegisterCallback(
POST_UPDATE,
- commonCallback,
- "POST_UPDATE instructions (root proxy)", &postUpdateExecuted,
+ commonChanCallback,
+ "POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
)
retrieved.(*voltha.LogicalDevice).RootDeviceId = strconv.Itoa(fwVersion)
@@ -495,6 +523,14 @@
} else {
t.Logf("Updated logical device : %+v", afterUpdate)
}
+
+ if !verifyGotResponse(preUpdateExecuted) {
+ t.Error("PRE_UPDATE callback was not executed")
+ }
+ if !verifyGotResponse(postUpdateExecuted) {
+ t.Error("POST_UPDATE callback was not executed")
+ }
+
if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
t.Error("Failed to find updated logical device (root proxy)")
} else {
@@ -502,13 +538,6 @@
t.Logf("Found logical device (root proxy): %s raw: %+v", string(djson), d)
}
-
- if !preUpdateExecuted {
- t.Error("PRE_UPDATE callback was not executed")
- }
- if !postUpdateExecuted {
- t.Error("POST_UPDATE callback was not executed")
- }
}
}
@@ -557,18 +586,19 @@
}
func TestProxy_2_4_1_Remove_Device(t *testing.T) {
- preRemoveExecuted := false
- postRemoveExecuted := false
+ preRemoveExecuted := make(chan struct{})
+ postRemoveExecuted := make(chan struct{})
+ preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
TestProxy_Root_LogicalDevice.RegisterCallback(
PRE_REMOVE,
- commonCallback,
- "PRE_REMOVE instructions (root proxy)", &preRemoveExecuted,
+ commonChanCallback,
+ "PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
)
TestProxy_Root_LogicalDevice.RegisterCallback(
POST_REMOVE,
- commonCallback,
- "POST_REMOVE instructions (root proxy)", &postRemoveExecuted,
+ commonChanCallback,
+ "POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
)
if removed := TestProxy_Root_LogicalDevice.Remove("/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
@@ -576,19 +606,20 @@
} else {
t.Logf("Removed device : %+v", removed)
}
+
+ if !verifyGotResponse(preRemoveExecuted) {
+ t.Error("PRE_REMOVE callback was not executed")
+ }
+ if !verifyGotResponse(postRemoveExecuted) {
+ t.Error("POST_REMOVE callback was not executed")
+ }
+
if d := TestProxy_Root_LogicalDevice.Get("/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
djson, _ := json.Marshal(d)
t.Errorf("Device was not removed - %s", djson)
} else {
t.Logf("Device was removed: %s", TestProxy_LogicalDeviceId)
}
-
- if !preRemoveExecuted {
- t.Error("PRE_REMOVE callback was not executed")
- }
- if !postRemoveExecuted {
- t.Error("POST_REMOVE callback was not executed")
- }
}
// -----------------------------
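The proxy test changes above replace the boolean executed flags with channels: the registered callback closes its channel and the test waits on it through verifyGotResponse, which removes the race between the asynchronous callback and the assertion. commonChanCallback and verifyGotResponse are defined elsewhere in this test package; a minimal sketch of how such helpers could look, assuming the variadic callback signature used by the model proxy, the package's log wrapper, and the time package:

func commonChanCallback(args ...interface{}) interface{} {
    log.Infof("Running common callback - arg count: %d", len(args))
    // In this sketch the last argument is the *chan struct{} registered by
    // the test; closing it signals that the callback actually ran.
    if execDone, ok := args[len(args)-1].(*chan struct{}); ok {
        close(*execDone)
    }
    return nil
}

func verifyGotResponse(callbackIndicator <-chan struct{}) bool {
    // Wait a bounded time for the callback to fire instead of checking a
    // flag that may not have been set yet.
    select {
    case <-callbackIndicator:
        return true
    case <-time.After(10 * time.Second):
        return false
    }
}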
diff --git a/db/model/root.go b/db/model/root.go
index 7a29c0b..338ef67 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -181,7 +181,6 @@
r.Proxy.ParentNode.Latest(txid).Finalize(false)
}
-
// Update modifies the content of an object at a given path with the provided data
func (r *root) Update(path string, data interface{}, strict bool, txid string, makeBranch MakeBranchFunction) Revision {
var result Revision
@@ -310,6 +309,6 @@
}
type rootData struct {
- Latest string `json:latest`
- Tags map[string]string `json:tags`
-}
\ No newline at end of file
+ Latest string `json:"latest"`
+ Tags map[string]string `json:"tags"`
+}
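The struct tags in rootData are quoted because a Go struct tag must be of the form key:"value"; the old json:latest form is rejected by go vet's structtag check and ignored by encoding/json, so the fields were being marshalled under their Go names. Illustrative only:

package main

import (
    "encoding/json"
    "fmt"
)

type rootData struct {
    Latest string            `json:"latest"`
    Tags   map[string]string `json:"tags"`
}

func main() {
    b, _ := json.Marshal(rootData{Latest: "rev-1", Tags: map[string]string{"stable": "rev-1"}})
    fmt.Println(string(b)) // {"latest":"rev-1","tags":{"stable":"rev-1"}}
}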
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index b6e324e..bc53791 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -36,6 +36,7 @@
TestTransaction_Root = NewRoot(&voltha.Voltha{}, nil)
TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy("/", false)
}
+
//func TestTransaction_1_GetDevices(t *testing.T) {
// getTx := TestTransaction_RootProxy.OpenTransaction()
//
diff --git a/db/model/utils.go b/db/model/utils.go
index f0fd618..b28e92f 100644
--- a/db/model/utils.go
+++ b/db/model/utils.go
@@ -36,8 +36,8 @@
// FindOwnerType will traverse a data structure and find the parent type of the specified object
func FindOwnerType(obj reflect.Value, name string, depth int, found bool) reflect.Type {
- prefix := ""
- for d:=0; d< depth; d++ {
+ prefix := ""
+ for d := 0; d < depth; d++ {
prefix += ">>"
}
k := obj.Kind()
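In passing, the reformatted loop that builds the debug prefix is equivalent to a single call to strings.Repeat; a possible simplification, not part of this change and assuming the strings package is imported:

    prefix := strings.Repeat(">>", depth)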
diff --git a/docker/Dockerfile.arouterTest b/docker/Dockerfile.arouterTest
index 8d0f956..ef743a5 100644
--- a/docker/Dockerfile.arouterTest
+++ b/docker/Dockerfile.arouterTest
@@ -19,8 +19,8 @@
RUN cp vendor/github.com/opencord/voltha-protos/go/voltha.pb /build/tests/suites/
# Build
-RUN cd afrouter && go build -o /build/afrouter
-RUN cd tests/afrouter && go build -o /build/afrouterTest
+RUN cd afrouter && go build --tags integration -o /build/afrouter
+RUN cd tests/afrouter && go build --tags integration -o /build/afrouterTest
# Run tests
RUN cd /build/tests/suites && /build/afrouterTest -config main.json -logLevel 1
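Building the afrouter test binaries with --tags integration is what activates the // +build integration constraint added to the test sources further down; without the tag those files are excluded from compilation, so a plain `go build`, `go test ./...` or `go vet ./...` no longer touches them. A minimal illustration of the (pre-Go 1.17) constraint syntax:

// +build integration

// This file is compiled only when built or tested with the integration tag,
// e.g. `go build --tags integration ./...` or `go test -tags integration`.
// A plain `go build` or `go test` skips it entirely.
package main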
diff --git a/ro_core/core/core.go b/ro_core/core/core.go
index 5825574..90aadae 100644
--- a/ro_core/core/core.go
+++ b/ro_core/core/core.go
@@ -21,8 +21,8 @@
"github.com/opencord/voltha-go/common/log"
"github.com/opencord/voltha-go/db/kvstore"
"github.com/opencord/voltha-go/db/model"
- "github.com/opencord/voltha-protos/go/voltha"
"github.com/opencord/voltha-go/ro_core/config"
+ "github.com/opencord/voltha-protos/go/voltha"
"google.golang.org/grpc"
)
diff --git a/ro_core/core/device_agent.go b/ro_core/core/device_agent.go
index ca70d48..dfaf767 100644
--- a/ro_core/core/device_agent.go
+++ b/ro_core/core/device_agent.go
@@ -202,4 +202,4 @@
return device.GetImages(), nil
}
return nil, status.Errorf(codes.NotFound, "device-%s", agent.deviceId)
-}
\ No newline at end of file
+}
diff --git a/ro_core/core/device_manager.go b/ro_core/core/device_manager.go
index 9e67b72..f3a1f6c 100644
--- a/ro_core/core/device_manager.go
+++ b/ro_core/core/device_manager.go
@@ -176,7 +176,7 @@
if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
return agent, nil
}
- return nil, status.Error(codes.NotFound, deviceId) // This should nto happen
+ return nil, status.Error(codes.NotFound, deviceId) // This should not happen
}
// loadRootDeviceParentAndChildren loads the children and parents of a root device in memory
@@ -393,4 +393,3 @@
}
return childDeviceIds, nil
}
-
diff --git a/ro_core/core/grpc_nbi_api_handler.go b/ro_core/core/grpc_nbi_api_handler.go
index 8338507..6d50e9e 100644
--- a/ro_core/core/grpc_nbi_api_handler.go
+++ b/ro_core/core/grpc_nbi_api_handler.go
@@ -266,7 +266,7 @@
//@TODO useless stub, what should this actually do?
func (handler *APIHandler) GetMeterStatsOfLogicalDevice(
- ctx context.Context,
+ ctx context.Context,
in *common.ID,
) (*openflow_13.MeterStatsReply, error) {
log.Debug("GetMeterStatsOfLogicalDevice-stub")
@@ -275,8 +275,8 @@
//@TODO useless stub, what should this actually do?
func (handler *APIHandler) GetMibDeviceData(
- ctx context.Context,
- in *common.ID,
+ ctx context.Context,
+ in *common.ID,
) (*omci.MibDeviceData, error) {
log.Debug("GetMibDeviceData-stub")
return nil, nil
@@ -299,4 +299,3 @@
log.Debug("UpdateLogicalDeviceMeterTable-stub")
return nil, nil
}
-
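The reformatted handlers above are still the @TODO stubs returning (nil, nil); one possible direction, hypothetical and not part of this change, is to return codes.Unimplemented so gRPC clients can distinguish an unimplemented RPC from an empty result:

func (handler *APIHandler) GetMibDeviceData(
    ctx context.Context,
    in *common.ID,
) (*omci.MibDeviceData, error) {
    log.Debug("GetMibDeviceData-stub")
    // Assumes google.golang.org/grpc/status and .../codes are imported, as
    // they are elsewhere in this package.
    return nil, status.Errorf(codes.Unimplemented, "GetMibDeviceData is not implemented")
}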
diff --git a/ro_core/core/logical_device_manager.go b/ro_core/core/logical_device_manager.go
index 846df63..05b494a 100644
--- a/ro_core/core/logical_device_manager.go
+++ b/ro_core/core/logical_device_manager.go
@@ -219,5 +219,5 @@
return port, nil
}
}
- return nil, status.Errorf(codes.NotFound, "%s-$s", lPortId.Id, lPortId.PortId)
+ return nil, status.Errorf(codes.NotFound, "%s-%s", lPortId.Id, lPortId.PortId)
}
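The %s-$s correction above (and the identical one in rw_core below) is the kind of mistake go vet's printf check reports: $s is not a formatting verb, so the second argument never reaches the message. Illustrative only:

// Prints "ld-1-$s" and appends "%!(EXTRA string=port-1)" for the unused argument.
fmt.Printf("%s-$s\n", "ld-1", "port-1")

// Prints "ld-1-port-1".
fmt.Printf("%s-%s\n", "ld-1", "port-1")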
diff --git a/ro_core/core/model_proxy_manager.go b/ro_core/core/model_proxy_manager.go
index 6db4bef..e766b71 100644
--- a/ro_core/core/model_proxy_manager.go
+++ b/ro_core/core/model_proxy_manager.go
@@ -38,7 +38,7 @@
)
// String equivalent for data path agents
-var commonTypes = []string {
+var commonTypes = []string{
"Adapters",
"AlarmFilters",
"CoreInstances",
diff --git a/ro_core/main.go b/ro_core/main.go
index f80c77e..97015d4 100644
--- a/ro_core/main.go
+++ b/ro_core/main.go
@@ -22,9 +22,9 @@
grpcserver "github.com/opencord/voltha-go/common/grpc"
"github.com/opencord/voltha-go/common/log"
"github.com/opencord/voltha-go/db/kvstore"
- ic "github.com/opencord/voltha-protos/go/inter_container"
"github.com/opencord/voltha-go/ro_core/config"
c "github.com/opencord/voltha-go/ro_core/core"
+ ic "github.com/opencord/voltha-protos/go/inter_container"
"os"
"os/signal"
"strconv"
diff --git a/rw_core/core/grpc_nbi_api_handler.go b/rw_core/core/grpc_nbi_api_handler.go
index 8ff3f04..0f97cd3 100755
--- a/rw_core/core/grpc_nbi_api_handler.go
+++ b/rw_core/core/grpc_nbi_api_handler.go
@@ -841,8 +841,8 @@
}
if handler.competeForTransaction() {
- if txn, err := handler.takeRequestOwnership(ctx, &utils.DeviceID{Id:in.Id}, handler.longRunningRequestTimeout); err != nil {
- failedresponse := &common.OperationResp{Code:voltha.OperationResp_OPERATION_FAILURE}
+ if txn, err := handler.takeRequestOwnership(ctx, &utils.DeviceID{Id: in.Id}, handler.longRunningRequestTimeout); err != nil {
+ failedresponse := &common.OperationResp{Code: voltha.OperationResp_OPERATION_FAILURE}
return failedresponse, err
} else {
defer txn.Close()
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
index 0b08321..2191745 100644
--- a/rw_core/core/logical_device_manager.go
+++ b/rw_core/core/logical_device_manager.go
@@ -272,7 +272,7 @@
return port, nil
}
}
- return nil, status.Errorf(codes.NotFound, "%s-$s", lPortId.Id, lPortId.PortId)
+ return nil, status.Errorf(codes.NotFound, "%s-%s", lPortId.Id, lPortId.PortId)
}
// updateLogicalPort sets up a logical port on the logical device based on the device port
diff --git a/tests/afrouter/suites/test2/test2.go b/tests/afrouter/suites/test2/test2.go
index 9f80d37..258cb64 100644
--- a/tests/afrouter/suites/test2/test2.go
+++ b/tests/afrouter/suites/test2/test2.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
diff --git a/tests/afrouter/suites/test3/test3.go b/tests/afrouter/suites/test3/test3.go
index 4c546ba..2fb561c 100644
--- a/tests/afrouter/suites/test3/test3.go
+++ b/tests/afrouter/suites/test3/test3.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
diff --git a/tests/afrouter/tester.go b/tests/afrouter/tester.go
index f3ba2aa..94972ed 100644
--- a/tests/afrouter/tester.go
+++ b/tests/afrouter/tester.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
diff --git a/tests/afrouter/tester/main.go b/tests/afrouter/tester/main.go
index 8190fe6..1037de0 100644
--- a/tests/afrouter/tester/main.go
+++ b/tests/afrouter/tester/main.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
@@ -26,9 +28,9 @@
func main() {
// Setup logging
/*
- if _, err := log.SetDefaultLogger(log.JSON, 0, nil); err != nil {
- log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
- }
+ if _, err := log.SetDefaultLogger(log.JSON, 0, nil); err != nil {
+ log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+ }
*/
defer log.CleanUp()
diff --git a/tests/core/api/grpc_nbi_api_handler_client_test.go b/tests/core/api/grpc_nbi_api_handler_client_test.go
index 47e4472..4eb5d1c 100644
--- a/tests/core/api/grpc_nbi_api_handler_client_test.go
+++ b/tests/core/api/grpc_nbi_api_handler_client_test.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
@@ -13,13 +15,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package api
import (
"context"
"fmt"
- com "github.com/opencord/voltha-go/adapters/common"
"github.com/golang/protobuf/ptypes/empty"
+ com "github.com/opencord/voltha-go/adapters/common"
"github.com/opencord/voltha-go/common/log"
"github.com/opencord/voltha-protos/go/common"
"github.com/opencord/voltha-protos/go/openflow_13"
@@ -121,7 +124,7 @@
func startKafka() {
fmt.Println("Starting Kafka and Etcd ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/docker-compose-zk-kafka-test.yml", "up", "-d")
+ cmd := exec.Command(command, "-f", "../../../compose/docker-compose-zk-kafka-test.yml", "up", "-d")
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
@@ -130,7 +133,7 @@
func startEtcd() {
fmt.Println("Starting Etcd ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/docker-compose-etcd.yml", "up", "-d")
+ cmd := exec.Command(command, "-f", "../../../compose/docker-compose-etcd.yml", "up", "-d")
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
@@ -139,7 +142,7 @@
func stopKafka() {
fmt.Println("Stopping Kafka and Etcd ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/docker-compose-zk-kafka-test.yml", "down")
+ cmd := exec.Command(command, "-f", "../../../compose/docker-compose-zk-kafka-test.yml", "down")
if err := cmd.Run(); err != nil {
// ignore error - as this is mostly due network being left behind as its being used by other
// containers
@@ -150,7 +153,7 @@
func stopEtcd() {
fmt.Println("Stopping Etcd ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/docker-compose-etcd.yml", "down")
+ cmd := exec.Command(command, "-f", "../../../compose/docker-compose-etcd.yml", "down")
if err := cmd.Run(); err != nil {
// ignore error - as this is mostly due network being left behind as its being used by other
// containers
@@ -161,7 +164,7 @@
func startCore() {
fmt.Println("Starting voltha core ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/rw_core.yml", "up", "-d")
+ cmd := exec.Command(command, "-f", "../../../compose/rw_core.yml", "up", "-d")
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
@@ -170,7 +173,7 @@
func stopCore() {
fmt.Println("Stopping voltha core ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/rw_core.yml", "down")
+ cmd := exec.Command(command, "-f", "../../../compose/rw_core.yml", "down")
if err := cmd.Run(); err != nil {
// ignore error - as this is mostly due network being left behind as its being used by other
// containers
@@ -181,7 +184,7 @@
func startSimulatedOLTAndONUAdapters() {
fmt.Println("Starting simulated OLT and ONU adapters ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/adapters-simulated.yml", "up", "-d")
+ cmd := exec.Command(command, "-f", "../../../compose/adapters-simulated.yml", "up", "-d")
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
@@ -190,7 +193,7 @@
func stopSimulatedOLTAndONUAdapters() {
fmt.Println("Stopping simulated OLT and ONU adapters ...")
command := "docker-compose"
- cmd := exec.Command(command, "-f", "../../compose/adapters-simulated.yml", "down")
+ cmd := exec.Command(command, "-f", "../../../compose/adapters-simulated.yml", "down")
if err := cmd.Run(); err != nil {
// ignore error - as this is mostly due network being left behind as its being used by other
// containers
@@ -198,7 +201,6 @@
}
}
-
func TestListDeviceIds(t *testing.T) {
fmt.Println("Testing list Devices Ids ...")
//0. Start kafka and Ectd
@@ -335,7 +337,7 @@
for i := 0; i < numberOfOLTDevices; i++ {
ctx := context.Background()
randomMacAddress := strings.ToUpper(com.GetRandomMacAddress())
- device := &voltha.Device{Type: "simulated_olt", MacAddress:randomMacAddress}
+ device := &voltha.Device{Type: "simulated_olt", MacAddress: randomMacAddress}
response, err := stub.CreateDevice(ctx, device)
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
@@ -345,7 +347,7 @@
//4. Enable all the devices
for id, _ := range devices {
ctx := context.Background()
- response, err := stub.EnableDevice(ctx, &common.ID{Id:id})
+ response, err := stub.EnableDevice(ctx, &common.ID{Id: id})
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
}
@@ -366,7 +368,7 @@
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
assert.Equal(t, len(devices)*2, len(response.Items))
- for _, d := range (response.Items) {
+ for _, d := range response.Items {
devices[d.Id] = d
assert.Equal(t, d.AdminState, voltha.AdminState_ENABLED)
}
@@ -377,7 +379,7 @@
log.Infow("response", log.Fields{"res": response, "error": lerr})
assert.Nil(t, lerr)
assert.Equal(t, numberOfOLTDevices, len(lresponse.Items))
- for _, ld := range (lresponse.Items) {
+ for _, ld := range lresponse.Items {
logicalDevices[ld.Id] = ld
// Ensure each logical device have two ports
assert.Equal(t, 2, len(ld.Ports))
@@ -387,7 +389,7 @@
for id, d := range devices {
ctx := context.Background()
if d.Type == "simulated_onu" {
- response, err := stub.DisableDevice(ctx, &common.ID{Id:id})
+ response, err := stub.DisableDevice(ctx, &common.ID{Id: id})
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
}
@@ -401,7 +403,7 @@
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
assert.Equal(t, len(devices), len(response.Items))
- for _, d := range (response.Items) {
+ for _, d := range response.Items {
if d.Type == "simulated_onu" {
assert.Equal(t, d.AdminState, voltha.AdminState_DISABLED)
devices[d.Id] = d
@@ -415,7 +417,7 @@
log.Infow("response", log.Fields{"res": response, "error": lerr})
assert.Nil(t, lerr)
assert.Equal(t, numberOfOLTDevices, len(lresponse.Items))
- for _, ld := range (lresponse.Items) {
+ for _, ld := range lresponse.Items {
logicalDevices[ld.Id] = ld
// Ensure each logical device have one port - only olt port
assert.Equal(t, 1, len(ld.Ports))
@@ -425,7 +427,7 @@
for id, d := range devices {
ctx := context.Background()
if d.Type == "simulated_onu" {
- response, err := stub.EnableDevice(ctx, &common.ID{Id:id})
+ response, err := stub.EnableDevice(ctx, &common.ID{Id: id})
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
}
@@ -439,7 +441,7 @@
log.Infow("response", log.Fields{"res": response, "error": err})
assert.Nil(t, err)
assert.Equal(t, len(devices), len(response.Items))
- for _, d := range (response.Items) {
+ for _, d := range response.Items {
assert.Equal(t, d.AdminState, voltha.AdminState_ENABLED)
devices[d.Id] = d
}
@@ -474,7 +476,6 @@
//stopEtcd()
}
-
func TestGetDevice(t *testing.T) {
var id common.ID
id.Id = "anyid"
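The compose file paths in this test now point three levels up, at the repository-level compose directory relative to tests/core/api. A hypothetical helper, not part of this change, that would keep the repeated docker-compose invocations in one place (assuming os/exec is imported):

// runCompose shells out to docker-compose against a file in the shared
// compose directory; args is e.g. ("up", "-d") or ("down").
func runCompose(composeFile string, args ...string) error {
    cmdArgs := append([]string{"-f", "../../../compose/" + composeFile}, args...)
    return exec.Command("docker-compose", cmdArgs...).Run()
}

// Usage:
//   _ = runCompose("docker-compose-zk-kafka-test.yml", "up", "-d")
//   _ = runCompose("rw_core.yml", "down")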
diff --git a/tests/core/concurrency/core_concurrency_test.go b/tests/core/concurrency/core_concurrency_test.go
index 42f5de0..5fbda49 100644
--- a/tests/core/concurrency/core_concurrency_test.go
+++ b/tests/core/concurrency/core_concurrency_test.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
@@ -62,7 +64,7 @@
devices = make(map[string]*voltha.Device)
}
-func connectToCore(port int) (voltha.VolthaServiceClient, error) {
+func connectToCore(port int) (voltha.VolthaServiceClient, error) {
grpcHostIP := os.Getenv("DOCKER_HOST_IP")
grpcHost := fmt.Sprintf("%s:%d", grpcHostIP, port)
conn, err := grpc.Dial(grpcHost, grpc.WithInsecure())
@@ -192,8 +194,7 @@
}
}
-
-func sendCreateDeviceRequest(ctx context.Context, stub voltha.VolthaServiceClient, device *voltha.Device, ch chan interface{} ) {
+func sendCreateDeviceRequest(ctx context.Context, stub voltha.VolthaServiceClient, device *voltha.Device, ch chan interface{}) {
fmt.Println("Sending create device ...")
if response, err := stub.CreateDevice(ctx, device); err != nil {
ch <- err
@@ -202,9 +203,9 @@
}
}
-func sendEnableDeviceRequest(ctx context.Context, stub voltha.VolthaServiceClient, deviceId string, ch chan interface{} ) {
+func sendEnableDeviceRequest(ctx context.Context, stub voltha.VolthaServiceClient, deviceId string, ch chan interface{}) {
fmt.Println("Sending enable device ...")
- if response, err := stub.EnableDevice(ctx, &common.ID{Id:deviceId}); err != nil {
+ if response, err := stub.EnableDevice(ctx, &common.ID{Id: deviceId}); err != nil {
ch <- err
} else {
ch <- response
@@ -253,7 +254,7 @@
ui := uuid.New()
ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(volthaSerialNumberKey, ui.String()))
randomMacAddress := strings.ToUpper(com.GetRandomMacAddress())
- device := &voltha.Device{Type: "simulated_olt", MacAddress:randomMacAddress}
+ device := &voltha.Device{Type: "simulated_olt", MacAddress: randomMacAddress}
ch := make(chan interface{})
defer close(ch)
requestNum := 0
@@ -296,7 +297,7 @@
requestNum := 0
for _, stub := range stubs {
go sendEnableDeviceRequest(ctx, stub, deviceId, ch)
- requestNum +=1
+ requestNum += 1
}
receivedResponse := 0
var err error
@@ -308,7 +309,7 @@
if !ok {
} else if er, ok := res.(error); ok {
err = er
- } else if _ , ok := res.(*empty.Empty); ok {
+ } else if _, ok := res.(*empty.Empty); ok {
validResponseReceived = true
}
if receivedResponse == requestNum {
@@ -324,19 +325,22 @@
return nil
}
-
func TestConcurrentRequests(t *testing.T) {
fmt.Println("Testing Concurrent requests ...")
////0. Start kafka and Ectd
- //startKafka()
- //startEtcd()
+ startKafka()
+ defer stopKafka()
+ startEtcd()
+ defer stopEtcd()
//
////1. Start the core
- //startCores()
+ startCores()
+ defer stopCores()
//
////2. Start the simulated adapters
- //startSimulatedOLTAndONUAdapters()
+ startSimulatedOLTAndONUAdapters()
+ defer stopSimulatedOLTAndONUAdapters()
//
//// Wait until the core and adapters sync up
//time.Sleep(10 * time.Second)
@@ -364,7 +368,6 @@
//stopEtcd()
}
-
func shutdown() {
for _, conn := range conns {
conn.Close()
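TestConcurrentRequests now performs its own environment setup and pairs each start call with an immediate defer, so teardown runs in reverse (LIFO) order when the test returns. A sketch of the pairing, assuming the start/stop helpers defined in this test file:

func TestConcurrentRequestsSetupSketch(t *testing.T) {
    // Deferred calls run last-in-first-out, so shutdown happens in the
    // reverse order of startup: adapters, cores, etcd, then kafka.
    startKafka()
    defer stopKafka()
    startEtcd()
    defer stopEtcd()
    startCores()
    defer stopCores()
    startSimulatedOLTAndONUAdapters()
    defer stopSimulatedOLTAndONUAdapters()
    // ... issue the concurrent requests here ...
}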
diff --git a/tests/kafka/kafka_client_test.go b/tests/kafka/kafka_client_test.go
index b4fc0cf..f6eadff 100644
--- a/tests/kafka/kafka_client_test.go
+++ b/tests/kafka/kafka_client_test.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
diff --git a/tests/kafka/kafka_inter_container_messaging_test.go b/tests/kafka/kafka_inter_container_messaging_test.go
index d6a8e8b..6a6dbc2 100644
--- a/tests/kafka/kafka_inter_container_messaging_test.go
+++ b/tests/kafka/kafka_inter_container_messaging_test.go
@@ -1,3 +1,5 @@
+// +build integration
+
/*
* Copyright 2018-present Open Networking Foundation
@@ -21,9 +23,9 @@
"github.com/google/uuid"
"github.com/opencord/voltha-go/common/log"
kk "github.com/opencord/voltha-go/kafka"
+ rhp "github.com/opencord/voltha-go/rw_core/core"
ic "github.com/opencord/voltha-protos/go/inter_container"
"github.com/opencord/voltha-protos/go/voltha"
- rhp "github.com/opencord/voltha-go/rw_core/core"
"github.com/stretchr/testify/assert"
"os"
"testing"
@@ -34,6 +36,10 @@
Prerequite: Start the kafka/zookeeper containers.
*/
+const (
+ TEST_RPC_KEY = ""
+)
+
var coreKafkaProxy *kk.InterContainerProxy
var adapterKafkaProxy *kk.InterContainerProxy
var kafkaPartitionClient kk.Client
@@ -170,7 +176,7 @@
rpc := "IncorrectAPI"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, false)
@@ -193,7 +199,7 @@
rpc := "GetDevice"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, false)
@@ -220,7 +226,7 @@
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(ctx, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(ctx, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -245,7 +251,7 @@
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(ctx, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(ctx, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, false)
@@ -268,7 +274,7 @@
topic := kk.Topic{Name: "Core"}
expectedResponse := &voltha.Device{Id: trnsId}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -291,7 +297,7 @@
topic := kk.Topic{Name: "Core"}
expectedResponse := &voltha.Device{Id: trnsId}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -318,7 +324,7 @@
rpc := "GetPorts"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -341,7 +347,7 @@
rpc := "GetPorts"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, false)
@@ -385,7 +391,7 @@
rpc := "ChildDeviceDetected"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -424,7 +430,7 @@
rpc := "ChildDeviceDetected"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, false, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, false, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)
@@ -453,7 +459,7 @@
rpc := "ChildDeviceDetected"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, false)
@@ -487,7 +493,7 @@
rpc := "DeviceStateUpdate"
topic := kk.Topic{Name: "Core"}
start := time.Now()
- status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, args...)
+ status, result := adapterKafkaProxy.InvokeRPC(nil, rpc, &topic, &topic, true, TEST_RPC_KEY, args...)
elapsed := time.Since(start)
log.Infow("Result", log.Fields{"status": status, "result": result, "time": elapsed})
assert.Equal(t, status, true)