Scott Baker | 2c1c482 | 2019-10-16 11:02:41 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2019-present Open Networking Foundation |
| 3 | |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | package techprofile |
| 18 | |
| 19 | import ( |
| 20 | "encoding/json" |
| 21 | "errors" |
| 22 | "fmt" |
| 23 | "strconv" |
| 24 | |
Scott Baker | 2c1c482 | 2019-10-16 11:02:41 -0700 | [diff] [blame] | 25 | "github.com/opencord/voltha-lib-go/pkg/db/kvstore" |
| 26 | "github.com/opencord/voltha-lib-go/pkg/db/model" |
Scott Baker | e73f91e | 2019-10-17 12:58:11 -0700 | [diff] [blame] | 27 | "github.com/opencord/voltha-lib-go/pkg/log" |
Scott Baker | 2c1c482 | 2019-10-16 11:02:41 -0700 | [diff] [blame] | 28 | tp_pb "github.com/opencord/voltha-protos/go/tech_profile" |
| 29 | ) |
| 30 | |
// Interface to pon resource manager APIs
type iPonResourceMgr interface {
	// GetResourceID reserves NumIDs resources of the given ResourceType on
	// PON interface IntfID and returns the allocated IDs.
	GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
	// GetResourceTypeAllocID returns the resource-type string used to request
	// alloc-ids (TCONTs) via GetResourceID.
	GetResourceTypeAllocID() string
	// GetResourceTypeGemPortID returns the resource-type string used to
	// request gemport-ids via GetResourceID.
	GetResourceTypeGemPortID() string
	// GetTechnology returns the PON technology name, used as a component of
	// the tech-profile KV store paths.
	GetTechnology() string
}
| 38 | |
// Direction is a local mirror of the voltha tech_profile Direction enum;
// profile JSON stores the string names, which GetprotoBufParamValue maps
// back onto the proto values.
type Direction int32

const (
	Direction_UPSTREAM      Direction = 0
	Direction_DOWNSTREAM    Direction = 1
	Direction_BIDIRECTIONAL Direction = 2
)

// Direction_name maps Direction values to the strings embedded in
// tech-profile JSON documents.
var Direction_name = map[Direction]string{
	0: "UPSTREAM",
	1: "DOWNSTREAM",
	2: "BIDIRECTIONAL",
}
| 52 | |
// SchedulingPolicy is a local mirror of the voltha tech_profile
// SchedulingPolicy enum, stored as its string name in profile JSON.
type SchedulingPolicy int32

const (
	SchedulingPolicy_WRR            SchedulingPolicy = 0
	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
)

// SchedulingPolicy_name maps SchedulingPolicy values to the strings
// embedded in tech-profile JSON documents.
var SchedulingPolicy_name = map[SchedulingPolicy]string{
	0: "WRR",
	1: "StrictPriority",
	2: "Hybrid",
}
| 66 | |
// AdditionalBW is a local mirror of the voltha tech_profile AdditionalBW
// enum (additional-bandwidth eligibility), stored as its string name in
// profile JSON.
type AdditionalBW int32

const (
	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
)

// AdditionalBW_name maps AdditionalBW values to the strings embedded in
// tech-profile JSON documents.
var AdditionalBW_name = map[AdditionalBW]string{
	0: "AdditionalBW_None",
	1: "AdditionalBW_NA",
	2: "AdditionalBW_BestEffort",
	3: "AdditionalBW_Auto",
}
| 82 | |
// DiscardPolicy is a local mirror of the voltha tech_profile DiscardPolicy
// enum (queue drop policy), stored as its string name in profile JSON.
type DiscardPolicy int32

const (
	DiscardPolicy_TailDrop  DiscardPolicy = 0
	DiscardPolicy_WTailDrop DiscardPolicy = 1
	DiscardPolicy_Red       DiscardPolicy = 2
	DiscardPolicy_WRed      DiscardPolicy = 3
)

// DiscardPolicy_name maps DiscardPolicy values to the strings embedded in
// tech-profile JSON documents.
var DiscardPolicy_name = map[DiscardPolicy]string{
	0: "TailDrop",
	1: "WTailDrop",
	2: "Red",
	3: "WRed",
}
| 98 | |
| 99 | /* |
| 100 | type InferredAdditionBWIndication int32 |
| 101 | |
| 102 | const ( |
| 103 | InferredAdditionBWIndication_InferredAdditionBWIndication_None InferredAdditionBWIndication = 0 |
| 104 | InferredAdditionBWIndication_InferredAdditionBWIndication_Assured InferredAdditionBWIndication = 1 |
| 105 | InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2 |
| 106 | ) |
| 107 | |
| 108 | var InferredAdditionBWIndication_name = map[int32]string{ |
| 109 | 0: "InferredAdditionBWIndication_None", |
| 110 | 1: "InferredAdditionBWIndication_Assured", |
| 111 | 2: "InferredAdditionBWIndication_BestEffort", |
| 112 | } |
| 113 | */ |
// instance control defaults
const (
	defaultOnuInstance    = "multi-instance"
	defaultUniInstance    = "single-instance"
	defaultNumGemPorts    = 1
	defaultGemPayloadSize = "auto"
)

// MAX_GEM_PAYLOAD is the JSON key under which the maximum GEM payload size
// is stored in the instance-control section.
const MAX_GEM_PAYLOAD = "max_gem_payload_size"

// InstanceControl carries a profile's instance-scoping policy (per-ONU and
// per-UNI instance modes) and the GEM payload size setting.
type InstanceControl struct {
	// ONU-level instance mode; default is "multi-instance".
	Onu string `json:"ONU"`
	// UNI-level instance mode; default is "single-instance".
	Uni string `json:"uni"`
	// Maximum GEM payload size, or "auto".
	MaxGemPayloadSize string `json:"max_gem_payload_size"`
}
| 129 | |
// default discard config constants
const (
	defaultMinThreshold   = 0
	defaultMaxThreshold   = 0
	defaultMaxProbability = 0
)

// DiscardConfig holds the RED/WRED-style discard parameters attached to a
// GEM port's queue in the profile JSON.
type DiscardConfig struct {
	MinThreshold   int `json:"min_threshold"`
	MaxThreshold   int `json:"max_threshold"`
	MaxProbability int `json:"max_probability"`
}
| 142 | |
// default scheduler constants
const (
	defaultAdditionalBw     = AdditionalBW_AdditionalBW_BestEffort
	defaultPriority         = 0
	defaultWeight           = 0
	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
)

// Scheduler describes one direction's scheduler in a tech-profile template.
// The enum-like fields (Direction, AdditionalBw, QSchedPolicy) are stored as
// the proto enum names, translated via GetprotoBufParamValue.
type Scheduler struct {
	Direction    string `json:"direction"`
	AdditionalBw string `json:"additional_bw"`
	Priority     uint32 `json:"priority"`
	Weight       uint32 `json:"weight"`
	QSchedPolicy string `json:"q_sched_policy"`
}
| 158 | |
// default GEM attribute constants
const (
	defaultAESEncryption  = "True"
	defaultPriorityQueue  = 0
	defaultQueueWeight    = 0
	defaultMaxQueueSize   = "auto"
	defaultdropPolicy     = DiscardPolicy_TailDrop
	defaultSchedulePolicy = SchedulingPolicy_WRR
)

// GemPortAttribute describes one GEM port in a tech-profile template
// (before a gemport-id is allocated). AesEncryption is the string "True" or
// "False"; PbitMap is a bitmap string (see GetGemportIDForPbit).
type GemPortAttribute struct {
	MaxQueueSize     string        `json:"max_q_size"`
	PbitMap          string        `json:"pbit_map"`
	AesEncryption    string        `json:"aes_encryption"`
	SchedulingPolicy string        `json:"scheduling_policy"`
	PriorityQueue    uint32        `json:"priority_q"`
	Weight           uint32        `json:"weight"`
	DiscardPolicy    string        `json:"discard_policy"`
	DiscardConfig    DiscardConfig `json:"discard_config"`
}
| 179 | |
// iScheduler is the instance form of Scheduler: the template fields plus the
// AllocID (TCONT) assigned when the instance was created.
type iScheduler struct {
	AllocID      uint32 `json:"alloc_id"`
	Direction    string `json:"direction"`
	AdditionalBw string `json:"additional_bw"`
	Priority     uint32 `json:"priority"`
	Weight       uint32 `json:"weight"`
	QSchedPolicy string `json:"q_sched_policy"`
}

// iGemPortAttribute is the instance form of GemPortAttribute: the template
// fields plus the GemportID assigned when the instance was created.
type iGemPortAttribute struct {
	GemportID        uint32        `json:"gemport_id"`
	MaxQueueSize     string        `json:"max_q_size"`
	PbitMap          string        `json:"pbit_map"`
	AesEncryption    string        `json:"aes_encryption"`
	SchedulingPolicy string        `json:"scheduling_policy"`
	PriorityQueue    uint32        `json:"priority_q"`
	Weight           uint32        `json:"weight"`
	DiscardPolicy    string        `json:"discard_policy"`
	DiscardConfig    DiscardConfig `json:"discard_config"`
}
| 199 | |
// TechProfileMgr loads tech-profile templates from the KV store, creates
// per-UNI instances from them (allocating alloc/gemport IDs through the PON
// resource manager) and translates instance fields into voltha proto types.
type TechProfileMgr struct {
	config      *TechProfileFlags
	resourceMgr iPonResourceMgr
}

// DefaultTechProfile is the template form of a tech profile as stored in the
// KV store (or built from defaults) — no IDs have been allocated yet.
type DefaultTechProfile struct {
	Name                           string             `json:"name"`
	ProfileType                    string             `json:"profile_type"`
	Version                        int                `json:"version"`
	NumGemPorts                    uint32             `json:"num_gem_ports"`
	InstanceCtrl                   InstanceControl    `json:"instance_control"`
	UsScheduler                    Scheduler          `json:"us_scheduler"`
	DsScheduler                    Scheduler          `json:"ds_scheduler"`
	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
}

// TechProfile is a materialized tech-profile instance for one subscriber
// (UNI port), carrying the allocated TCONT and gemport IDs.
type TechProfile struct {
	Name                           string              `json:"name"`
	SubscriberIdentifier           string              `json:"subscriber_identifier"`
	ProfileType                    string              `json:"profile_type"`
	Version                        int                 `json:"version"`
	NumGemPorts                    uint32              `json:"num_gem_ports"`
	NumTconts                      uint32              `json:"num_of_tconts"`
	InstanceCtrl                   InstanceControl     `json:"instance_control"`
	UsScheduler                    iScheduler          `json:"us_scheduler"`
	DsScheduler                    iScheduler          `json:"ds_scheduler"`
	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
}
| 228 | |
// SetKVClient creates a KV store client from the manager's configuration and
// wraps it in a model.Backend rooted at the tech-profile path prefix.
// Returns nil when the client cannot be created (the error is logged).
func (t *TechProfileMgr) SetKVClient() *model.Backend {
	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
	if err != nil {
		log.Errorw("failed-to-create-kv-client",
			log.Fields{
				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
				"error": err.Error(),
			})
		return nil
	}
	return &model.Backend{
		Client:     kvClient,
		StoreType:  t.config.KVStoreType,
		Host:       t.config.KVStoreHost,
		Port:       t.config.KVStorePort,
		Timeout:    t.config.KVStoreTimeout,
		PathPrefix: t.config.TPKVPathPrefix}

	/* TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
	   issue between kv store and backend , core is not calling NewBackend directly
	   kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
	   t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
	*/
}
| 255 | |
| 256 | func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) { |
| 257 | |
| 258 | log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address}) |
| 259 | switch storeType { |
| 260 | case "consul": |
| 261 | return kvstore.NewConsulClient(address, timeout) |
| 262 | case "etcd": |
| 263 | return kvstore.NewEtcdClient(address, timeout) |
| 264 | } |
| 265 | return nil, errors.New("unsupported-kv-store") |
| 266 | } |
| 267 | |
| 268 | func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) { |
| 269 | var techprofileObj TechProfileMgr |
| 270 | log.Debug("Initializing techprofile Manager") |
| 271 | techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort) |
| 272 | techprofileObj.config.KVBackend = techprofileObj.SetKVClient() |
| 273 | if techprofileObj.config.KVBackend == nil { |
| 274 | log.Error("Failed to initialize KV backend\n") |
| 275 | return nil, errors.New("KV backend init failed") |
| 276 | } |
| 277 | techprofileObj.resourceMgr = resourceMgr |
| 278 | log.Debug("Initializing techprofile object instance success") |
| 279 | return &techprofileObj, nil |
| 280 | } |
| 281 | |
| 282 | func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string { |
| 283 | return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName) |
| 284 | } |
| 285 | |
| 286 | func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) { |
| 287 | var KvTpIns TechProfile |
| 288 | var resPtr *TechProfile = &KvTpIns |
| 289 | var err error |
| 290 | /*path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)*/ |
| 291 | log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path}) |
| 292 | kvresult, err := t.config.KVBackend.Get(path) |
| 293 | if err != nil { |
| 294 | log.Errorw("Error while fetching tech-profile instance from KV backend", log.Fields{"key": path}) |
| 295 | return nil, err |
| 296 | } |
| 297 | if kvresult == nil { |
| 298 | log.Infow("Tech profile does not exist in KV store", log.Fields{"key": path}) |
| 299 | resPtr = nil |
| 300 | } else { |
| 301 | if value, err := kvstore.ToByte(kvresult.Value); err == nil { |
| 302 | if err = json.Unmarshal(value, resPtr); err != nil { |
| 303 | log.Errorw("Error while unmarshal KV result", log.Fields{"key": path, "value": value}) |
| 304 | } |
| 305 | } |
| 306 | } |
| 307 | return resPtr, err |
| 308 | } |
| 309 | |
| 310 | func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error { |
| 311 | path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName) |
| 312 | log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance}) |
| 313 | tpInstanceJson, err := json.Marshal(*tpInstance) |
| 314 | if err == nil { |
| 315 | // Backend will convert JSON byte array into string format |
| 316 | log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson}) |
| 317 | err = t.config.KVBackend.Put(path, tpInstanceJson) |
| 318 | } else { |
| 319 | log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance}) |
| 320 | } |
| 321 | return err |
| 322 | } |
| 323 | func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile { |
| 324 | var kvtechprofile DefaultTechProfile |
| 325 | key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID) |
| 326 | log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key}) |
| 327 | kvresult, err := t.config.KVBackend.Get(key) |
| 328 | if err != nil { |
| 329 | log.Errorw("Error while fetching value from KV store", log.Fields{"key": key}) |
| 330 | return nil |
| 331 | } |
| 332 | if kvresult != nil { |
| 333 | /* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/ |
| 334 | if value, err := kvstore.ToByte(kvresult.Value); err == nil { |
| 335 | if err = json.Unmarshal(value, &kvtechprofile); err == nil { |
| 336 | log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile}) |
| 337 | return &kvtechprofile |
| 338 | } |
| 339 | } |
| 340 | } |
| 341 | return nil |
| 342 | } |
| 343 | func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile { |
| 344 | var tpInstance *TechProfile |
| 345 | log.Infow("Creating tech profile instance ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId}) |
| 346 | tp := t.getTPFromKVStore(techProfiletblID) |
| 347 | if tp != nil { |
| 348 | log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID}) |
| 349 | } else { |
| 350 | tp = t.getDefaultTechProfile() |
| 351 | log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID}) |
| 352 | } |
| 353 | tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts) |
| 354 | if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil { |
| 355 | log.Errorw("Error in adding tech profile instance to KV ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName}) |
| 356 | return nil |
| 357 | } |
| 358 | log.Infow("Added tech profile instance to KV store successfully ", |
| 359 | log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId}) |
| 360 | return tpInstance |
| 361 | } |
| 362 | |
| 363 | func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error { |
| 364 | path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName) |
| 365 | return t.config.KVBackend.Delete(path) |
| 366 | } |
| 367 | |
// allocateTPInstance materializes a TechProfile instance from a template:
// it reserves one alloc-id (TCONT) and tp.NumGemPorts gemport-ids on the PON
// interface, then copies the template's scheduler and per-GEM attributes
// into the instance, stamping in the allocated IDs.
// Returns nil when numOfTconts > 1 (unsupported) or any allocation fails.
func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {

	var usGemPortAttributeList []iGemPortAttribute
	var dsGemPortAttributeList []iGemPortAttribute
	var tcontIDs []uint32
	var gemPorts []uint32
	var err error

	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
	// Only a single TCONT per instance is supported today.
	if numOfTconts > 1 {
		log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
		return nil
	}
	// Reserve the alloc-id(s) for the TCONT(s) from the resource manager.
	if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
		return nil
	}
	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
	// Reserve one gemport-id per GEM port declared by the template.
	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
		return nil
	}
	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
	// Copy the template's GEM attributes into instance form, pairing template
	// index i with the allocated gemPorts[i].
	// NOTE(review): assumes both template attribute lists hold at least
	// NumGemPorts entries — confirm against the profile schema.
	for index := 0; index < int(tp.NumGemPorts); index++ {
		usGemPortAttributeList = append(usGemPortAttributeList,
			iGemPortAttribute{GemportID: gemPorts[index],
				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
		dsGemPortAttributeList = append(dsGemPortAttributeList,
			iGemPortAttribute{GemportID: gemPorts[index],
				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
	}
	// Both direction schedulers share the single allocated TCONT (tcontIDs[0]).
	return &TechProfile{
		SubscriberIdentifier: uniPortName,
		Name:                 tp.Name,
		ProfileType:          tp.ProfileType,
		Version:              tp.Version,
		NumGemPorts:          tp.NumGemPorts,
		NumTconts:            numOfTconts,
		InstanceCtrl:         tp.InstanceCtrl,
		UsScheduler: iScheduler{
			AllocID:      tcontIDs[0],
			Direction:    tp.UsScheduler.Direction,
			AdditionalBw: tp.UsScheduler.AdditionalBw,
			Priority:     tp.UsScheduler.Priority,
			Weight:       tp.UsScheduler.Weight,
			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
		DsScheduler: iScheduler{
			AllocID:      tcontIDs[0],
			Direction:    tp.DsScheduler.Direction,
			AdditionalBw: tp.DsScheduler.AdditionalBw,
			Priority:     tp.DsScheduler.Priority,
			Weight:       tp.DsScheduler.Weight,
			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
		UpstreamGemPortAttributeList:   usGemPortAttributeList,
		DownstreamGemPortAttributeList: dsGemPortAttributeList}
}
| 438 | |
// getDefaultTechProfile builds a profile template entirely from the
// package's default constants and the manager's configuration. One up- and
// one downstream GEM port is created per configured default p-bit map, so
// NumGemPorts equals len(t.config.DefaultPbits).
func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {

	var usGemPortAttributeList []GemPortAttribute
	var dsGemPortAttributeList []GemPortAttribute

	// One GEM port (in each direction) per default p-bit map entry.
	for _, pbit := range t.config.DefaultPbits {
		log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
		usGemPortAttributeList = append(usGemPortAttributeList,
			GemPortAttribute{
				MaxQueueSize:     defaultMaxQueueSize,
				PbitMap:          pbit,
				AesEncryption:    defaultAESEncryption,
				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
				PriorityQueue:    defaultPriorityQueue,
				Weight:           defaultQueueWeight,
				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
				DiscardConfig: DiscardConfig{
					MinThreshold:   defaultMinThreshold,
					MaxThreshold:   defaultMaxThreshold,
					MaxProbability: defaultMaxProbability}})
		dsGemPortAttributeList = append(dsGemPortAttributeList,
			GemPortAttribute{
				MaxQueueSize:     defaultMaxQueueSize,
				PbitMap:          pbit,
				AesEncryption:    defaultAESEncryption,
				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
				PriorityQueue:    defaultPriorityQueue,
				Weight:           defaultQueueWeight,
				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
				DiscardConfig: DiscardConfig{
					MinThreshold:   defaultMinThreshold,
					MaxThreshold:   defaultMaxThreshold,
					MaxProbability: defaultMaxProbability}})
	}
	// Enum-like fields are stored as their proto enum name strings so they
	// can later be mapped back via GetprotoBufParamValue.
	return &DefaultTechProfile{
		Name:        t.config.DefaultTPName,
		ProfileType: t.resourceMgr.GetTechnology(),
		Version:     t.config.TPVersion,
		NumGemPorts: uint32(len(usGemPortAttributeList)),
		InstanceCtrl: InstanceControl{
			Onu:               defaultOnuInstance,
			Uni:               defaultUniInstance,
			MaxGemPayloadSize: defaultGemPayloadSize},
		UsScheduler: Scheduler{
			Direction:    Direction_name[Direction_UPSTREAM],
			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
			Priority:     defaultPriority,
			Weight:       defaultWeight,
			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
		DsScheduler: Scheduler{
			Direction:    Direction_name[Direction_DOWNSTREAM],
			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
			Priority:     defaultPriority,
			Weight:       defaultWeight,
			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
		UpstreamGemPortAttributeList:   usGemPortAttributeList,
		DownstreamGemPortAttributeList: dsGemPortAttributeList}
}
| 497 | |
| 498 | func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 { |
| 499 | var result int32 = -1 |
| 500 | |
| 501 | if paramType == "direction" { |
| 502 | for key, val := range tp_pb.Direction_value { |
| 503 | if key == paramKey { |
| 504 | result = val |
| 505 | } |
| 506 | } |
| 507 | } else if paramType == "discard_policy" { |
| 508 | for key, val := range tp_pb.DiscardPolicy_value { |
| 509 | if key == paramKey { |
| 510 | result = val |
| 511 | } |
| 512 | } |
| 513 | } else if paramType == "sched_policy" { |
| 514 | for key, val := range tp_pb.SchedulingPolicy_value { |
| 515 | if key == paramKey { |
| 516 | log.Debugw("Got value in proto", log.Fields{"key": key, "value": val}) |
| 517 | result = val |
| 518 | } |
| 519 | } |
| 520 | } else if paramType == "additional_bw" { |
| 521 | for key, val := range tp_pb.AdditionalBW_value { |
| 522 | if key == paramKey { |
| 523 | result = val |
| 524 | } |
| 525 | } |
| 526 | } else { |
| 527 | log.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey}) |
| 528 | return -1 |
| 529 | } |
| 530 | log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result}) |
| 531 | return result |
| 532 | } |
| 533 | |
| 534 | func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig { |
| 535 | dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction)) |
| 536 | if dir == -1 { |
| 537 | log.Fatal("Error in getting Proto for direction for upstream scheduler") |
| 538 | return nil |
| 539 | } |
| 540 | bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw)) |
| 541 | if bw == -1 { |
| 542 | log.Fatal("Error in getting Proto for bandwidth for upstream scheduler") |
| 543 | return nil |
| 544 | } |
| 545 | policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy)) |
| 546 | if policy == -1 { |
| 547 | log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler") |
| 548 | return nil |
| 549 | } |
| 550 | return &tp_pb.SchedulerConfig{ |
| 551 | Direction: dir, |
| 552 | AdditionalBw: bw, |
| 553 | Priority: tpInstance.UsScheduler.Priority, |
| 554 | Weight: tpInstance.UsScheduler.Weight, |
| 555 | SchedPolicy: policy} |
| 556 | } |
| 557 | |
| 558 | func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig { |
| 559 | |
| 560 | dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction)) |
| 561 | if dir == -1 { |
| 562 | log.Fatal("Error in getting Proto for direction for downstream scheduler") |
| 563 | return nil |
| 564 | } |
| 565 | bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw)) |
| 566 | if bw == -1 { |
| 567 | log.Fatal("Error in getting Proto for bandwidth for downstream scheduler") |
| 568 | return nil |
| 569 | } |
| 570 | policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy)) |
| 571 | if policy == -1 { |
| 572 | log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler") |
| 573 | return nil |
| 574 | } |
| 575 | |
| 576 | return &tp_pb.SchedulerConfig{ |
| 577 | Direction: dir, |
| 578 | AdditionalBw: bw, |
| 579 | Priority: tpInstance.DsScheduler.Priority, |
| 580 | Weight: tpInstance.DsScheduler.Weight, |
| 581 | SchedPolicy: policy} |
| 582 | } |
| 583 | |
| 584 | func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig, |
| 585 | ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler { |
| 586 | |
| 587 | tSched := &tp_pb.TrafficScheduler{ |
| 588 | Direction: SchedCfg.Direction, |
| 589 | AllocId: tpInstance.UsScheduler.AllocID, |
| 590 | TrafficShapingInfo: ShapingCfg, |
| 591 | Scheduler: SchedCfg} |
| 592 | |
| 593 | return tSched |
| 594 | } |
| 595 | |
| 596 | func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue { |
| 597 | |
| 598 | var encryp bool |
| 599 | if Dir == tp_pb.Direction_UPSTREAM { |
| 600 | // upstream GEM ports |
| 601 | NumGemPorts := len(tp.UpstreamGemPortAttributeList) |
| 602 | GemPorts := make([]*tp_pb.TrafficQueue, 0) |
| 603 | for Count := 0; Count < NumGemPorts; Count++ { |
| 604 | if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" { |
| 605 | encryp = true |
| 606 | } else { |
| 607 | encryp = false |
| 608 | } |
| 609 | GemPorts = append(GemPorts, &tp_pb.TrafficQueue{ |
| 610 | Direction: tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)), |
| 611 | GemportId: tp.UpstreamGemPortAttributeList[Count].GemportID, |
| 612 | PbitMap: tp.UpstreamGemPortAttributeList[Count].PbitMap, |
| 613 | AesEncryption: encryp, |
| 614 | SchedPolicy: tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)), |
| 615 | Priority: tp.UpstreamGemPortAttributeList[Count].PriorityQueue, |
| 616 | Weight: tp.UpstreamGemPortAttributeList[Count].Weight, |
| 617 | DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)), |
| 618 | }) |
| 619 | } |
| 620 | log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts}) |
| 621 | return GemPorts |
| 622 | } else if Dir == tp_pb.Direction_DOWNSTREAM { |
| 623 | //downstream GEM ports |
| 624 | NumGemPorts := len(tp.DownstreamGemPortAttributeList) |
| 625 | GemPorts := make([]*tp_pb.TrafficQueue, 0) |
| 626 | for Count := 0; Count < NumGemPorts; Count++ { |
| 627 | if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" { |
| 628 | encryp = true |
| 629 | } else { |
| 630 | encryp = false |
| 631 | } |
| 632 | GemPorts = append(GemPorts, &tp_pb.TrafficQueue{ |
| 633 | Direction: tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)), |
| 634 | GemportId: tp.DownstreamGemPortAttributeList[Count].GemportID, |
| 635 | PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap, |
| 636 | AesEncryption: encryp, |
| 637 | SchedPolicy: tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)), |
| 638 | Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQueue, |
| 639 | Weight: tp.DownstreamGemPortAttributeList[Count].Weight, |
| 640 | DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)), |
| 641 | }) |
| 642 | } |
| 643 | log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts}) |
| 644 | return GemPorts |
| 645 | } |
| 646 | return nil |
| 647 | } |
| 648 | |
| 649 | func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler { |
| 650 | UsScheduler := tpm.GetUsScheduler(tp) |
| 651 | |
| 652 | return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction, |
| 653 | AllocId: tp.UsScheduler.AllocID, |
| 654 | Scheduler: UsScheduler} |
| 655 | } |
| 656 | |
| 657 | func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 { |
| 658 | /* |
| 659 | Function to get the Gemport ID mapped to a pbit. |
| 660 | */ |
| 661 | if Dir == tp_pb.Direction_UPSTREAM { |
| 662 | // upstream GEM ports |
| 663 | NumGemPorts := len(tp.UpstreamGemPortAttributeList) |
| 664 | for Count := 0; Count < NumGemPorts; Count++ { |
| 665 | NumPbitMaps := len(tp.UpstreamGemPortAttributeList[Count].PbitMap) |
| 666 | for ICount := 2; ICount < NumPbitMaps; ICount++ { |
| 667 | if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil { |
| 668 | if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set |
| 669 | log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID}) |
| 670 | return tp.UpstreamGemPortAttributeList[Count].GemportID |
| 671 | } |
| 672 | } |
| 673 | } |
| 674 | } |
| 675 | } else if Dir == tp_pb.Direction_DOWNSTREAM { |
| 676 | //downstream GEM ports |
| 677 | NumGemPorts := len(tp.DownstreamGemPortAttributeList) |
| 678 | for Count := 0; Count < NumGemPorts; Count++ { |
| 679 | NumPbitMaps := len(tp.DownstreamGemPortAttributeList[Count].PbitMap) |
| 680 | for ICount := 2; ICount < NumPbitMaps; ICount++ { |
| 681 | if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil { |
| 682 | if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set |
| 683 | log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID}) |
| 684 | return tp.DownstreamGemPortAttributeList[Count].GemportID |
| 685 | } |
| 686 | } |
| 687 | } |
| 688 | } |
| 689 | } |
| 690 | log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit}) |
| 691 | return 0 |
| 692 | } |