/*
 * Copyright 2019-present Open Networking Foundation

 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at

 * http://www.apache.org/licenses/LICENSE-2.0

 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package techprofile

import (
    "encoding/json"
    "errors"
    "fmt"
    "strconv"

    "github.com/opencord/voltha-go/common/log"
    "github.com/opencord/voltha-go/db/kvstore"
    "github.com/opencord/voltha-go/db/model"
    openolt_pb "github.com/opencord/voltha-protos/go/openolt"
)

// iPonResourceMgr is the interface to the PON resource manager APIs used by the tech profile manager.
type iPonResourceMgr interface {
    GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
    GetResourceTypeAllocID() string
    GetResourceTypeGemPortID() string
    GetTechnology() string
}

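// Direction enumerates traffic direction values; the names mirror the openolt protobuf Direction enum.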
type Direction int32

const (
    Direction_UPSTREAM      Direction = 0
    Direction_DOWNSTREAM    Direction = 1
    Direction_BIDIRECTIONAL Direction = 2
)

var Direction_name = map[Direction]string{
    0: "UPSTREAM",
    1: "DOWNSTREAM",
    2: "BIDIRECTIONAL",
}

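// SchedulingPolicy enumerates queue scheduling policies (WRR, strict priority, hybrid); the names mirror the openolt protobuf SchedulingPolicy enum.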
type SchedulingPolicy int32

const (
    SchedulingPolicy_WRR            SchedulingPolicy = 0
    SchedulingPolicy_StrictPriority SchedulingPolicy = 1
    SchedulingPolicy_Hybrid         SchedulingPolicy = 2
)

var SchedulingPolicy_name = map[SchedulingPolicy]string{
    0: "WRR",
    1: "StrictPriority",
    2: "Hybrid",
}

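// AdditionalBW enumerates additional-bandwidth eligibility values; the names mirror the openolt protobuf AdditionalBW enum.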
type AdditionalBW int32

const (
    AdditionalBW_AdditionalBW_None       AdditionalBW = 0
    AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
    AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
    AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
)

var AdditionalBW_name = map[AdditionalBW]string{
    0: "AdditionalBW_None",
    1: "AdditionalBW_NA",
    2: "AdditionalBW_BestEffort",
    3: "AdditionalBW_Auto",
}

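// DiscardPolicy enumerates queue discard policies; the names mirror the openolt protobuf DiscardPolicy enum.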
type DiscardPolicy int32

const (
    DiscardPolicy_TailDrop  DiscardPolicy = 0
    DiscardPolicy_WTailDrop DiscardPolicy = 1
    DiscardPolicy_Red       DiscardPolicy = 2
    DiscardPolicy_WRed      DiscardPolicy = 3
)

var DiscardPolicy_name = map[DiscardPolicy]string{
    0: "TailDrop",
    1: "WTailDrop",
    2: "Red",
    3: "WRed",
}

/*
type InferredAdditionBWIndication int32

const (
    InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
    InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
    InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
)

var InferredAdditionBWIndication_name = map[int32]string{
    0: "InferredAdditionBWIndication_None",
    1: "InferredAdditionBWIndication_Assured",
    2: "InferredAdditionBWIndication_BestEffort",
}
*/

// instance control defaults
const (
    defaultOnuInstance    = "multi-instance"
    defaultUniInstance    = "single-instance"
    defaultNumGemPorts    = 1
    defaultGemPayloadSize = "auto"
)

const MAX_GEM_PAYLOAD = "max_gem_payload_size"

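// InstanceControl carries the instance-control section of a tech profile (ONU/UNI instance policy and maximum GEM payload size).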
type InstanceControl struct {
    Onu               string `json:"ONU"`
    Uni               string `json:"uni"`
    MaxGemPayloadSize string `json:"max_gem_payload_size"`
}

// default discard config constants
const (
    defaultMinThreshold   = 0
    defaultMaxThreshold   = 0
    defaultMaxProbability = 0
)

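// DiscardConfig holds the discard (drop) thresholds and probability for a GEM port queue.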
type DiscardConfig struct {
    MinThreshold   int `json:"min_threshold"`
    MaxThreshold   int `json:"max_threshold"`
    MaxProbability int `json:"max_probability"`
}

// default scheduler constants
const (
    defaultAddtionalBw      = AdditionalBW_AdditionalBW_Auto
    defaultPriority         = 0
    defaultWeight           = 0
    defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
)

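// Scheduler describes an upstream or downstream scheduler as defined in the tech profile template.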
type Scheduler struct {
    Direction    string `json:"direction"`
    AdditionalBw string `json:"additional_bw"`
    Priority     uint32 `json:"priority"`
    Weight       uint32 `json:"weight"`
    QSchedPolicy string `json:"q_sched_policy"`
}

// default GEM attribute constants
const (
    defaultAESEncryption  = "True"
    defaultPriorityQueue  = 0
    defaultQueueWeight    = 0
    defaultMaxQueueSize   = "auto"
    defaultdropPolicy     = DiscardPolicy_TailDrop
    defaultSchedulePolicy = SchedulingPolicy_WRR
)

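// GemPortAttribute describes the per-GEM-port queue attributes defined in the tech profile template.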
type GemPortAttribute struct {
    MaxQueueSize     string        `json:"max_q_size"`
    PbitMap          string        `json:"pbit_map"`
    AesEncryption    string        `json:"aes_encryption"`
    SchedulingPolicy string        `json:"scheduling_policy"`
    PriorityQueue    int           `json:"priority_q"`
    Weight           int           `json:"weight"`
    DiscardPolicy    string        `json:"discard_policy"`
    DiscardConfig    DiscardConfig `json:"discard_config"`
}

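// iScheduler is a scheduler instance: the template Scheduler bound to an allocated alloc ID (TCONT).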
type iScheduler struct {
    AllocID   uint32    `json:"alloc_id"`
    Scheduler Scheduler `json:"scheduler"`
}
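// iGemPortAttribute is a GEM port instance: the template attributes bound to an allocated GEM port ID.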
type iGemPortAttribute struct {
    GemportID    uint32           `json:"gem_port_id"`
    GemAttribute GemPortAttribute `json:"gem_attribute"`
}

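// TechProfileMgr manages tech profile templates and instances stored in the KV store.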
type TechProfileMgr struct {
    config      *TechProfileFlags
    resourceMgr iPonResourceMgr
}
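
// DefaultTechProfile is the tech profile template, either read from the KV store or built from defaults.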
type DefaultTechProfile struct {
    Name                           string             `json:"name"`
    ProfileType                    string             `json:"profile_type"`
    Version                        int                `json:"version"`
    NumGemPorts                    uint32             `json:"num_gem_ports"`
    InstanceCtrl                   InstanceControl    `json:"instance_control"`
    UsScheduler                    Scheduler          `json:"us_scheduler"`
    DsScheduler                    Scheduler          `json:"ds_scheduler"`
    UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
    DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
}
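
// TechProfile is a tech profile instance created for a specific subscriber (UNI port) from a template.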
type TechProfile struct {
    Name                           string              `json:"name"`
    SubscriberIdentifier           string              `json:"subscriber_identifier"`
    ProfileType                    string              `json:"profile_type"`
    Version                        int                 `json:"version"`
    NumGemPorts                    uint32              `json:"num_gem_ports"`
    NumTconts                      uint32              `json:"num_tconts"`
    InstanceCtrl                   InstanceControl     `json:"instance_control"`
    UsScheduler                    iScheduler          `json:"us_scheduler"`
    DsScheduler                    iScheduler          `json:"ds_scheduler"`
    UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
    DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
}

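// SetKVClient creates a KV store client from the configured store type, host, and port, and wraps it in a model.Backend; it returns nil if the client cannot be created.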
func (t *TechProfileMgr) SetKVClient() *model.Backend {
    addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
    kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
    if err != nil {
        log.Errorw("failed-to-create-kv-client",
            log.Fields{
                "type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
                "timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
                "error": err.Error(),
            })
        return nil
    }
    return &model.Backend{
        Client:     kvClient,
        StoreType:  t.config.KVStoreType,
        Host:       t.config.KVStoreHost,
        Port:       t.config.KVStorePort,
        Timeout:    t.config.KVStoreTimeout,
        PathPrefix: t.config.TPKVPathPrefix}

    /* TODO: Verify that calling model.NewBackend directly works with the KV store; currently there is an
       issue between the KV store and the backend, and the core does not call NewBackend directly.
       kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
           t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
    */
}

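// newKVClient returns a KV store client for the given store type ("consul" or "etcd").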
func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {

    log.Infow("kv-store-type", log.Fields{"store": storeType})
    switch storeType {
    case "consul":
        return kvstore.NewConsulClient(address, timeout)
    case "etcd":
        return kvstore.NewEtcdClient(address, timeout)
    }
    return nil, errors.New("unsupported-kv-store")
}

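// NewTechProfile creates a TechProfileMgr with default flags, a KV backend, and the given PON resource manager.
// A minimal usage sketch (the resource manager variable and the argument values below are hypothetical):
//
//    tpMgr, err := techprofile.NewTechProfile(ponRsrcMgr) // ponRsrcMgr implements iPonResourceMgr
//    if err == nil {
//        tpInstance := tpMgr.CreateTechProfInstance(64, "pon-0/onu-1/uni-0", 0) // example table ID, UNI name, PON intf
//        _ = tpInstance
//    }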
func NewTechProfile(resourceMgr iPonResourceMgr) (*TechProfileMgr, error) {
    var techprofileObj TechProfileMgr
    log.Debug("Initializing techprofile Manager")
    techprofileObj.config = NewTechProfileFlags()
    techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
    if techprofileObj.config.KVBackend == nil {
        log.Error("Failed to initialize KV backend")
        return nil, errors.New("KV backend init failed")
    }
    techprofileObj.resourceMgr = resourceMgr
    log.Debug("Initialized techprofile manager instance successfully")
    return &techprofileObj, nil
}

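// GetTechProfileInstanceKVPath builds the KV store path of a tech profile instance from the technology, table ID, and UNI port name.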
func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
    return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
}

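// GetTPInstanceFromKVStore reads and unmarshals a tech profile instance from the KV store; it returns nil if the instance does not exist.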
func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) {
    var KvTpIns TechProfile
    var resPtr *TechProfile = &KvTpIns
    var err error
    /*path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)*/
    log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path})
    kvresult, err := t.config.KVBackend.Get(path)
    if err != nil {
        log.Errorw("Error while fetching tech-profile instance from KV backend", log.Fields{"key": path})
        return nil, err
    }
    if kvresult == nil {
        log.Infow("Tech profile does not exist in KV store", log.Fields{"key": path})
        resPtr = nil
    } else {
        if value, err := kvstore.ToByte(kvresult.Value); err == nil {
            if err = json.Unmarshal(value, resPtr); err != nil {
                log.Errorw("Error while unmarshaling KV result", log.Fields{"key": path, "value": value})
                // do not return a partially populated instance on decode failure
                resPtr = nil
            }
        }
    }
    return resPtr, err
}

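// addTechProfInstanceToKVStore marshals a tech profile instance to JSON and stores it in the KV store.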
func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
    path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
    log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
    tpInstanceJson, err := json.Marshal(*tpInstance)
    if err == nil {
        // Backend will convert the JSON byte array into string format
        log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
        err = t.config.KVBackend.Put(path, tpInstanceJson)
    } else {
        log.Errorw("Error in marshaling into JSON format", log.Fields{"key": path, "tpinstance": tpInstance})
    }
    return err
}
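
// getTPFromKVStore fetches the tech profile template for the given table ID from the KV store, or returns nil if it is absent or cannot be decoded.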
func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile {
    var kvtechprofile DefaultTechProfile
    key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
    log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
    kvresult, err := t.config.KVBackend.Get(key)
    if err != nil {
        log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
        return nil
    }
    if kvresult != nil {
        /* Backend returns the value in string format; it needs to be converted to []byte before unmarshaling */
        if value, err := kvstore.ToByte(kvresult.Value); err == nil {
            if err = json.Unmarshal(value, &kvtechprofile); err == nil {
                log.Debugw("Successfully fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
                return &kvtechprofile
            }
        }
    }
    return nil
}
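
// CreateTechProfInstance builds a tech profile instance for the given UNI port from the stored template (or defaults), allocates alloc and GEM port IDs, and persists the instance in the KV store.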
func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile {
    var tpInstance *TechProfile
    log.Infow("Creating tech profile instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
    tp := t.getTPFromKVStore(techProfiletblID)
    if tp != nil {
        log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID})
    } else {
        tp = t.getDefaultTechProfile()
        log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID})
    }
    tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts)
    if tpInstance == nil {
        // guard against dereferencing a nil instance when allocation fails
        log.Errorw("Tech profile instance allocation failed", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
        return nil
    }
    if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
        log.Errorw("Error in adding tech profile instance to KV", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
        return nil
    }
    log.Infow("Added tech profile instance to KV store successfully",
        log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
    return tpInstance
}

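// DeleteTechProfileInstance removes a tech profile instance from the KV store.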
func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
    path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
    return t.config.KVBackend.Delete(path)
}

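// allocateTPInstance allocates alloc IDs (TCONTs) and GEM port IDs from the resource manager and binds them to the template to build a TechProfile instance; only a single TCONT is supported.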
func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {

    var usGemPortAttributeList []iGemPortAttribute
    var dsGemPortAttributeList []iGemPortAttribute
    var tcontIDs []uint32
    var gemPorts []uint32
    var err error

    log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
    if numOfTconts > 1 {
        log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
        return nil
    }
    if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
        log.Errorw("Error getting alloc id from resource manager", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
        return nil
    }
    log.Debugw("Num GEM ports in TP", log.Fields{"numGem": tp.NumGemPorts})
    if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
        log.Errorw("Error getting gemport ids from resource manager", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
        return nil
    }
    log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
    for index := 0; index < int(tp.NumGemPorts); index++ {
        usGemPortAttributeList = append(usGemPortAttributeList,
            iGemPortAttribute{GemportID: gemPorts[index],
                GemAttribute: tp.UpstreamGemPortAttributeList[index]})
        dsGemPortAttributeList = append(dsGemPortAttributeList,
            iGemPortAttribute{GemportID: gemPorts[index],
                GemAttribute: tp.DownstreamGemPortAttributeList[index]})
    }
    return &TechProfile{
        SubscriberIdentifier: uniPortName,
        Name:                 tp.Name,
        ProfileType:          tp.ProfileType,
        Version:              tp.Version,
        NumGemPorts:          tp.NumGemPorts,
        NumTconts:            numOfTconts,
        InstanceCtrl:         tp.InstanceCtrl,
        UsScheduler: iScheduler{
            AllocID:   tcontIDs[0],
            Scheduler: tp.UsScheduler},
        DsScheduler: iScheduler{
            AllocID:   tcontIDs[0],
            Scheduler: tp.DsScheduler},
        UpstreamGemPortAttributeList:   usGemPortAttributeList,
        DownstreamGemPortAttributeList: dsGemPortAttributeList}
}

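// getDefaultTechProfile builds a tech profile template from the default constants and the configured p-bit maps, used when no template is found in the KV store.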
func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {

    var usGemPortAttributeList []GemPortAttribute
    var dsGemPortAttributeList []GemPortAttribute

    for _, pbit := range t.config.DefaultPbits {
        usGemPortAttributeList = append(usGemPortAttributeList,
            GemPortAttribute{
                MaxQueueSize:     defaultMaxQueueSize,
                PbitMap:          pbit,
                AesEncryption:    defaultAESEncryption,
                SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
                PriorityQueue:    defaultPriorityQueue,
                Weight:           defaultQueueWeight,
                DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
                DiscardConfig: DiscardConfig{
                    MinThreshold:   defaultMinThreshold,
                    MaxThreshold:   defaultMaxThreshold,
                    MaxProbability: defaultMaxProbability}})
        dsGemPortAttributeList = append(dsGemPortAttributeList,
            GemPortAttribute{
                MaxQueueSize:     defaultMaxQueueSize,
                PbitMap:          pbit,
                AesEncryption:    defaultAESEncryption,
                SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
                PriorityQueue:    defaultPriorityQueue,
                Weight:           defaultQueueWeight,
                DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
                DiscardConfig: DiscardConfig{
                    MinThreshold:   defaultMinThreshold,
                    MaxThreshold:   defaultMaxThreshold,
                    MaxProbability: defaultMaxProbability}})
    }
    return &DefaultTechProfile{
        Name:        t.config.DefaultTPName,
        ProfileType: t.resourceMgr.GetTechnology(),
        Version:     t.config.TPVersion,
        NumGemPorts: uint32(len(usGemPortAttributeList)),
        InstanceCtrl: InstanceControl{
            Onu:               defaultOnuInstance,
            Uni:               defaultUniInstance,
            MaxGemPayloadSize: defaultGemPayloadSize},
        UsScheduler: Scheduler{
            Direction:    Direction_name[Direction_UPSTREAM],
            AdditionalBw: AdditionalBW_name[defaultAddtionalBw],
            Priority:     defaultPriority,
            Weight:       defaultWeight,
            QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
        DsScheduler: Scheduler{
            Direction:    Direction_name[Direction_DOWNSTREAM],
            AdditionalBw: AdditionalBW_name[defaultAddtionalBw],
            Priority:     defaultPriority,
            Weight:       defaultWeight,
            QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
        UpstreamGemPortAttributeList:   usGemPortAttributeList,
        DownstreamGemPortAttributeList: dsGemPortAttributeList}
}

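// GetprotoBufParamValue maps a tech profile parameter name to the corresponding openolt protobuf enum value, returning -1 if no match is found.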
func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
    var result int32 = -1

    if paramType == "direction" {
        for key, val := range openolt_pb.Direction_value {
            if key == paramKey {
                result = val
            }
        }
    } else if paramType == "discard_policy" {
        for key, val := range openolt_pb.DiscardPolicy_value {
            if key == paramKey {
                result = val
            }
        }
    } else if paramType == "sched_policy" {
        for key, val := range openolt_pb.SchedulingPolicy_value {
            if key == paramKey {
                log.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
                result = val
            }
        }
    } else if paramType == "additional_bw" {
        for key, val := range openolt_pb.AdditionalBW_value {
            if key == paramKey {
                result = val
            }
        }
    } else {
        log.Errorw("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
        return -1
    }
    log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
    return result
}

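// GetUsScheduler converts the upstream scheduler of a tech profile instance into an openolt protobuf Scheduler.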
func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
    dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
    if dir == -1 {
        log.Error("Error in getting proto value for direction for upstream scheduler")
        return nil
    }
    bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
    if bw == -1 {
        log.Error("Error in getting proto value for bandwidth for upstream scheduler")
        return nil
    }
    policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
    if policy == -1 {
        log.Error("Error in getting proto value for scheduling policy for upstream scheduler")
        return nil
    }
    return &openolt_pb.Scheduler{
        Direction:    dir,
        AdditionalBw: bw,
        Priority:     tpInstance.UsScheduler.Scheduler.Priority,
        Weight:       tpInstance.UsScheduler.Scheduler.Weight,
        SchedPolicy:  policy}
}

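// GetDsScheduler converts the downstream scheduler of a tech profile instance into an openolt protobuf Scheduler.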
func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {

    dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
    if dir == -1 {
        log.Error("Error in getting proto value for direction for downstream scheduler")
        return nil
    }
    bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
    if bw == -1 {
        log.Error("Error in getting proto value for bandwidth for downstream scheduler")
        return nil
    }
    policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
    if policy == -1 {
        log.Error("Error in getting proto value for scheduling policy for downstream scheduler")
        return nil
    }

    return &openolt_pb.Scheduler{
        Direction:    dir,
        AdditionalBw: bw,
        Priority:     tpInstance.DsScheduler.Scheduler.Priority,
        Weight:       tpInstance.DsScheduler.Scheduler.Weight,
        SchedPolicy:  policy}
}

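// GetTconts builds the openolt protobuf TCONTs for a tech profile instance, deriving the schedulers from the instance when they are not supplied.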
func (t *TechProfileMgr) GetTconts(tpInstance *TechProfile, usSched *openolt_pb.Scheduler, dsSched *openolt_pb.Scheduler) []*openolt_pb.Tcont {
    if usSched == nil {
        if usSched = t.GetUsScheduler(tpInstance); usSched == nil {
            log.Error("Error in getting upstream scheduler from techprofile")
            return nil
        }
    }
    if dsSched == nil {
        if dsSched = t.GetDsScheduler(tpInstance); dsSched == nil {
            log.Error("Error in getting downstream scheduler from techprofile")
            return nil
        }
    }
    tconts := []*openolt_pb.Tcont{}
    // TODO: Fix me, UPSTREAM direction is not proper
    // upstream scheduler
    tcont_us := &openolt_pb.Tcont{
        Direction: usSched.Direction,
        AllocId:   tpInstance.UsScheduler.AllocID,
        Scheduler: usSched} /*TrafficShapingInfo: ? */
    tconts = append(tconts, tcont_us)

    // downstream scheduler
    tcont_ds := &openolt_pb.Tcont{
        Direction: dsSched.Direction,
        AllocId:   tpInstance.DsScheduler.AllocID,
        Scheduler: dsSched}

    tconts = append(tconts, tcont_ds)
    return tconts
}