VOL-351: Assign unique ID to Channelgroup xPON object for HA
Change-Id: I14bb71dde475aaaafc7c8b53ec9fbc3d1a3dc3cc
diff --git a/common/utils/indexpool.py b/common/utils/indexpool.py
new file mode 100644
index 0000000..24f57ff
--- /dev/null
+++ b/common/utils/indexpool.py
@@ -0,0 +1,36 @@
+from bitstring import BitArray
+import structlog
+
+log = structlog.get_logger()
+
+class IndexPool(object):
+ def __init__(self, max_entries, offset):
+ self.max_entries = max_entries
+ self.offset = offset
+ self.indices = BitArray(self.max_entries)
+
+ def get_next(self):
+ try:
+ _pos = self.indices.find('0b0')
+ self.indices.set(1, _pos)
+ return self.offset + _pos[0]
+ except IndexError:
+ log.info("exception-fail-to-allocate-id-all-bits-in-use")
+ return None
+
+ def release(self, index):
+ index -= self.offset
+ _pos = (index,)
+ try:
+ self.indices.set(0, _pos)
+ except IndexError:
+ log.info("bit-position-{}-out-of-range".format(index))
+
+    # index may be a single index or, to set several bits to 1 at once, a tuple of indices
+ def pre_allocate(self, index):
+ if(isinstance(index, tuple)):
+ _lst = list(index)
+ for i in range(len(_lst)):
+ _lst[i] -= self.offset
+ index = tuple(_lst)
+ self.indices.set(1, index)
diff --git a/requirements.txt b/requirements.txt
index d62ddd5..99c94b2 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
argparse==1.2.1
arrow>=0.10.0
+bitstring
cmd2==0.7.0
colorama>=0.2.5
cython==0.24.1
diff --git a/tests/utests/common/utils/test_indexpool.py b/tests/utests/common/utils/test_indexpool.py
new file mode 100644
index 0000000..609ed2f
--- /dev/null
+++ b/tests/utests/common/utils/test_indexpool.py
@@ -0,0 +1,35 @@
+from unittest import TestCase, main
+from common.utils.indexpool import IndexPool
+
+class TestIndexPool(TestCase):
+ pool = IndexPool(8, 0)
+ def test_01_get_next(self):
+ self.assertEqual(self.pool.indices.bin, '00000000')
+ for i in range(8):
+ self.assertEqual(self.pool.get_next(), i)
+        # verify that no free index remains once all 8 bits are in use
+ self.assertIsNone(self.pool.get_next())
+
+ def test_02_pre_allocate(self):
+ _pool2 = IndexPool(8, 0)
+ self.assertEqual(_pool2.indices.bin, '00000000')
+ _pool2.pre_allocate((0,1,2,))
+ self.assertEqual(_pool2.indices.bin, '11100000')
+
+ def test_03_release(self):
+ self.pool.release(5)
+ self.assertEqual(self.pool.indices.bin, '11111011')
+ self.pool.release(10)
+ self.assertEqual(self.pool.indices.bin, '11111011')
+ self.pool.release(0)
+ self.assertEqual(self.pool.indices.bin, '01111011')
+
+ def test_04_check_offset(self):
+ _offset = 5
+ self.pool = IndexPool(8, _offset)
+ for i in range(8):
+ self.assertEqual(self.pool.get_next(), _offset + i)
+
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/voltha/core/xpon_handler.py b/voltha/core/xpon_handler.py
index 0bcd323..17408e9 100644
--- a/voltha/core/xpon_handler.py
+++ b/voltha/core/xpon_handler.py
@@ -32,6 +32,8 @@
from voltha.protos.device_pb2 import Device
from voltha.protos.common_pb2 import AdminState
+from common.utils.indexpool import IndexPool
+
from requests.api import request
log = structlog.get_logger()
@@ -41,6 +43,19 @@
def __init__(self, core):
self.core = core
self.root = None
+ '''
+ Pool for handling channel group indices
+ @TODO: As per current persistency & HA design, each VOLTHA instance
+ maintains a separate independent database. Since channel-groups are
+ broadcast to all the VOLTHA instances in the cluster, the
+ xpon_handler in each instance will independently try to allocate a
+ unique index. This approach works OK for XGS-PON since CG<->CTerm
+ relationship is 1:1 for XGS-PON(Since a device can only be served
+ by one VOLTHA instance and thereby CTerm). This needs to be further
+ investigated wrt persistency & HA design evolution, for a better
+ approach in future.
+ '''
+ self.cg_pool = IndexPool(2**12, 0)
def start(self, root):
log.debug('starting xpon_handler')
@@ -51,6 +66,13 @@
items = self.root.get('/channel_groups')
return AllChannelgroupConfig(channelgroup_config=items)
+ def get_channel_group_config(self, request, context):
+ log.info('grpc-request', request=request)
+ item = self.root.get('/channel_groups/{}'.format(request.name))
+ if(isinstance(item, ChannelgroupConfig)):
+ return item
+ return Empty()
+
def create_channel_group(self, request, context):
log.info('grpc-request', request=request)
@@ -58,10 +80,16 @@
assert isinstance(request, ChannelgroupConfig)
assert self.validate_interface(request, context)
log.debug('creating-channel-group', name=request.name)
+ _id = self.cg_pool.get_next()
+ assert _id != None
+ request.cg_index = _id
self.root.add('/channel_groups', request)
return Empty()
except AssertionError, e:
+ context.set_details(
+ 'Fail to allocate id to \'{}\''.format(request.name))
+ context.set_code(StatusCode.INVALID_ARGUMENT)
return Empty()
except ValueError:
context.set_details(
@@ -82,7 +110,10 @@
try:
assert isinstance(request, ChannelgroupConfig)
assert self.validate_interface(request, context)
+ channelgroup = self.get_channel_group_config(request, context)
+ assert channelgroup.name == request.name
+ request.cg_index = channelgroup.cg_index
path = '/channel_groups/{}'.format(request.name)
log.debug('updating-channel-group', name=request.name)
self.root.update(path, request, strict=True)
@@ -118,9 +149,13 @@
assert request.name not in known_channel_group_ref
reference = "channel pair"
assert request.name not in known_channel_group_ref_1
+ channelgroup = self.get_channel_group_config(request, context)
+ assert channelgroup.name == request.name
+
path = '/channel_groups/{}'.format(request.name)
log.debug('removing-channel-group', name=request.name)
self.root.remove(path)
+ self.cg_pool.release(channelgroup.cg_index)
return Empty()
diff --git a/voltha/protos/bbf_fiber_base.proto b/voltha/protos/bbf_fiber_base.proto
index 0534e12..b7f7689 100644
--- a/voltha/protos/bbf_fiber_base.proto
+++ b/voltha/protos/bbf_fiber_base.proto
@@ -22,6 +22,7 @@
ietf_interfaces.Interface interface = 2;
ChannelgroupConfigData data = 3;
string name = 4;
+ uint32 cg_index = 5 [(voltha.access) = READ_ONLY];
}
message ChannelpartitionConfig