Update the Netconf custom RPC handling as follows:
1) Create a message/field YANG reference. This is used to keep the
XML tags in the same order as the fields appear in the YANG schema.
This applies only to custom RPCs (one of NETCONF's quirks).
2) Annotate the proto RPCs with custom annotations that are used
when constructing an XML response.
Change-Id: I07a8a3f2a44b7081c78e00dab05734a7c6b0a358
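For illustration only (not part of the patch), here is a minimal sketch of the reordering idea behind point 1), assuming the field-definition shape that the generated yang_message_defs.py module provides; the field list and response values are made-up examples.

import collections

# YANG-ordered field definitions for a message (shape assumed, values invented)
fields = [
    {'name': 'id', 'type_ref': False, 'repeated': False},
    {'name': 'type', 'type_ref': False, 'repeated': False},
    {'name': 'vendor', 'type_ref': False, 'repeated': False},
]

# A gRPC response rendered as a plain dict loses the schema ordering
response = {'vendor': 'simulated', 'id': '0001', 'type': 'simulated_olt'}

# Rebuild it as an OrderedDict that follows the YANG field order so that the
# XML tags are emitted in schema order
ordered = collections.OrderedDict(
    (f['name'], response[f['name']]) for f in fields if f['name'] in response)
# -> OrderedDict([('id', '0001'), ('type', 'simulated_olt'), ('vendor', 'simulated')])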
diff --git a/netconf/constants.py b/netconf/constants.py
index 976ddff..8e6bf92 100644
--- a/netconf/constants.py
+++ b/netconf/constants.py
@@ -28,6 +28,9 @@
CERTS_DIRECTORY = 'security/certificates'
CLIENT_CRED_DIRECTORY = 'security/client_credentials'
+ # YANG message definition file - generated file
+ YANG_MESSAGE_DEFINITIONS_FILE = 'yang_message_defs.py'
+
# Datastores
RUNNING = "running"
CANDIDATE = "candidate"
diff --git a/netconf/grpc_client/grpc_client.py b/netconf/grpc_client/grpc_client.py
index 9dd7f87..341f180 100644
--- a/netconf/grpc_client/grpc_client.py
+++ b/netconf/grpc_client/grpc_client.py
@@ -44,6 +44,7 @@
from google.protobuf import descriptor
import base64
import math
+import collections
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
@@ -304,15 +305,72 @@
response = yield func(self, params, metadata)
- log.info('rpc-result', service=service, method=method,
- response=response)
+ # Get the XML tag to use in the response
+ xml_tag = mapper.get_xml_tag(service, method)
- returnValue(response)
+ # Get the XML list item name used in the response
+ list_item_name = mapper.get_list_items_name(service, method)
+
+ # Get the YANG defined fields (and their order) for that service
+ # and method
+ fields = mapper.get_fields_from_yang_defs(service, method)
+
+ # TODO: This needs to be investigated further since the Netconf
+ # client shows a formatting error if the code below is uncommented.
+ # Check if this represents a List and whether the field name is
+ # items. In the response (a dictionary), if a list named 'items'
+ # is returned then 'items' can either:
+ # 1) represent a list of items being returned where 'items' is just
+ # a name to represent a list. In this case, this name will be
+ # discarded
+ # 2) represent the actual field name as defined in the proto
+ # definitions. If this is the case then we need to preserve the
+ # name
+ # list_item_name = ''
+ # if len(fields) == 1:
+ # if fields[0]['name'] == 'items':
+ # list_item_name = 'items'
+
+ # Rearrange the dictionary response as specified by the YANG
+ # definitions
+ rearranged_response = self.rearrange_dict(mapper, response, fields)
+
+ log.info('rpc-result', service=service, method=method,
+ response=response,
+ rearranged_response=rearranged_response, xml_tag=xml_tag,
+ list_item_name=list_item_name, fields=fields)
+
+ returnValue((rearranged_response, (xml_tag, list_item_name)))
except Exception, e:
log.exception('rpc-failure', service=service, method=method,
params=params, e=e)
+ def rearrange_dict(self, mapper, orig_dict, fields):
+ log.debug('rearranging-dict', fields=fields)
+ result = collections.OrderedDict()
+ if len(orig_dict) == 0 or not fields:
+ return result
+ for f in fields:
+ if orig_dict.has_key(f['name']):
+ if f['type_ref']:
+ # Get the fields for that type
+ sub_fields = mapper.get_fields_from_type_name(f['module'],
+ f['type'])
+ if f['repeated']:
+ result[f['name']] = []
+ for d in orig_dict[f['name']]:
+ result[f['name']].append(self.rearrange_dict(
+ mapper, d, sub_fields))
+ else:
+ result[f['name']] = self.rearrange_dict(mapper,
+ orig_dict[
+ f['name']],
+ sub_fields)
+ else:
+ result[f['name']] = orig_dict[f['name']]
+ return result
+
@inlineCallbacks
def invoke(self, stub, method_name, request, metadata, retry=1):
"""
diff --git a/netconf/grpc_client/nc_rpc_mapper.py b/netconf/grpc_client/nc_rpc_mapper.py
index dacfc2b..6267b8f 100644
--- a/netconf/grpc_client/nc_rpc_mapper.py
+++ b/netconf/grpc_client/nc_rpc_mapper.py
@@ -17,6 +17,7 @@
import sys
import inspect
from structlog import get_logger
+from netconf.constants import Constants as C
log = get_logger()
@@ -32,6 +33,7 @@
self.work_dir = work_dir
self.grpc_client = grpc_client
self.rpc_map = {}
+ self.yang_defs = {}
def _add_rpc_map(self, func_name, func_ref):
if not self.rpc_map.has_key(func_name):
@@ -42,6 +44,10 @@
for name, ref in self.list_functions(mod):
self._add_rpc_map(name, ref)
+ def _add_m(self, mod):
+ for name, ref in self.list_functions(mod):
+ self._add_rpc_map(name, ref)
+
def is_mod_function(self, mod, func):
return inspect.isfunction(func) and inspect.getmodule(func) == mod
@@ -63,20 +69,88 @@
except Exception, e:
log.exception('loading-module-exception', modname=modname, e=e)
+ # load the yang definition
+ for fname in [f for f in os.listdir(self.work_dir)
+ if f.endswith(C.YANG_MESSAGE_DEFINITIONS_FILE)]:
+ modname = fname[:-len('.py')]
+ try:
+ m = __import__(modname)
+ for name, ref in self.list_functions(m):
+ self.yang_defs[name] = ref
+ except Exception, e:
+ log.exception('loading-yang-module-exception', modname=modname,
+ e=e)
+
+ def get_fields_from_yang_defs(self, service, method):
+ # Get the return type of that method
+ func_name = self._get_function_name(service, method)
+ return_type_func_name = ''.join(['get_return_type_', func_name])
+ if self.rpc_map.has_key(return_type_func_name):
+ type_name = self.rpc_map[return_type_func_name]()
+ log.info('get-yang-defs', type_name=type_name, service=service,
+ method=method)
+ if type_name:
+ # Type name is in the form "<package-name>_pb2".<message_name>
+ name = type_name.split('.')
+ if len(name) == 2:
+ package = name[0][:-len('_pb2')]
+ message_name = name[1]
+ if self.yang_defs.has_key('get_fields'):
+ return self.yang_defs['get_fields'](package,
+ message_name)
+ else:
+ log.info('Incorrect-type-format', type_name=type_name,
+ service=service,
+ method=method)
+ return None
+
+ def get_fields_from_type_name(self, module_name, type_name):
+ if self.yang_defs.has_key('get_fields'):
+ return self.yang_defs['get_fields'](module_name,
+ type_name)
+
def get_function(self, service, method):
- if service:
- func_name = ''.join([service, '_', method])
- else:
- func_name = method
+
+ func_name = self._get_function_name(service, method)
if self.rpc_map.has_key(func_name):
return self.rpc_map[func_name]
else:
return None
+ def get_xml_tag(self, service, method):
+ func_name = self._get_function_name(service, method)
+ xml_tag_func_name = ''.join(['get_xml_tag_', func_name])
+ if self.rpc_map.has_key(xml_tag_func_name):
+ tag = self.rpc_map[xml_tag_func_name]()
+ if tag == '':
+ return None
+ else:
+ return tag
+ else:
+ return None
+
+ def get_list_items_name(self, service, method):
+ func_name = self._get_function_name(service, method)
+ list_items_name = ''.join(['get_list_items_name_', func_name])
+ if self.rpc_map.has_key(list_items_name):
+ name = self.rpc_map[list_items_name]()
+ if name == '':
+ return None
+ else:
+ return name
+ else:
+ return None
+
def is_rpc_exist(self, rpc_name):
return self.rpc_map.has_key(rpc_name)
+ def _get_function_name(self, service, method):
+ if service:
+ return ''.join([service, '_', method])
+ else:
+ return method
+
def get_nc_rpc_mapper_instance(work_dir=None, grpc_client=None):
if NetconfRPCMapper.instance == None:
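For illustration only (not part of the patch), this sketch shows how the mapper resolves a return-type string of the form '<package>_pb2.<MessageName>' into YANG field definitions; 'voltha_pb2.Devices' is just an assumed example value.

type_name = 'voltha_pb2.Devices'      # as returned by a get_return_type_* helper

name = type_name.split('.')
if len(name) == 2:
    package = name[0][:-len('_pb2')]  # -> 'voltha'
    message_name = name[1]            # -> 'Devices'
    # yang_defs['get_fields'](package, message_name) then returns the
    # YANG-ordered field list for that message, or None if it is unknown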
diff --git a/netconf/nc_rpc/base/get.py b/netconf/nc_rpc/base/get.py
index 6337b5f..19535f5 100644
--- a/netconf/nc_rpc/base/get.py
+++ b/netconf/nc_rpc/base/get.py
@@ -63,7 +63,7 @@
self.metadata = self.request['metadata']
# Execute the request
- res_dict = yield self.grpc_client.invoke_voltha_rpc(
+ res_dict, yang_options = yield self.grpc_client.invoke_voltha_rpc(
service=self.service,
method=self.method,
params=self.params,
@@ -77,7 +77,7 @@
# Build the yang response
self.rpc_response.node = self.rpc_response.build_yang_response(
- root_elem, self.request)
+ root_elem, self.request, yang_options=yang_options)
self.rpc_response.is_error = False
returnValue(self.rpc_response)
@@ -158,9 +158,9 @@
{'subclass': 'device_groups',
'rpc': 'VolthaLocalService-ListDeviceGroups'
},
- ]
- # 'VolthaInstances': [
- # {'subclass': None,
- # 'rpc': 'VolthaGlobalService-ListVolthaInstances'
- # }],
+ ],
+ 'VolthaInstances': [
+ {'subclass': None,
+ 'rpc': 'VolthaGlobalService-ListVolthaInstances'
+ }],
}
diff --git a/netconf/nc_rpc/base/get_config.py b/netconf/nc_rpc/base/get_config.py
index 7b89ded..ba281e4 100644
--- a/netconf/nc_rpc/base/get_config.py
+++ b/netconf/nc_rpc/base/get_config.py
@@ -23,8 +23,10 @@
class GetConfig(Rpc):
- def __init__(self, request, request_xml, grpc_client, session, capabilities):
- super(GetConfig, self).__init__(request, request_xml, grpc_client, session)
+ def __init__(self, request, request_xml, grpc_client, session,
+ capabilities):
+ super(GetConfig, self).__init__(request, request_xml, grpc_client,
+ session, capabilities)
self._validate_parameters()
def execute(self):
@@ -44,7 +46,8 @@
self.rpc_response.node = ncerror.BadMsg(self.rpc_request)
return
- self.source_param = self.rpc_method.find(C.NC_SOURCE, namespaces=C.NS_MAP)
+ self.source_param = self.rpc_method.find(C.NC_SOURCE,
+ namespaces=C.NS_MAP)
# if self.source_param is None:
# self.rpc_response.is_error = True
# self.rpc_response.node = ncerror.MissingElement(
diff --git a/netconf/nc_rpc/ext/voltha_rpc.py b/netconf/nc_rpc/ext/voltha_rpc.py
index 37ba305..835f900 100644
--- a/netconf/nc_rpc/ext/voltha_rpc.py
+++ b/netconf/nc_rpc/ext/voltha_rpc.py
@@ -48,7 +48,7 @@
request=self.request)
# Execute the request
- res_dict = yield self.grpc_client.invoke_voltha_rpc(
+ res_dict, yang_options = yield self.grpc_client.invoke_voltha_rpc(
service=self.service,
method=self.method,
params=self.request['params'],
@@ -62,7 +62,7 @@
# Build the yang response
self.rpc_response.node = self.rpc_response.build_yang_response(
- root_elem, self.request, custom_rpc=True)
+ root_elem, self.request, yang_options=yang_options, custom_rpc=True)
self.rpc_response.is_error = False
returnValue(self.rpc_response)
diff --git a/netconf/nc_rpc/rpc_response.py b/netconf/nc_rpc/rpc_response.py
index 2d60d77..3b2deba 100644
--- a/netconf/nc_rpc/rpc_response.py
+++ b/netconf/nc_rpc/rpc_response.py
@@ -47,6 +47,10 @@
elif voltha_xml_string.startswith('<yang/>'):
voltha_xml_string = ''
+ # Replace any True/False values with their XML equivalents true/false
+ voltha_xml_string = voltha_xml_string.replace('>False<', '>false<')
+ voltha_xml_string = voltha_xml_string.replace('>True<', '>true<')
+
if not custom_rpc:
# Create the xml body as
if request.has_key('subclass'):
@@ -127,10 +131,11 @@
if (attrib == 'list'):
if list(elem) is None:
return self.copy_basic_element(elem)
- if elem.tag == 'items':
- new_elem = etree.Element('items')
- else:
- new_elem = etree.Element('ignore')
+ # if elem.tag == 'items':
+ # new_elem = etree.Element('items')
+ new_elem = etree.Element('ignore')
+ # else:
+ # new_elem = etree.Element('ignore')
for elm in list(elem):
elm.tag = elem.tag
if elm.get('type') in ['list', 'dict']:
@@ -155,7 +160,8 @@
else:
return self.copy_basic_element(elem)
- def to_yang_xml(self, from_xml, request, custom_rpc=False):
+ def to_yang_xml(self, from_xml, request, yang_options=None,
+ custom_rpc=False):
# Parse from_xml as follows:
# 1. Any element having a list attribute should have each item move 1 level
# up and retag using the parent tag
@@ -163,20 +169,27 @@
# sub-element should have all its items move to the parent level
top = etree.Element('yang')
elms = list(from_xml)
-
+ xml_tag = yang_options[0]
+ list_items_name = yang_options[1]
# special case where the xml contains a list type
- if len(elms) == 1 and not custom_rpc:
+ if len(elms) == 1:
item = elms[0]
- # TODO: Address name 'items' clash when a list name is actually
- # 'items'.
if item.get('type') == 'list':
- if request.has_key('subclass'):
- item.tag = request['subclass']
- # remove the subclass element in request to avoid duplicate tag
- del request['subclass']
+ if list_items_name == 'items':
+ # Create a new parent element
+ new_elem = etree.Element(xml_tag)
+ self.add_node(self.process_element(item), new_elem)
+ top.append(new_elem)
else:
- item.tag = 'ignore'
- self.add_node(self.process_element(item), top)
+ if xml_tag and custom_rpc:
+ item.tag = xml_tag
+ elif request.has_key('subclass'):
+ item.tag = request['subclass']
+ # remove the subclass element in request to avoid duplicate tag
+ del request['subclass']
+ else:
+ item.tag = 'ignore'
+ self.add_node(self.process_element(item), top)
return top
# Process normally for all other cases
@@ -185,12 +198,20 @@
return top
+ # Helper method to sort the xml message based on the xml tags
+ def sort_xml_response(self, xml):
+ for parent in xml.xpath('//*[./*]'): # Search for parent elements
+ parent[:] = sorted(parent, key=lambda x: x.tag)
+ return xml
+
# custom_rpc refers to custom RPCs different from Netconf default RPCs
# like get, get-config, edit-config, etc
- def build_yang_response(self, root, request, custom_rpc=False):
+ def build_yang_response(self, root, request, yang_options=None,
+ custom_rpc=False):
try:
self.custom_rpc = custom_rpc
- yang_xml = self.to_yang_xml(root, request, custom_rpc)
+ yang_xml = self.to_yang_xml(root, request, yang_options,
+ custom_rpc)
log.info('yang-xml', yang_xml=etree.tounicode(yang_xml,
pretty_print=True))
return self.build_xml_response(request, yang_xml, custom_rpc)
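For illustration only (not part of the patch), this is what the new sort_xml_response helper does on a small document; it requires lxml and the tag names are arbitrary.

from lxml import etree

xml = etree.fromstring('<device><vendor>x</vendor><id>1</id></device>')
for parent in xml.xpath('//*[./*]'):   # every element that has child elements
    parent[:] = sorted(parent, key=lambda x: x.tag)
print(etree.tostring(xml))
# -> <device><id>1</id><vendor>x</vendor></device>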
diff --git a/netconf/protoc_plugins/proto2yang.py b/netconf/protoc_plugins/proto2yang.py
index fc887a6..e9bf614 100755
--- a/netconf/protoc_plugins/proto2yang.py
+++ b/netconf/protoc_plugins/proto2yang.py
@@ -40,12 +40,39 @@
FieldDescriptorProto
from descriptor_parser import DescriptorParser
import copy
+from netconf.constants import Constants as C
import yang_options_pb2
from google.protobuf.descriptor import FieldDescriptor
import jinja2
-env = jinja2.Environment(extensions=["jinja2.ext.do",], trim_blocks=True, lstrip_blocks=True)
+
+env = jinja2.Environment(extensions=["jinja2.ext.do", ], trim_blocks=True,
+ lstrip_blocks=True)
+
+template_yang_definition = env.from_string("""
+# Generated file; please do not edit
+
+from structlog import get_logger
+
+log = get_logger()
+
+message_definitions = {
+ {% for m in messages %}
+ '{{ m.name }}': {{ m.fields }},
+ {% if loop.last %}{% endif %}
+ {% endfor %}
+}
+
+def get_fields(package, type_name, **kw):
+ log.info('fields-request', type=type_name, package=package, **kw)
+ full_name = ''.join([package, '-', type_name])
+ if message_definitions.has_key(full_name):
+ return message_definitions[full_name]
+ else:
+ return None
+
+""")
template_yang = env.from_string("""
module ietf-{{ module.name }} {
@@ -741,6 +768,77 @@
m['output'] = ''.join([m['output'], '_grouping'])
+def get_module_name(type, data_types):
+ for t in data_types:
+ # Verify both the type and when it is a referred type as they will
+ # both be in the same module
+ if t['type'] in [type, ''.join([type, '_grouping'])]:
+ return t['module']
+
+ # return the default module name
+ return 'voltha'
+
+
+def get_message_defs(messages, data_types, msg_response):
+ for msg in messages:
+ fields = []
+
+ # First process the fields as they appear before the oneofs in the
+ # YANG module
+ for f in msg['fields']:
+ module_name = '.'
+ if f['type_ref']:
+ module_name = get_module_name(f['type'], data_types)
+ fields.append(
+ {
+ 'oneof_key': None,
+ 'repeated': f['repeated'],
+ 'name': f['name'],
+ 'full_name': f['full_name'],
+ 'type': f['type'],
+ 'type_ref': f['type_ref'],
+ 'module': module_name
+ }
+ )
+
+ # Now process the oneofs
+ if msg['oneofs']:
+ for key, value in msg['oneofs'].iteritems():
+ # Value contains a list of fields
+ for v in value:
+ module_name = '.'
+ if v['type_ref']:
+ module_name = get_module_name(v['type'], data_types)
+ fields.append(
+ {
+ 'oneof_key': key,
+ 'repeated': v['repeated'],
+ 'name': v['name'],
+ 'full_name': v['full_name'],
+ 'type': v['type'],
+ 'type_ref': v['type_ref'],
+ 'module': module_name
+ }
+ )
+
+ msg_response.append({
+ 'name': msg['full_name'],
+ 'fields': fields
+ })
+
+ if msg['messages']:
+ get_message_defs(msg['messages'], data_types, msg_response)
+
+
+def build_yang_definitions(all_proto_data):
+ msg_response = []
+ for proto_data in all_proto_data:
+ get_message_defs(proto_data['module']['messages'], proto_data[
+ 'module']['data_types'], msg_response)
+
+ return msg_response
+
+
def generate_code(request, response):
assert isinstance(request, plugin.CodeGeneratorRequest)
@@ -839,6 +937,17 @@
# print_message(proto_data['module']['messages'])
f.content = template_yang.render(module=proto_data['module'])
+ # Create a summary of the YANG definitions with the order in which the
+ # attributes appear in each message. It would have been easier to sort
+ # the attributes in the YANG files and then sort the XML tags when an
+ # XML response is built. However, that strategy does not work with the
+ # protobuf oneof construct: the attributes inside a oneof must be kept
+ # together, which breaks a plain sort.
+ msg_response = build_yang_definitions(all_proto_data)
+ yang_def = response.file.add()
+ yang_def.name = C.YANG_MESSAGE_DEFINITIONS_FILE
+ yang_def.content = template_yang_definition.render(messages=msg_response)
+
def get_yang_type(field):
type = field['type']
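For illustration only (not part of the patch), the generated yang_message_defs.py is assumed to look roughly like the sketch below; the 'voltha-Device' entry and its single field are invented, real entries carry one dict per field in YANG order.

message_definitions = {
    'voltha-Device': [
        {'oneof_key': None, 'repeated': False, 'name': 'id',
         'full_name': 'voltha.Device.id', 'type': 'string',
         'type_ref': False, 'module': '.'},
    ],
}

def get_fields(package, type_name, **kw):
    # Keys are '<package>-<MessageName>', matching the join in the template
    return message_definitions.get('-'.join([package, type_name]))

# get_fields('voltha', 'Device') -> the ordered field list above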
diff --git a/netconf/protoc_plugins/rpc_gw_gen.py b/netconf/protoc_plugins/rpc_gw_gen.py
index 671bf28..3a3af0f 100755
--- a/netconf/protoc_plugins/rpc_gw_gen.py
+++ b/netconf/protoc_plugins/rpc_gw_gen.py
@@ -22,6 +22,7 @@
MethodOptions
from jinja2 import Template
from simplejson import dumps
+import yang_options_pb2
from netconf.protos.third_party.google.api import annotations_pb2, http_pb2
@@ -73,6 +74,15 @@
log.info('{{ method_name }}', **out_data)
returnValue(out_data)
+def get_xml_tag_{{ method_name }}():
+ return '{{ method['xml_tag'] }}'
+
+def get_list_items_name_{{ method_name }}():
+ return '{{ method['list_item_name'] }}'
+
+def get_return_type_{{ method_name }}():
+ return '{{ type_map[method['output_type']] }}'
+
{% endfor %}
""", trim_blocks=True, lstrip_blocks=True)
@@ -92,13 +102,27 @@
if output_type.startswith('.'):
output_type = output_type[1:]
+ # Process any specific yang option
+ xml_tag = ''
+ list_item_name = ''
+ options = method.options
+ assert isinstance(options, MethodOptions)
+ for fd, yang_tag in options.ListFields():
+ if fd.full_name == 'voltha.yang_xml_tag':
+ if yang_tag.xml_tag:
+ xml_tag = yang_tag.xml_tag
+ if yang_tag.list_items_name:
+ list_item_name = yang_tag.list_items_name
+
data = {
'package': package,
'filename': proto_file.name,
'service': proto_file.package + '.' + service.name,
'method': method.name,
'input_type': input_type,
- 'output_type': output_type
+ 'output_type': output_type,
+ 'xml_tag': xml_tag,
+ 'list_item_name': list_item_name
}
yield data
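For illustration only (not part of the patch), the gateway generator is assumed to emit per-method helpers along these lines, here for VolthaLocalService.ListDevices; the tag values mirror the voltha.proto annotations, while the function names and the return-type string are best guesses at the generated form.

def get_xml_tag_VolthaLocalService_ListDevices():
    return 'devices'

def get_list_items_name_VolthaLocalService_ListDevices():
    return ''

def get_return_type_VolthaLocalService_ListDevices():
    return 'voltha_pb2.Devices'

# nc_rpc_mapper picks these up by name, so get_xml_tag('VolthaLocalService',
# 'ListDevices') returns 'devices' and get_list_items_name(...) returns None
# because the empty string is mapped to None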
diff --git a/netconf/protos/Makefile b/netconf/protos/Makefile
index 94bfaab..98ac79a 100644
--- a/netconf/protos/Makefile
+++ b/netconf/protos/Makefile
@@ -27,7 +27,7 @@
PROTOC_PREFIX := /usr/local
PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
-build: $(PROTO_PB2_FILES) copyfiles
+build: copyprotos $(PROTO_PB2_FILES) copypb2files
%_pb2.py: %.proto Makefile
@echo "Building protocol buffer artifacts from $<"
@@ -43,16 +43,22 @@
TARGET_YANG_OPTION_DIR := $(VOLTHA_BASE)/netconf/protoc_plugins
YANG_OPTION_FILE := yang_options_pb2.py
+YANG_PROTO_FILE := yang_options.proto
+SCHEMA_PROTO_FILE := schema.proto
-copyfiles:
+copyprotos:
+ cp $(SOURCE_PROTO_DIR)/$(SCHEMA_PROTO_FILE) $(TARGET_PROTO_DIR)
+ cp $(SOURCE_PROTO_DIR)/$(YANG_PROTO_FILE) $(TARGET_PROTO_DIR)
+
+copypb2files:
rsync -av --include '*/' --exclude='third_party/__init__.py' --include '*.py' --exclude='*' $(SOURCE_PROTO_DIR)/ $(TARGET_PROTO_DIR)
cp $(SOURCE_PROTO_DIR)/$(YANG_OPTION_FILE) $(TARGET_YANG_OPTION_DIR)
clean:
rm -f $(PROTO_PB2_FILES) $(PROTO_DESC_FILES)
- rm $(TARGET_YANG_OPTION_DIR)/$(YANG_OPTION_FILE)
- rm $(TARGET_PROTO_DIR)/*.py
- rm $(TARGET_PROTO_DIR)/*.pyc
-
+ rm -f $(TARGET_YANG_OPTION_DIR)/$(YANG_OPTION_FILE)
+ rm -f $(TARGET_PROTO_DIR)/*.py
+ rm -f $(TARGET_PROTO_DIR)/*.pyc
+ rm -f $(TARGET_PROTO_DIR)/*.proto
diff --git a/voltha/protos/voltha.proto b/voltha/protos/voltha.proto
index 9df42bd..6cb8262 100644
--- a/voltha/protos/voltha.proto
+++ b/voltha/protos/voltha.proto
@@ -108,6 +108,8 @@
option (google.api.http) = {
get: "/api/v1/instances"
};
+ option (voltha.yang_xml_tag).xml_tag = 'items';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// Get details on a Voltha cluster instance
@@ -122,6 +124,7 @@
option (google.api.http) = {
get: "/api/v1/logical_devices"
};
+ option (voltha.yang_xml_tag).xml_tag = 'logical_devices';
}
// Get additional information on a given logical device
@@ -136,6 +139,7 @@
option (google.api.http) = {
get: "/api/v1/logical_devices/{id}/ports"
};
+ option (voltha.yang_xml_tag).xml_tag = 'ports';
}
// List all flows of a logical device
@@ -143,6 +147,8 @@
option (google.api.http) = {
get: "/api/v1/logical_devices/{id}/flows"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flows';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// Update flow table for logical device
@@ -159,6 +165,8 @@
option (google.api.http) = {
get: "/api/v1/logical_devices/{id}/flow_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flow_groups';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// Update group table for device
@@ -175,6 +183,7 @@
option (google.api.http) = {
get: "/api/v1/devices"
};
+ option (voltha.yang_xml_tag).xml_tag = 'devices';
}
// Get more information on a given physical device
@@ -204,6 +213,7 @@
option (google.api.http) = {
get: "/api/v1/devices/{id}/ports"
};
+ option (voltha.yang_xml_tag).xml_tag = 'ports';
}
// List all flows of a device
@@ -211,6 +221,8 @@
option (google.api.http) = {
get: "/api/v1/devices/{id}/flows"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flows';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// List all flow groups of a device
@@ -218,6 +230,8 @@
option (google.api.http) = {
get: "/api/v1/devices/{id}/flow_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flow_groups';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// List device types known to Voltha
@@ -225,6 +239,7 @@
option (google.api.http) = {
get: "/api/v1/device_types"
};
+ option (voltha.yang_xml_tag).xml_tag = 'device_types';
}
// Get additional information on a device type
@@ -239,6 +254,7 @@
option (google.api.http) = {
get: "/api/v1/device_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'device_groups';
}
// Get additional information on a device group
@@ -270,6 +286,7 @@
option (google.api.http) = {
get: "/api/v1/local/health"
};
+ option (voltha.yang_xml_tag).xml_tag = 'health';
}
// List all active adapters (plugins) in this Voltha instance
@@ -277,6 +294,7 @@
option (google.api.http) = {
get: "/api/v1/local/adapters"
};
+ option (voltha.yang_xml_tag).xml_tag = 'adapters';
}
// List all logical devices managed by this Voltha instance
@@ -284,6 +302,7 @@
option (google.api.http) = {
get: "/api/v1/local/logical_devices"
};
+ option (voltha.yang_xml_tag).xml_tag = 'logical_devices';
}
// Get additional information on given logical device
@@ -298,6 +317,7 @@
option (google.api.http) = {
get: "/api/v1/local/logical_devices/{id}/ports"
};
+ option (voltha.yang_xml_tag).xml_tag = 'ports';
}
// List all flows of a logical device
@@ -305,6 +325,8 @@
option (google.api.http) = {
get: "/api/v1/local/logical_devices/{id}/flows"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flows';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// Update flow table for logical device
@@ -321,6 +343,8 @@
option (google.api.http) = {
get: "/api/v1/local/logical_devices/{id}/flow_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flow_groups';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// Update group table for logical device
@@ -337,6 +361,7 @@
option (google.api.http) = {
get: "/api/v1/local/devices"
};
+ option (voltha.yang_xml_tag).xml_tag = 'devices';
}
// Get additional information on this device
@@ -366,6 +391,7 @@
option (google.api.http) = {
get: "/api/v1/local/devices/{id}/ports"
};
+ option (voltha.yang_xml_tag).xml_tag = 'ports';
}
// List all flows of a device
@@ -373,6 +399,8 @@
option (google.api.http) = {
get: "/api/v1/local/devices/{id}/flows"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flows';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// List all flow groups of a device
@@ -380,6 +408,8 @@
option (google.api.http) = {
get: "/api/v1/local/devices/{id}/flow_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'flow_groups';
+ option (voltha.yang_xml_tag).list_items_name = 'items';
}
// List device types known to this Voltha instance
@@ -387,6 +417,7 @@
option (google.api.http) = {
get: "/api/v1/local/device_types"
};
+ option (voltha.yang_xml_tag).xml_tag = 'device_types';
}
// Get additional information on given device type
@@ -401,6 +432,7 @@
option (google.api.http) = {
get: "/api/v1/local/device_groups"
};
+ option (voltha.yang_xml_tag).xml_tag = 'device_groups';
}
// Get more information on given device shard
diff --git a/voltha/protos/yang_options.proto b/voltha/protos/yang_options.proto
index 5ff2ed6..e64fef1 100644
--- a/voltha/protos/yang_options.proto
+++ b/voltha/protos/yang_options.proto
@@ -37,6 +37,22 @@
string type = 2;
}
+message RpcReturnDef {
+ // gRPC methods return message types, whereas NETCONF expects an actual
+ // attribute as defined in the YANG schema. The xml_tag is used as the
+ // top-most tag when translating a gRPC response into an XML response
+ string xml_tag = 1;
+
+ // When the gRPC response is a list of items, we need to differentiate
+ // between a YANG schema attribute actually named "items" and the case where
+ // "items" merely indicates that a list is being returned. The default
+ // behavior assumes a list is returned when "items" is present in
+ // the response. This option will therefore be used when the attribute
+ // name in the YANG schema is 'items'
+ string list_items_name = 2;
+}
+
extend google.protobuf.MessageOptions {
// This annotation is used to indicate how a message is parsed when
// converting from proto to yang format.
@@ -50,3 +66,7 @@
// message itself. For now, this applies only to non-repeated fields.
InlineNode yang_inline_node = 7761776;
}
+
+extend google.protobuf.MethodOptions {
+ RpcReturnDef yang_xml_tag = 7761777;
+}