This commit cleans up the python directory to ensure the adapters
and the CLI run properly.
Change-Id: Ic68a3ecd1f16a5af44296e3c020c808b185f4c18
diff --git a/python/cli/README.md b/python/cli/README.md
new file mode 100644
index 0000000..c810df4
--- /dev/null
+++ b/python/cli/README.md
@@ -0,0 +1,14 @@
+## CLI (~/cli)
+
+* Add auto-completion for most common args like device and logical device ids
+* Add consistent argument checking
+* Unify code that retrieves data from gRPC
+* Unify code that prints out data/response, to allow:
+  * Selectable output mode:
+    * JSON
+    * Tabular
+* Organize history per sub-context so that each context shows only the commands
+  entered in that context
+* Metaprogramming [BIG ONE]: Make a large part of the commands come from annotations embedded in
+  the protobuf files and have the corresponding handlers auto-generated by protoc
+* Package the CLI as a docker container and bake it into the composition
diff --git a/python/cli/__init__.py b/python/cli/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/python/cli/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/cli/alarm_filters.py b/python/cli/alarm_filters.py
new file mode 100644
index 0000000..ed2af32
--- /dev/null
+++ b/python/cli/alarm_filters.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Alarm filter CLI commands
+"""
+from optparse import make_option, OptionValueError
+
+from cmd2 import Cmd, options
+from google.protobuf.empty_pb2 import Empty
+
+from table import print_pb_list_as_table
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+
+_ = third_party
+
+
+class AlarmFiltersCli(Cmd):
+ def __init__(self, get_stub):
+ Cmd.__init__(self)
+ self.get_stub = get_stub
+ self.prompt = '(' + self.colorize(
+ self.colorize('alarm_filters', 'red'), 'bold') + ') '
+
+ def cmdloop(self):
+ self._cmdloop()
+
+ def help_show(self):
+ self.poutput(
+'''
+Display the list of configured filters.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID Display the filter rules for a specific filter id (OPTIONAL)
+
+'''
+ )
+
+ @options([
+ make_option('-i', '--filter-id', action="store", dest='filter_id')
+ ])
+ def do_show(self, line, opts):
+ stub = self.get_stub()
+
+ if not opts.filter_id:
+ result = stub.ListAlarmFilters(Empty())
+ print_pb_list_as_table("Alarm Filters:", result.filters, {}, self.poutput)
+ else:
+ result = stub.GetAlarmFilter(voltha_pb2.ID(id=opts.filter_id))
+ print_pb_list_as_table("Rules for Filter ID = {}:".format(opts.filter_id),
+ result.rules, {}, self.poutput)
+
+ @staticmethod
+ def construct_rule(raw_rule):
+ rule = dict()
+
+ rule_kv = raw_rule.strip().split(':')
+
+ if len(rule_kv) == 2:
+ rule['key'] = rule_kv[0].lower()
+ rule['value'] = rule_kv[1].lower()
+ else:
+ raise OptionValueError("Error: A rule must be a colon separated key/value pair")
+
+ return rule
+
+ def parse_filter_rules(option, opt_str, value, parser):
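+        # optparse callback: the first value is parsed here, then every following
+        # non-option token is consumed as an additional key:value rule. For
+        # example (values illustrative),
+        #   -r type:environment severity:indeterminate
+        # yields [{'key': 'type', 'value': 'environment'},
+        #         {'key': 'severity', 'value': 'indeterminate'}]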
+ rules = getattr(parser.values, option.dest)
+ if rules is None:
+ rules = list()
+ rules.append(AlarmFiltersCli.construct_rule(value))
+
+ for arg in parser.rargs:
+ if (arg[:2] == "--" and len(arg) > 2) or (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-"):
+ break
+ else:
+ rules.append(AlarmFiltersCli.construct_rule(arg))
+
+ setattr(parser.values, option.dest, rules)
+ else:
+ raise OptionValueError('Warning: The filter rule option can only be specified once')
+
+ def help_create(self):
+ types = list(
+ k for k, v in
+ AlarmEventType.DESCRIPTOR.enum_values_by_name.items())
+ categories = list(
+ k for k, v in
+ AlarmEventCategory.DESCRIPTOR.enum_values_by_name.items())
+ severities = list(
+ k for k, v in
+ AlarmEventSeverity.DESCRIPTOR.enum_values_by_name.items())
+
+ alarm_types = types
+ alarm_categories = categories
+ alarm_severities = severities
+
+ usage = '''
+Create a new alarm filter.
+
+Valid options:
+
+-r rule:value ... | --filter-rules rule:value ... Specify one or more filter rules as key/value pairs (REQUIRED)
+
+Valid rule keys and expected values:
+
+id : Identifier of an incoming alarm
+type : Type of an incoming alarm {}
+category : Category of an incoming alarm {}
+severity : Severity of an incoming alarm {}
+resource_id : Resource identifier of an incoming alarm
+device_id : Device identifier of an incoming alarm
+
+Example:
+
+# Filter any alarm that matches the following criteria
+
+create -r type:environment severity:indeterminate
+create -r device_id:754f9dcbe4a6
+
+'''.format(alarm_types, alarm_categories, alarm_severities)
+
+ self.poutput(usage)
+
+ @options([
+ make_option('-r', '--filter-rules', help='<key>:<value>...', action="callback",
+ callback=parse_filter_rules, type='string', dest='filter_rules'),
+ ])
+ def do_create(self, line, opts):
+ if opts.filter_rules:
+ stub = self.get_stub()
+ result = stub.CreateAlarmFilter(voltha_pb2.AlarmFilter(rules=opts.filter_rules))
+ print_pb_list_as_table("Rules for Filter ID = {}:".format(result.id),
+ result.rules, {}, self.poutput)
+
+ def help_delete(self):
+ self.poutput(
+'''
+Delete a specific alarm filter entry.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID Specify the identifier of the alarm filter entry to delete (REQUIRED)
+
+'''
+ )
+
+ @options([
+ make_option('-i', '--filter-id', action="store", dest='filter_id')
+ ])
+ def do_delete(self, line, opts):
+ if not opts.filter_id:
+            self.poutput(self.colorize('Error: ', 'red') + 'Specify ' + \
+                         self.colorize(self.colorize('"filter id"', 'blue'),
+                                       'bold') + ' to delete')
+ return
+
+ stub = self.get_stub()
+ stub.DeleteAlarmFilter(voltha_pb2.ID(id=opts.filter_id))
+
+ def help_update(self):
+ types = list(
+ k for k, v in
+ AlarmEventType.DESCRIPTOR.enum_values_by_name.items())
+ categories = list(
+ k for k, v in
+ AlarmEventCategory.DESCRIPTOR.enum_values_by_name.items())
+ severities = list(
+ k for k, v in
+ AlarmEventSeverity.DESCRIPTOR.enum_values_by_name.items())
+
+ alarm_types = types
+ alarm_categories = categories
+ alarm_severities = severities
+
+ usage = '''
+Update the filter rules for an existing alarm filter.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID Indicate the alarm filter identifier to update (REQUIRED)
+-r rule:value ... | --filter-rules rule:value ... Specify one or more filter rules as key/value pairs (REQUIRED)
+
+Valid rule keys and expected values:
+
+id : Identifier of an incoming alarm
+type : Type of an incoming alarm {}
+category : Category of an incoming alarm {}
+severity : Severity of an incoming alarm {}
+resource_id : Resource identifier of an incoming alarm
+device_id : Device identifier of an incoming alarm
+
+Example:
+
+# Filter any alarm that matches the following criteria
+
+update -i 9da115b900bc -r type:environment severity:indeterminate resource_id:1554b0517a07
+
+'''.format(alarm_types, alarm_categories, alarm_severities)
+
+ self.poutput(usage)
+
+ @options([
+ make_option('-r', '--filter-rules', help='<key>:<value>...', action="callback",
+ callback=parse_filter_rules, type='string', dest='filter_rules'),
+ make_option('-i', '--filter-id', action="store", dest='filter_id')
+ ])
+ def do_update(self, line, opts):
+ if not opts.filter_id:
+ self.poutput(self.colorize('Error: ', 'red') + 'Specify ' + \
+ self.colorize(self.colorize('"filter id"', 'blue'),
+ 'bold') + ' to update')
+ return
+
+ if opts.filter_rules:
+ stub = self.get_stub()
+ result = stub.UpdateAlarmFilter(
+ voltha_pb2.AlarmFilter(id=opts.filter_id, rules=opts.filter_rules)
+ )
+ print_pb_list_as_table("Rules for Filter ID = {}:".format(result.id),
+ result.rules, {}, self.poutput)
diff --git a/python/cli/device.py b/python/cli/device.py
new file mode 100644
index 0000000..38ea835
--- /dev/null
+++ b/python/cli/device.py
@@ -0,0 +1,597 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Device level CLI commands
+"""
+from optparse import make_option
+from cmd2 import Cmd, options
+from simplejson import dumps
+
+from table import print_pb_as_table, print_pb_list_as_table
+from utils import print_flows, pb2dict, enum2name
+from python.protos import third_party
+
+_ = third_party
+from python.protos import voltha_pb2, common_pb2
+import sys
+import json
+from google.protobuf.json_format import MessageToDict
+
+# Since proto3 won't send fields that are set to 0/false/"", any object that
+# might have those values set in them needs to be replicated here such that the
+# fields can be adequately displayed.
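+#
+# Illustrative sketch of that proto3 behaviour (comment only; the 'device'
+# variable and field names here are hypothetical):
+#
+#   sparse = MessageToDict(device)  # default-valued fields are omitted
+#   full = MessageToDict(device, including_default_value_fields=True)  # kept
+#
+# The show_nulls flag passed to the table helpers below appears to address the
+# same concern when rendering output.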
+
+
+class DeviceCli(Cmd):
+
+ def __init__(self, device_id, get_stub):
+ Cmd.__init__(self)
+ self.get_stub = get_stub
+ self.device_id = device_id
+ self.prompt = '(' + self.colorize(
+ self.colorize('device {}'.format(device_id), 'red'), 'bold') + ') '
+ self.pm_config_last = None
+ self.pm_config_dirty = False
+
+ def cmdloop(self):
+ self._cmdloop()
+
+ def get_device(self, depth=0):
+ stub = self.get_stub()
+ res = stub.GetDevice(voltha_pb2.ID(id=self.device_id),
+ metadata=(('get-depth', str(depth)), ))
+ return res
+
+ do_exit = Cmd.do_quit
+
+ def do_quit(self, line):
+ if self.pm_config_dirty:
+ self.poutput("Uncommited changes for " + \
+ self.colorize(
+ self.colorize("perf_config,", "blue"),
+ "bold") + " please either " + self.colorize(
+ self.colorize("commit", "blue"), "bold") + \
+ " or " + self.colorize(
+ self.colorize("reset", "blue"), "bold") + \
+ " your changes using " + \
+ self.colorize(
+ self.colorize("perf_config", "blue"), "bold"))
+ return False
+ else:
+ return self._STOP_AND_EXIT
+
+ def do_show(self, line):
+ """Show detailed device information"""
+ print_pb_as_table('Device {}'.format(self.device_id),
+ self.get_device(depth=-1))
+
+ def do_ports(self, line):
+ """Show ports of device"""
+ device = self.get_device(depth=-1)
+ omit_fields = {
+ }
+ print_pb_list_as_table('Device ports:', device.ports,
+ omit_fields, self.poutput)
+
+ def complete_perf_config(self, text, line, begidx, endidx):
+ sub_cmds = {"show", "set", "commit", "reset"}
+ sub_opts = {"-f", "-e", "-d", "-o"}
+        # Help the interpreter complete the parameters.
+ completions = []
+ if not self.pm_config_last:
+ device = self.get_device(depth=-1)
+ self.pm_config_last = device.pm_configs
+ m_names = [d.name for d in self.pm_config_last.metrics]
+ cur_cmd = line.strip().split(" ")
+ try:
+ if not text and len(cur_cmd) == 1:
+ completions = ("show", "set", "commit", "reset")
+ elif len(cur_cmd) == 2:
+ if "set" == cur_cmd[1]:
+ completions = [d for d in sub_opts]
+ else:
+ completions = [d for d in sub_cmds if d.startswith(text)]
+ elif len(cur_cmd) > 2 and cur_cmd[1] == "set":
+ if cur_cmd[len(cur_cmd)-1] == "-":
+ completions = [list(d)[1] for d in sub_opts]
+ elif cur_cmd[len(cur_cmd)-1] == "-f":
+ completions = ("\255","Please enter a sampling frequency in 10ths of a second")
+ elif cur_cmd[len(cur_cmd)-2] == "-f":
+ completions = [d for d in sub_opts]
+ elif cur_cmd[len(cur_cmd)-1] in {"-e","-d","-o"}:
+ if self.pm_config_last.grouped:
+ pass
+ else:
+ completions = [d.name for d in self.pm_config_last.metrics]
+ elif cur_cmd[len(cur_cmd)-2] in {"-e","-d"}:
+ if text and text not in m_names:
+ completions = [d for d in m_names if d.startswith(text)]
+ else:
+ completions = [d for d in sub_opts]
+ elif cur_cmd[len(cur_cmd)-2] == "-o":
+ if cur_cmd[len(cur_cmd)-1] in [d.name for d in self.pm_config_last.metrics]:
+ completions = ("\255","Please enter a sampling frequency in 10ths of a second")
+ else:
+ completions = [d for d in m_names if d.startswith(text)]
+ elif cur_cmd[len(cur_cmd)-3] == "-o":
+ completions = [d for d in sub_opts]
+ except:
+ e = sys.exc_info()
+ print(e)
+ return completions
+
+
+ def help_perf_config(self):
+ self.poutput(
+'''
+perf_config [show | set | commit | reset] [-f <default frequency>] [{-e <metric/group
+ name>}] [{-d <metric/group name>}] [{-o <metric/group name> <override
+ frequency>}]
+
+show: displays the performance configuration of the device
+set: changes the parameters specified with -e, -d, and -o
+reset: reverts any changes made since the last commit
+commit: commits any changes made, applying them to the device.
+
+-e: enable collection of the specified metric, more than one -e may be
+ specified.
+-d: disable collection of the specified metric, more than one -d may be
+ specified.
+-o: override the collection frequency of the specified metric, more than one -o
+ may be specified. Note that -o isn't valid unless
+    freq_override is set to True for the device.
+
+Changes made by set are held locally until a commit or reset command is issued.
+A commit command will write the configuration to the device, where it takes
+effect immediately. The reset command will discard any uncommitted changes and
+revert to the configuration currently on the device.
+
+If grouped is true then the -d, -e and -o commands refer to groups and not
+individual metrics.
+'''
+ )
+
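+    # Example perf_config session (illustrative; actual metric names come from
+    # the device's PM configuration):
+    #   perf_config set -f 20 -e rx_packets -d tx_errors
+    #   perf_config commit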
+ @options([
+ make_option('-f', '--default_freq', action="store", dest='default_freq',
+ type='long', default=None),
+ make_option('-e', '--enable', action='append', dest='enable',
+ default=None),
+ make_option('-d', '--disable', action='append', dest='disable',
+ default=None),
+ make_option('-o', '--override', action='append', dest='override',
+ nargs=2, default=None, type='string'),
+ ])
+ def do_perf_config(self, line, opts):
+ """Show and set the performance monitoring configuration of the device"""
+
+ device = self.get_device(depth=-1)
+ if not self.pm_config_last:
+ self.pm_config_last = device.pm_configs
+
+ # Ensure that a valid sub-command was provided
+ if line.strip() not in {"set", "show", "commit", "reset", ""}:
+ self.poutput(self.colorize('Error: ', 'red') +
+ self.colorize(self.colorize(line.strip(), 'blue'),
+ 'bold') + ' is not recognized')
+ return
+
+ # Ensure no options are provided when requesting to view the config
+ if line.strip() == "show" or line.strip() == "":
+ if opts.default_freq or opts.enable or opts.disable:
+ self.poutput(opts.disable)
+ self.poutput(self.colorize('Error: ', 'red') + 'use ' +
+ self.colorize(self.colorize('"set"', 'blue'),
+ 'bold') + ' to change settings')
+ return
+
+ if line.strip() == "set": # Set the supplied values
+ metric_list = set()
+ if opts.enable is not None:
+ metric_list |= {metric for metric in opts.enable}
+ if opts.disable is not None:
+ metric_list |= {metric for metric in opts.disable}
+ if opts.override is not None:
+ metric_list |= {metric for metric, _ in opts.override}
+
+ # The default frequency
+ if opts.default_freq:
+ self.pm_config_last.default_freq = opts.default_freq
+ self.pm_config_dirty = True
+
+ # Field or group visibility
+ if self.pm_config_last.grouped:
+ for g in self.pm_config_last.groups:
+ if opts.enable:
+ if g.group_name in opts.enable:
+ g.enabled = True
+ self.pm_config_dirty = True
+ metric_list.discard(g.group_name)
+ for g in self.pm_config_last.groups:
+ if opts.disable:
+ if g.group_name in opts.disable:
+ g.enabled = False
+ self.pm_config_dirty = True
+ metric_list.discard(g.group_name)
+ else:
+ for m in self.pm_config_last.metrics:
+ if opts.enable:
+ if m.name in opts.enable:
+ m.enabled = True
+ self.pm_config_dirty = True
+ metric_list.discard(m.name)
+ for m in self.pm_config_last.metrics:
+ if opts.disable:
+ if m.name in opts.disable:
+ m.enabled = False
+ self.pm_config_dirty = True
+ metric_list.discard(m.name)
+
+ # Frequency overrides.
+ if opts.override:
+ if self.pm_config_last.freq_override:
+ oo = dict()
+ for o in opts.override:
+ oo[o[0]] = o[1]
+ if self.pm_config_last.grouped:
+ for g in self.pm_config_last.groups:
+ if g.group_name in oo:
+ try:
+ g.group_freq = int(oo[g.group_name])
+ except ValueError:
+ self.poutput(self.colorize('Warning: ',
+ 'yellow') +
+ self.colorize(oo[g.group_name],
+ 'blue') +
+ " is not an integer... ignored")
+ del oo[g.group_name]
+ self.pm_config_dirty = True
+ metric_list.discard(g.group_name)
+ else:
+ for m in self.pm_config_last.metrics:
+ if m.name in oo:
+ try:
+ m.sample_freq = int(oo[m.name])
+ except ValueError:
+ self.poutput(self.colorize('Warning: ',
+ 'yellow') +
+ self.colorize(oo[m.name],
+ 'blue') +
+ " is not an integer... ignored")
+ del oo[m.name]
+ self.pm_config_dirty = True
+ metric_list.discard(m.name)
+
+                    # Anything still left in oo did not match a known group/metric name
+ if self.pm_config_last.grouped:
+ field = 'group'
+ else:
+ field = 'metric'
+ for o in oo:
+ self.poutput(self.colorize('Warning: ', 'yellow') +
+ 'the parameter' + ' ' +
+ self.colorize(o, 'blue') + ' is not ' +
+ 'a ' + field + ' name... ignored')
+ if oo:
+ return
+
+ else: # Frequency overrides not enabled
+ self.poutput(self.colorize('Error: ', 'red') +
+ 'Individual overrides are only ' +
+ 'supported if ' +
+ self.colorize('freq_override', 'blue') +
+ ' is set to ' + self.colorize('True', 'blue'))
+ return
+
+ if len(metric_list):
+ metric_name_list = ", ".join(str(metric) for metric in metric_list)
+ self.poutput(self.colorize('Error: ', 'red') +
+ 'Metric/Metric Group{} '.format('s' if len(metric_list) > 1 else '') +
+ self.colorize(metric_name_list, 'blue') +
+ ' {} not found'.format('were' if len(metric_list) > 1 else 'was'))
+ return
+
+ self.poutput("Success")
+ return
+
+ elif line.strip() == "commit" and self.pm_config_dirty:
+ stub = self.get_stub()
+ stub.UpdateDevicePmConfigs(self.pm_config_last)
+ self.pm_config_last = self.get_device(depth=-1).pm_configs
+ self.pm_config_dirty = False
+
+ elif line.strip() == "reset" and self.pm_config_dirty:
+ self.pm_config_last = self.get_device(depth=-1).pm_configs
+ self.pm_config_dirty = False
+
+ omit_fields = {'groups', 'metrics', 'id'}
+ print_pb_as_table('PM Config:', self.pm_config_last, omit_fields,
+ self.poutput,show_nulls=True)
+ if self.pm_config_last.grouped:
+ #self.poutput("Supported metric groups:")
+ for g in self.pm_config_last.groups:
+ if self.pm_config_last.freq_override:
+ omit_fields = {'metrics'}
+ else:
+ omit_fields = {'group_freq','metrics'}
+ print_pb_as_table('', g, omit_fields, self.poutput,
+ show_nulls=True)
+ if g.enabled:
+ state = 'enabled'
+ else:
+ state = 'disabled'
+ print_pb_list_as_table(
+ 'Metric group {} is {}'.format(g.group_name,state),
+ g.metrics, {'enabled', 'sample_freq'}, self.poutput,
+ dividers=100, show_nulls=True)
+ else:
+ if self.pm_config_last.freq_override:
+ omit_fields = {}
+ else:
+ omit_fields = {'sample_freq'}
+ print_pb_list_as_table('Supported metrics:', self.pm_config_last.metrics,
+ omit_fields, self.poutput, dividers=100,
+ show_nulls=True)
+
+ def do_flows(self, line):
+ """Show flow table for device"""
+ device = pb2dict(self.get_device(-1))
+ print_flows(
+ 'Device',
+ self.device_id,
+ type=device['type'],
+ flows=device['flows']['items'],
+ groups=device['flow_groups']['items']
+ )
+
+ def do_images(self, line):
+ """Show software images on the device"""
+ device = self.get_device(depth=-1)
+ omit_fields = {}
+ print_pb_list_as_table('Software Images:', device.images.image,
+ omit_fields, self.poutput, show_nulls=True)
+
+ @options([
+ make_option('-u', '--url', action='store', dest='url',
+ help="URL to get sw image"),
+ make_option('-n', '--name', action='store', dest='name',
+ help="Image name"),
+ make_option('-c', '--crc', action='store', dest='crc',
+ help="CRC code to verify with", default=0),
+ make_option('-v', '--version', action='store', dest='version',
+ help="Image version", default=0),
+ ])
+ def do_img_dnld_request(self, line, opts):
+ """
+ Request image download to a device
+ """
+ device = self.get_device(depth=-1)
+ self.poutput('device_id {}'.format(device.id))
+ self.poutput('name {}'.format(opts.name))
+ self.poutput('url {}'.format(opts.url))
+ self.poutput('crc {}'.format(opts.crc))
+ self.poutput('version {}'.format(opts.version))
+ try:
+ device_id = device.id
+ if device_id and opts.name and opts.url:
+ kw = dict(id=device_id)
+ kw['name'] = opts.name
+ kw['url'] = opts.url
+ else:
+                self.poutput('Device ID, Image Name and URL are needed')
+                raise Exception('Device ID, Image Name and URL are needed')
+ except Exception as e:
+ self.poutput('Error request img dnld {}. Error:{}'.format(device_id, e))
+ return
+ kw['crc'] = long(opts.crc)
+ kw['image_version'] = opts.version
+ response = None
+ try:
+ request = voltha_pb2.ImageDownload(**kw)
+ stub = self.get_stub()
+ response = stub.DownloadImage(request)
+ except Exception as e:
+ self.poutput('Error download image {}. Error:{}'.format(kw['id'], e))
+ return
+ name = enum2name(common_pb2.OperationResp,
+ 'OperationReturnCode', response.code)
+ self.poutput('response: {}'.format(name))
+ self.poutput('{}'.format(response))
+
+ @options([
+ make_option('-n', '--name', action='store', dest='name',
+ help="Image name"),
+ ])
+ def do_img_dnld_status(self, line, opts):
+ """
+        Get an image download status
+ """
+ device = self.get_device(depth=-1)
+ self.poutput('device_id {}'.format(device.id))
+ self.poutput('name {}'.format(opts.name))
+ try:
+ device_id = device.id
+ if device_id and opts.name:
+ kw = dict(id=device_id)
+ kw['name'] = opts.name
+ else:
+ self.poutput('Device ID, Image Name are needed')
+ raise Exception('Device ID, Image Name are needed')
+ except Exception as e:
+ self.poutput('Error get img dnld status {}. Error:{}'.format(device_id, e))
+ return
+ status = None
+ try:
+ img_dnld = voltha_pb2.ImageDownload(**kw)
+ stub = self.get_stub()
+ status = stub.GetImageDownloadStatus(img_dnld)
+ except Exception as e:
+ self.poutput('Error get img dnld status {}. Error:{}'.format(device_id, e))
+ return
+ fields_to_omit = {
+ 'crc',
+ 'local_dir',
+ }
+ try:
+ print_pb_as_table('ImageDownload Status:', status, fields_to_omit, self.poutput)
+        except Exception as e:
+ self.poutput('Error {}. Error:{}'.format(device_id, e))
+
+ def do_img_dnld_list(self, line):
+ """
+ List all image download records for a given device
+ """
+ device = self.get_device(depth=-1)
+ device_id = device.id
+ self.poutput('Get all img dnld records {}'.format(device_id))
+ try:
+ stub = self.get_stub()
+ img_dnlds = stub.ListImageDownloads(voltha_pb2.ID(id=device_id))
+        except Exception as e:
+ self.poutput('Error list img dnlds {}. Error:{}'.format(device_id, e))
+ return
+ fields_to_omit = {
+ 'crc',
+ 'local_dir',
+ }
+ try:
+ print_pb_list_as_table('ImageDownloads:', img_dnlds.items, fields_to_omit, self.poutput)
+        except Exception as e:
+ self.poutput('Error {}. Error:{}'.format(device_id, e))
+
+
+ @options([
+ make_option('-n', '--name', action='store', dest='name',
+ help="Image name"),
+ ])
+ def do_img_dnld_cancel(self, line, opts):
+ """
+ Cancel a requested image download
+ """
+ device = self.get_device(depth=-1)
+ self.poutput('device_id {}'.format(device.id))
+ self.poutput('name {}'.format(opts.name))
+ device_id = device.id
+ try:
+ if device_id and opts.name:
+ kw = dict(id=device_id)
+ kw['name'] = opts.name
+ else:
+ self.poutput('Device ID, Image Name are needed')
+ raise Exception('Device ID, Image Name are needed')
+ except Exception as e:
+ self.poutput('Error cancel sw dnld {}. Error:{}'.format(device_id, e))
+ return
+ response = None
+ try:
+ img_dnld = voltha_pb2.ImageDownload(**kw)
+ stub = self.get_stub()
+ img_dnld = stub.GetImageDownload(img_dnld)
+ response = stub.CancelImageDownload(img_dnld)
+ except Exception as e:
+ self.poutput('Error cancel sw dnld {}. Error:{}'.format(device_id, e))
+ return
+ name = enum2name(common_pb2.OperationResp,
+ 'OperationReturnCode', response.code)
+ self.poutput('response: {}'.format(name))
+ self.poutput('{}'.format(response))
+
+ @options([
+ make_option('-n', '--name', action='store', dest='name',
+ help="Image name"),
+ make_option('-s', '--save', action='store', dest='save_config',
+ help="Save Config", default="True"),
+ make_option('-d', '--dir', action='store', dest='local_dir',
+ help="Image on device location"),
+ ])
+ def do_img_activate(self, line, opts):
+ """
+ Activate an image update on device
+ """
+ device = self.get_device(depth=-1)
+ device_id = device.id
+ try:
+ if device_id and opts.name and opts.local_dir:
+ kw = dict(id=device_id)
+ kw['name'] = opts.name
+ kw['local_dir'] = opts.local_dir
+ else:
+ self.poutput('Device ID, Image Name, and Location are needed')
+ raise Exception('Device ID, Image Name, and Location are needed')
+ except Exception as e:
+ self.poutput('Error activate image {}. Error:{}'.format(device_id, e))
+ return
+ kw['save_config'] = json.loads(opts.save_config.lower())
+ self.poutput('activate image update {} {} {} {}'.format( \
+ kw['id'], kw['name'],
+ kw['local_dir'], kw['save_config']))
+ response = None
+ try:
+ img_dnld = voltha_pb2.ImageDownload(**kw)
+ stub = self.get_stub()
+ img_dnld = stub.GetImageDownload(img_dnld)
+ response = stub.ActivateImageUpdate(img_dnld)
+ except Exception as e:
+ self.poutput('Error activate image {}. Error:{}'.format(kw['id'], e))
+ return
+ name = enum2name(common_pb2.OperationResp,
+ 'OperationReturnCode', response.code)
+ self.poutput('response: {}'.format(name))
+ self.poutput('{}'.format(response))
+
+ @options([
+ make_option('-n', '--name', action='store', dest='name',
+ help="Image name"),
+ make_option('-s', '--save', action='store', dest='save_config',
+ help="Save Config", default="True"),
+ make_option('-d', '--dir', action='store', dest='local_dir',
+ help="Image on device location"),
+ ])
+ def do_img_revert(self, line, opts):
+ """
+ Revert an image update on device
+ """
+ device = self.get_device(depth=-1)
+ device_id = device.id
+ try:
+ if device_id and opts.name and opts.local_dir:
+ kw = dict(id=device_id)
+ kw['name'] = opts.name
+ kw['local_dir'] = opts.local_dir
+ else:
+ self.poutput('Device ID, Image Name, and Location are needed')
+ raise Exception('Device ID, Image Name, and Location are needed')
+ except Exception as e:
+ self.poutput('Error revert image {}. Error:{}'.format(device_id, e))
+ return
+ kw['save_config'] = json.loads(opts.save_config.lower())
+ self.poutput('revert image update {} {} {} {}'.format( \
+ kw['id'], kw['name'],
+ kw['local_dir'], kw['save_config']))
+ response = None
+ try:
+ img_dnld = voltha_pb2.ImageDownload(**kw)
+ stub = self.get_stub()
+ img_dnld = stub.GetImageDownload(img_dnld)
+ response = stub.RevertImageUpdate(img_dnld)
+ except Exception as e:
+ self.poutput('Error revert image {}. Error:{}'.format(kw['id'], e))
+ return
+ name = enum2name(common_pb2.OperationResp,
+ 'OperationReturnCode', response.code)
+ self.poutput('response: {}'.format(name))
+ self.poutput('{}'.format(response))
diff --git a/python/cli/logical_device.py b/python/cli/logical_device.py
new file mode 100644
index 0000000..cd991c6
--- /dev/null
+++ b/python/cli/logical_device.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Logical device level CLI commands
+"""
+from cmd2 import Cmd
+from simplejson import dumps
+
+from table import print_pb_as_table, print_pb_list_as_table
+from utils import pb2dict
+from utils import print_flows, print_groups
+from python.protos import third_party
+from google.protobuf.empty_pb2 import Empty
+
+_ = third_party
+from python.protos import voltha_pb2
+
+
+class LogicalDeviceCli(Cmd):
+
+ def __init__(self, logical_device_id, get_stub):
+ Cmd.__init__(self)
+ self.get_stub = get_stub
+ self.logical_device_id = logical_device_id
+ self.prompt = '(' + self.colorize(
+ self.colorize('logical device {}'.format(logical_device_id), 'red'),
+ 'bold') + ') '
+
+ def cmdloop(self):
+ self._cmdloop()
+
+ def get_logical_device(self, depth=0):
+ stub = self.get_stub()
+ res = stub.GetLogicalDevice(voltha_pb2.ID(id=self.logical_device_id),
+ metadata=(('get-depth', str(depth)), ))
+ return res
+
+ def get_device(self, id):
+ stub = self.get_stub()
+ return stub.GetDevice(voltha_pb2.ID(id=id))
+
+ def get_devices(self):
+ stub = self.get_stub()
+ res = stub.ListDevices(Empty())
+ return res.items
+
+ do_exit = Cmd.do_quit
+
+ def do_show(self, _):
+ """Show detailed logical device information"""
+ print_pb_as_table('Logical device {}'.format(self.logical_device_id),
+ self.get_logical_device(depth=-1))
+
+ def do_ports(self, _):
+ """Show ports of logical device"""
+ device = self.get_logical_device(depth=-1)
+ omit_fields = {
+ 'ofp_port.advertised',
+ 'ofp_port.peer',
+ 'ofp_port.max_speed'
+ }
+ print_pb_list_as_table('Logical device ports:', device.ports,
+ omit_fields, self.poutput)
+
+ def do_flows(self, _):
+ """Show flow table for logical device"""
+ logical_device = pb2dict(self.get_logical_device(-1))
+ print_flows(
+ 'Logical Device',
+ self.logical_device_id,
+ type='n/a',
+ flows=logical_device['flows']['items'],
+ groups=logical_device['flow_groups']['items']
+ )
+
+ def do_groups(self, _):
+ """Show flow group table for logical device"""
+ logical_device = pb2dict(self.get_logical_device(-1))
+ print_groups(
+ 'Logical Device',
+ self.logical_device_id,
+ type='n/a',
+ groups=logical_device['flow_groups']['items']
+ )
+
+ def do_devices(self, line):
+ """List devices that belong to this logical device"""
+ logical_device = self.get_logical_device()
+ root_device_id = logical_device.root_device_id
+ devices = [self.get_device(root_device_id)]
+ for d in self.get_devices():
+ if d.parent_id == root_device_id:
+ devices.append(d)
+ omit_fields = {
+ 'adapter',
+ 'vendor',
+ 'model',
+ 'hardware_version',
+ 'software_version',
+ 'firmware_version',
+ 'serial_number'
+ }
+ print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
diff --git a/python/cli/main.py b/python/cli/main.py
new file mode 100755
index 0000000..0348f66
--- /dev/null
+++ b/python/cli/main.py
@@ -0,0 +1,922 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import os
+import readline
+import sys
+from optparse import make_option
+from time import sleep, time
+
+import grpc
+import requests
+from cmd2 import Cmd, options
+from consul import Consul
+from google.protobuf.empty_pb2 import Empty
+from simplejson import dumps
+
+from device import DeviceCli
+from omci import OmciCli
+from alarm_filters import AlarmFiltersCli
+from logical_device import LogicalDeviceCli
+from table import print_pb_list_as_table
+from python.common.openflow.utils import *
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.openflow_13_pb2 import FlowTableUpdate, FlowGroupTableUpdate
+
+_ = third_party
+from python.cli.utils import pb2dict
+
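+# Each default below can be overridden through the corresponding environment
+# variable, e.g. (values and invocation illustrative):
+#   CONSUL=10.0.2.15:8500 VOLTHA_GRPC_ENDPOINT=10.0.2.15:50057 ./main.py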
+defs = dict(
+ # config=os.environ.get('CONFIG', './cli.yml'),
+ consul=os.environ.get('CONSUL', 'localhost:8500'),
+ voltha_grpc_endpoint=os.environ.get('VOLTHA_GRPC_ENDPOINT',
+ 'localhost:50057'),
+ voltha_sim_rest_endpoint=os.environ.get('VOLTHA_SIM_REST_ENDPOINT',
+ 'localhost:18880'),
+ global_request=os.environ.get('GLOBAL_REQUEST', False)
+)
+
+banner = """\
+ _ _ _ ___ _ ___
+__ _____| | |_| |_ __ _ / __| | |_ _|
+\ V / _ \ | _| ' \/ _` | | (__| |__ | |
+ \_/\___/_|\__|_||_\__,_| \___|____|___|
+(to exit type quit or hit Ctrl-D)
+"""
+
+
+class VolthaCli(Cmd):
+ prompt = 'voltha'
+ history_file_name = '.voltha_cli_history'
+
+ # Settable CLI parameters
+ voltha_grpc = 'localhost:50057'
+ voltha_sim_rest = 'localhost:18880'
+ global_request = False
+ max_history_lines = 500
+ default_device_id = None
+ default_logical_device_id = None
+
+ Cmd.settable.update(dict(
+ voltha_grpc='Voltha GRPC endpoint in form of <host>:<port>',
+ voltha_sim_rest='Voltha simulation back door for testing in form '
+ 'of <host>:<port>',
+ max_history_lines='Maximum number of history lines stored across '
+ 'sessions',
+ default_device_id='Device id used when no device id is specified',
+ default_logical_device_id='Logical device id used when no device id '
+ 'is specified',
+ ))
+
+ # cleanup of superfluous commands from cmd2
+ del Cmd.do_cmdenvironment
+ del Cmd.do_load
+ del Cmd.do__relative_load
+
+ def __init__(self, voltha_grpc, voltha_sim_rest, global_request=False):
+
+ VolthaCli.voltha_grpc = "localhost:50057"
+ # VolthaCli.voltha_grpc = voltha_grpc
+ VolthaCli.voltha_sim_rest = voltha_sim_rest
+ VolthaCli.global_request = global_request
+ Cmd.__init__(self)
+ self.prompt = '(' + self.colorize(
+ self.colorize(self.prompt, 'blue'), 'bold') + ') '
+ self.channel = None
+ self.stub = None
+ self.device_ids_cache = None
+ self.device_ids_cache_ts = time()
+ self.logical_device_ids_cache = None
+ self.logical_device_ids_cache_ts = time()
+
+ # we override cmd2's method to avoid its optparse conflicting with our
+ # command line parsing
+ def cmdloop(self):
+ self._cmdloop()
+
+ def load_history(self):
+ """Load saved command history from local history file"""
+ try:
+            with open(self.history_file_name, 'r') as f:
+ for line in f.readlines():
+ stripped_line = line.strip()
+ self.history.append(stripped_line)
+ readline.add_history(stripped_line)
+ except IOError:
+ pass # ignore if file cannot be read
+
+ def save_history(self):
+ try:
+ with open(self.history_file_name, 'w') as f:
+ f.write('\n'.join(self.history[-self.max_history_lines:]))
+ except IOError as e:
+ self.perror('Could not save history in {}: {}'.format(
+ self.history_file_name, e))
+ else:
+ self.poutput('History saved as {}'.format(
+ self.history_file_name))
+
+ def perror(self, errmsg, statement=None):
+ # Touch it up to make sure error is prefixed and colored
+ Cmd.perror(self, self.colorize('***ERROR: ', 'red') + errmsg,
+ statement)
+
+ def get_channel(self):
+ if self.channel is None:
+ self.channel = grpc.insecure_channel(self.voltha_grpc)
+ return self.channel
+
+ def get_stub(self):
+ if self.stub is None:
+ self.stub = voltha_pb2.VolthaServiceStub(self.get_channel())
+ # self.stub = \
+ # voltha_pb2.VolthaGlobalServiceStub(self.get_channel()) \
+ # if self.global_request else \
+ # voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+ return self.stub
+
+ # ~~~~~~~~~~~~~~~~~ ACTUAL COMMAND IMPLEMENTATIONS ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ def do_reset_history(self, line):
+ """Reset CLI history"""
+ while self.history:
+ self.history.pop()
+
+ def do_launch(self, line):
+ """If Voltha is not running yet, launch it"""
+ raise NotImplementedError('not implemented yet')
+
+ def do_restart(self, line):
+ """Launch Voltha, but if it is already running, terminate it first"""
+ pass
+
+ def do_adapters(self, line):
+ """List loaded adapter"""
+ stub = self.get_stub()
+ res = stub.ListAdapters(Empty())
+ omit_fields = {'config.log_level', 'logical_device_ids'}
+ print_pb_list_as_table('Adapters:', res.items, omit_fields, self.poutput)
+
+ def get_devices(self):
+ stub = self.get_stub()
+ res = stub.ListDevices(Empty())
+ return res.items
+
+ def get_logical_devices(self):
+ stub = self.get_stub()
+ res = stub.ListLogicalDevices(Empty())
+ return res.items
+
+ def do_devices(self, line):
+ """List devices registered in Voltha"""
+ devices = self.get_devices()
+ omit_fields = {
+ 'adapter',
+ 'vendor',
+ 'model',
+ 'hardware_version',
+ 'images',
+ 'firmware_version',
+ 'vendor_id'
+ }
+ print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
+ def do_logical_devices(self, line):
+ """List logical devices in Voltha"""
+ stub = self.get_stub()
+ res = stub.ListLogicalDevices(Empty())
+ omit_fields = {
+ 'desc.mfr_desc',
+ 'desc.hw_desc',
+ 'desc.sw_desc',
+ 'desc.dp_desc',
+ 'desc.serial_number',
+ 'switch_features.capabilities'
+ }
+ presfns = {
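+            # render the numeric datapath_id as a zero-padded 16-digit hex string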
+ 'datapath_id': lambda x: "{0:0{1}x}".format(int(x), 16)
+ }
+ print_pb_list_as_table('Logical devices:', res.items, omit_fields,
+ self.poutput, presfns=presfns)
+
+ def do_device(self, line):
+ """Enter device level command mode"""
+ device_id = line.strip() or self.default_device_id
+ if not device_id:
+ raise Exception('<device-id> parameter needed')
+ if device_id not in self.device_ids():
+ self.poutput( self.colorize('Error: ', 'red') +
+ 'There is no such device')
+ raise Exception('<device-id> is not a valid one')
+ sub = DeviceCli(device_id, self.get_stub)
+ sub.cmdloop()
+
+ def do_logical_device(self, line):
+ """Enter logical device level command mode"""
+ logical_device_id = line.strip() or self.default_logical_device_id
+ if not logical_device_id:
+ raise Exception('<logical-device-id> parameter needed')
+ if logical_device_id not in self.logical_device_ids():
+ self.poutput( self.colorize('Error: ', 'red') +
+ 'There is no such device')
+ raise Exception('<logical-device-id> is not a valid one')
+ sub = LogicalDeviceCli(logical_device_id, self.get_stub)
+ sub.cmdloop()
+
+ def device_ids(self, force_refresh=False):
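+        # Return the cached list of device ids, refreshing it when a refresh is
+        # forced or when the cache is more than one second old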
+        if force_refresh or self.device_ids_cache is None or \
+ (time() - self.device_ids_cache_ts) > 1:
+ self.device_ids_cache = [d.id for d in self.get_devices()]
+ self.device_ids_cache_ts = time()
+ return self.device_ids_cache
+
+ def logical_device_ids(self, force_refresh=False):
+        if force_refresh or self.logical_device_ids_cache is None or \
+ (time() - self.logical_device_ids_cache_ts) > 1:
+ self.logical_device_ids_cache = [d.id for d
+ in self.get_logical_devices()]
+ self.logical_device_ids_cache_ts = time()
+ return self.logical_device_ids_cache
+
+ def complete_device(self, text, line, begidx, endidx):
+ if not text:
+ completions = self.device_ids()[:]
+ else:
+ completions = [d for d in self.device_ids() if d.startswith(text)]
+ return completions
+
+ def complete_logical_device(self, text, line, begidx, endidx):
+ if not text:
+ completions = self.logical_device_ids()[:]
+ else:
+ completions = [d for d in self.logical_device_ids()
+ if d.startswith(text)]
+ return completions
+
+ def do_xpon(self, line):
+ """xpon <optional> [device_ID] - Enter xpon level command mode"""
+ device_id = line.strip()
+ if device_id:
+ stub = self.get_stub()
+ try:
+ res = stub.GetDevice(voltha_pb2.ID(id=device_id))
+ except Exception:
+ self.poutput(
+ self.colorize('Error: ', 'red') + 'No device id ' +
+ self.colorize(device_id, 'blue') + ' is found')
+ return
+ sub = XponCli(self.get_channel, device_id)
+ sub.cmdloop()
+
+ def do_omci(self, line):
+ """omci <device_ID> - Enter OMCI level command mode"""
+
+ device_id = line.strip() or self.default_device_id
+ if not device_id:
+ raise Exception('<device-id> parameter needed')
+ sub = OmciCli(device_id, self.get_stub)
+ sub.cmdloop()
+
+ def do_pdb(self, line):
+ """Launch PDB debug prompt in CLI (for CLI development)"""
+ from pdb import set_trace
+ set_trace()
+
+ def do_version(self, line):
+ """Show the VOLTHA core version"""
+ stub = self.get_stub()
+ voltha = stub.GetVoltha(Empty())
+ self.poutput('{}'.format(voltha.version))
+
+ def do_health(self, line):
+ """Show connectivity status to Voltha status"""
+ stub = voltha_pb2.HealthServiceStub(self.get_channel())
+ res = stub.GetHealthStatus(Empty())
+ self.poutput(dumps(pb2dict(res), indent=4))
+
+ @options([
+ make_option('-t', '--device-type', action="store", dest='device_type',
+ help="Device type", default='simulated_olt'),
+ make_option('-m', '--mac-address', action='store', dest='mac_address',
+ default='00:0c:e2:31:40:00'),
+ make_option('-i', '--ip-address', action='store', dest='ip_address'),
+ make_option('-H', '--host_and_port', action='store',
+ dest='host_and_port'),
+ ])
+ def do_preprovision_olt(self, line, opts):
+ """Preprovision a new OLT with given device type"""
+ stub = self.get_stub()
+ kw = dict(type=opts.device_type)
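+        # Exactly one addressing option is applied, preferring host_and_port,
+        # then the IP address, then the MAC address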
+ if opts.host_and_port:
+ kw['host_and_port'] = opts.host_and_port
+ elif opts.ip_address:
+ kw['ipv4_address'] = opts.ip_address
+ elif opts.mac_address:
+ kw['mac_address'] = opts.mac_address.lower()
+ else:
+            raise Exception('Either an IP address or a MAC address is needed')
+ # Pass any extra arguments past '--' to the device as custom arguments
+ kw['extra_args'] = line
+
+ device = voltha_pb2.Device(**kw)
+ device = stub.CreateDevice(device)
+ self.poutput('success (device id = {})'.format(device.id))
+ self.default_device_id = device.id
+
+ def do_enable(self, line):
+ """
+        Enable a device. If the <id> is not provided, it will be applied to the
+        last pre-provisioned device.
+ """
+ device_id = line or self.default_device_id
+ if device_id not in self.device_ids():
+ self.poutput('Error: There is no such preprovisioned device')
+ return
+
+ try:
+ stub = self.get_stub()
+ device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+ if device.admin_state == voltha_pb2.AdminState.ENABLED:
+ if device.oper_status != voltha_pb2.OperStatus.ACTIVATING:
+ self.poutput('Error: Device is already enabled')
+ return
+ else:
+ stub.EnableDevice(voltha_pb2.ID(id=device_id))
+ self.poutput('enabling {}'.format(device_id))
+
+ while True:
+ device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+ # If this is an OLT then acquire logical device id
+ if device.oper_status == voltha_pb2.OperStatus.ACTIVE:
+ if device.type.endswith('_olt'):
+ assert device.parent_id
+ self.default_logical_device_id = device.parent_id
+ self.poutput('success (logical device id = {})'.format(
+ self.default_logical_device_id))
+ else:
+ self.poutput('success (device id = {})'.format(device.id))
+ break
+ self.poutput('waiting for device to be enabled...')
+ sleep(.5)
+ except Exception as e:
+ self.poutput('Error enabling {}. Error:{}'.format(device_id, e))
+
+ complete_activate_olt = complete_device
+
+ def do_reboot(self, line):
+ """
+        Reboot a device. The device ID needs to be provided
+ """
+ device_id = line or self.default_device_id
+ self.poutput('rebooting {}'.format(device_id))
+ try:
+ stub = self.get_stub()
+ stub.RebootDevice(voltha_pb2.ID(id=device_id))
+ self.poutput('rebooted {}'.format(device_id))
+ except Exception as e:
+ self.poutput('Error rebooting {}. Error:{}'.format(device_id, e))
+
+ def do_self_test(self, line):
+ """
+        Self-test a device. The device ID needs to be provided
+ """
+ device_id = line or self.default_device_id
+ self.poutput('Self Testing {}'.format(device_id))
+ try:
+ stub = self.get_stub()
+ res = stub.SelfTest(voltha_pb2.ID(id=device_id))
+ self.poutput('Self Tested {}'.format(device_id))
+ self.poutput(dumps(pb2dict(res), indent=4))
+ except Exception as e:
+ self.poutput('Error in self test {}. Error:{}'.format(device_id, e))
+
+ def do_delete(self, line):
+ """
+        Delete a device. The device ID needs to be provided
+ """
+ device_id = line or self.default_device_id
+ self.poutput('deleting {}'.format(device_id))
+ try:
+ stub = self.get_stub()
+ stub.DeleteDevice(voltha_pb2.ID(id=device_id))
+ self.poutput('deleted {}'.format(device_id))
+ except Exception as e:
+ self.poutput('Error deleting {}. Error:{}'.format(device_id, e))
+
+ def do_disable(self, line):
+ """
+        Disable a device. The device ID needs to be provided
+ """
+ device_id = line
+ if device_id not in self.device_ids():
+ self.poutput('Error: There is no such device')
+ return
+ try:
+ stub = self.get_stub()
+ device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+ if device.admin_state == voltha_pb2.AdminState.DISABLED:
+ self.poutput('Error: Device is already disabled')
+ return
+ stub.DisableDevice(voltha_pb2.ID(id=device_id))
+ self.poutput('disabling {}'.format(device_id))
+
+ # Do device query and verify that the device admin status is
+ # DISABLED and Operational Status is unknown
+ device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+ if device.admin_state == voltha_pb2.AdminState.DISABLED:
+ self.poutput('disabled successfully {}'.format(device_id))
+ else:
+ self.poutput('disabling failed {}. Admin State:{} '
+ 'Operation State: {}'.format(device_id,
+ device.admin_state,
+ device.oper_status))
+ except Exception as e:
+ self.poutput('Error disabling {}. Error:{}'.format(device_id, e))
+
+ def do_test(self, line):
+ """Enter test mode, which makes a bunch on new commands available"""
+ sub = TestCli(self.history, self.voltha_grpc,
+ self.get_stub, self.voltha_sim_rest)
+ sub.cmdloop()
+
+ def do_alarm_filters(self, line):
+ sub = AlarmFiltersCli(self.get_stub)
+ sub.cmdloop()
+
+
+class TestCli(VolthaCli):
+ def __init__(self, history, voltha_grpc, get_stub, voltha_sim_rest):
+ VolthaCli.__init__(self, voltha_grpc, voltha_sim_rest)
+ self.history = history
+ self.get_stub = get_stub
+ self.prompt = '(' + self.colorize(self.colorize('test', 'cyan'),
+ 'bold') + ') '
+
+ def get_device(self, device_id, depth=0):
+ stub = self.get_stub()
+ res = stub.GetDevice(voltha_pb2.ID(id=device_id),
+ metadata=(('get-depth', str(depth)),))
+ return res
+
+ def do_arrive_onus(self, line):
+ """
+ Simulate the arrival of ONUs (available only on simulated_olt)
+ """
+ device_id = line or self.default_device_id
+
+ # verify that device is of type simulated_olt
+ device = self.get_device(device_id)
+ assert device.type == 'simulated_olt', (
+ 'Cannot use it on this device type (only on simulated_olt type)')
+
+ requests.get('http://{}/devices/{}/detect_onus'.format(
+ self.voltha_sim_rest, device_id
+ ))
+
+ complete_arrive_onus = VolthaCli.complete_device
+
+ def get_logical_ports(self, logical_device_id):
+ """
+        Return the NNI port number of the logical device and the list of
+        (UNI port number, vlan) pairs for its UNI ports.
+ """
+ stub = self.get_stub()
+ ports = stub.ListLogicalDevicePorts(
+ voltha_pb2.ID(id=logical_device_id)).items
+ nni = None
+ unis = []
+ for port in ports:
+ if port.root_port:
+ assert nni is None, "There shall be only one root port"
+ nni = port.ofp_port.port_no
+ else:
+ uni = port.ofp_port.port_no
+ uni_device = self.get_device(port.device_id)
+ vlan = uni_device.vlan
+ unis.append((uni, vlan))
+
+ assert nni is not None, "No NNI port found"
+ assert unis, "Not a single UNI?"
+
+ return nni, unis
+
+ def do_install_eapol_flow(self, line):
+ """
+ Install an EAPOL flow on the given logical device. If device is not
+ given, it will be applied to logical device of the last pre-provisioned
+ OLT device.
+ """
+
+ logical_device_id = line or self.default_logical_device_id
+
+ # gather NNI and UNI port IDs
+ nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+ # construct and push flow rule
+ stub = self.get_stub()
+ print "I am now here", unis
+ for uni_port_no, _ in unis:
+ update = FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=2000,
+ match_fields=[in_port(uni_port_no), eth_type(0x888e)],
+ actions=[
+ # push_vlan(0x8100),
+ # set_field(vlan_vid(4096 + 4000)),
+ output(ofp.OFPP_CONTROLLER)
+ ]
+ )
+ )
+ print "I am now here"
+ res = stub.UpdateLogicalDeviceFlowTable(update)
+ self.poutput('success for uni {} ({})'.format(uni_port_no, res))
+
+ complete_install_eapol_flow = VolthaCli.complete_logical_device
+
+ def do_install_all_controller_bound_flows(self, line):
+ """
+ Install all flow rules for controller bound flows, including EAPOL,
+ IGMP and DHCP. If device is not given, it will be applied to logical
+ device of the last pre-provisioned OLT device.
+ """
+ logical_device_id = line or self.default_logical_device_id
+
+ # gather NNI and UNI port IDs
+ nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+ # construct and push flow rules
+ stub = self.get_stub()
+
+ for uni_port_no, _ in unis:
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=2000,
+ match_fields=[
+ in_port(uni_port_no),
+ eth_type(0x888e)
+ ],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(uni_port_no),
+ eth_type(0x800),
+ ip_proto(2)
+ ],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(uni_port_no),
+ eth_type(0x800),
+ ip_proto(17),
+ udp_dst(67)
+ ],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+ self.poutput('success')
+
+ complete_install_all_controller_bound_flows = \
+ VolthaCli.complete_logical_device
+
+ def do_install_all_sample_flows(self, line):
+ """
+ Install all flows that are representative of the virtualized access
+ scenario in a PON network.
+ """
+ logical_device_id = line or self.default_logical_device_id
+
+ # gather NNI and UNI port IDs
+ nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+ # construct and push flow rules
+ stub = self.get_stub()
+
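+        # vlan_vid match/set values below are offset by 4096 (the OpenFlow 1.3
+        # OFPVID_PRESENT bit), indicating that a VLAN tag is present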
+ for uni_port_no, c_vid in unis:
+ # Controller-bound flows
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=2000,
+ match_fields=[in_port(uni_port_no), eth_type(0x888e)],
+ actions=[
+ # push_vlan(0x8100),
+ # set_field(vlan_vid(4096 + 4000)),
+ output(ofp.OFPP_CONTROLLER)
+ ]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[eth_type(0x800), ip_proto(2)],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[eth_type(0x800), ip_proto(17), udp_dst(67)],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+
+ # Unicast flows:
+ # Downstream flow 1
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=500,
+ match_fields=[
+ in_port(nni_port_no),
+ vlan_vid(4096 + 1000),
+ metadata(c_vid) # here to mimic an ONOS artifact
+ ],
+ actions=[pop_vlan()],
+ next_table_id=1
+ )
+ ))
+ # Downstream flow 2
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=500,
+ table_id=1,
+ match_fields=[in_port(nni_port_no), vlan_vid(4096 + c_vid)],
+ actions=[set_field(vlan_vid(4096 + 0)), output(uni_port_no)]
+ )
+ ))
+ # Upstream flow 1 for 0-tagged case
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=500,
+ match_fields=[in_port(uni_port_no), vlan_vid(4096 + 0)],
+ actions=[set_field(vlan_vid(4096 + c_vid))],
+ next_table_id=1
+ )
+ ))
+ # Upstream flow 1 for untagged case
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=500,
+ match_fields=[in_port(uni_port_no), vlan_vid(0)],
+ actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + c_vid))],
+ next_table_id=1
+ )
+ ))
+ # Upstream flow 2 for s-tag
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=500,
+ table_id=1,
+ match_fields=[in_port(uni_port_no), vlan_vid(4096 + c_vid)],
+ actions=[
+ push_vlan(0x8100),
+ set_field(vlan_vid(4096 + 1000)),
+ output(nni_port_no)
+ ]
+ )
+ ))
+
+ # Push a few multicast flows
+ # 1st with one bucket for our uni 0
+ stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+ id=logical_device_id,
+ group_mod=mk_multicast_group_mod(
+ group_id=1,
+ buckets=[
+ ofp.ofp_bucket(actions=[
+ pop_vlan(),
+ output(unis[0][0])
+ ])
+ ]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(nni_port_no),
+ eth_type(0x800),
+ vlan_vid(4096 + 140),
+ ipv4_dst(0xe4010101)
+ ],
+ actions=[group(1)]
+ )
+ ))
+
+ # 2nd with one bucket for uni 0 and 1
+ stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+ id=logical_device_id,
+ group_mod=mk_multicast_group_mod(
+ group_id=2,
+ buckets=[
+ ofp.ofp_bucket(actions=[pop_vlan(), output(unis[0][0])])
+ # ofp.ofp_bucket(actions=[pop_vlan(), output(unis[1][0])])
+ ]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(nni_port_no),
+ eth_type(0x800),
+ vlan_vid(4096 + 140),
+ ipv4_dst(0xe4020202)
+ ],
+ actions=[group(2)]
+ )
+ ))
+
+ # 3rd with empty bucket
+ stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+ id=logical_device_id,
+ group_mod=mk_multicast_group_mod(
+ group_id=3,
+ buckets=[]
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(nni_port_no),
+ eth_type(0x800),
+ vlan_vid(4096 + 140),
+ ipv4_dst(0xe4030303)
+ ],
+ actions=[group(3)]
+ )
+ ))
+
+ self.poutput('success')
+
+ complete_install_all_sample_flows = VolthaCli.complete_logical_device
+
+ def do_install_dhcp_flows(self, line):
+ """
+ Install all dhcp flows that are representative of the virtualized access
+ scenario in a PON network.
+ """
+ logical_device_id = line or self.default_logical_device_id
+
+ # gather NNI and UNI port IDs
+ nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+ # construct and push flow rules
+ stub = self.get_stub()
+
+ # Controller-bound flows
+ for uni_port_no, _ in unis:
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=mk_simple_flow_mod(
+ priority=1000,
+ match_fields=[
+ in_port(uni_port_no),
+ eth_type(0x800),
+ ip_proto(17),
+ udp_dst(67)
+ ],
+ actions=[output(ofp.OFPP_CONTROLLER)]
+ )
+ ))
+
+ self.poutput('success')
+
+ complete_install_dhcp_flows = VolthaCli.complete_logical_device
+
+ def do_delete_all_flows(self, line):
+ """
+ Remove all flows and flow groups from given logical device
+ """
+ logical_device_id = line or self.default_logical_device_id
+ stub = self.get_stub()
+ stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+ id=logical_device_id,
+ flow_mod=ofp.ofp_flow_mod(
+ command=ofp.OFPFC_DELETE,
+ table_id=ofp.OFPTT_ALL,
+ cookie_mask=0,
+ out_port=ofp.OFPP_ANY,
+ out_group=ofp.OFPG_ANY
+ )
+ ))
+ stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+ id=logical_device_id,
+ group_mod=ofp.ofp_group_mod(
+ command=ofp.OFPGC_DELETE,
+ group_id=ofp.OFPG_ALL
+ )
+ ))
+ self.poutput('success')
+
+ complete_delete_all_flows = VolthaCli.complete_logical_device
+
+ def do_send_simulated_upstream_eapol(self, line):
+ """
+ Send an EAPOL upstream from a simulated OLT
+ """
+ device_id = line or self.default_device_id
+ requests.get('http://{}/devices/{}/test_eapol_in'.format(
+ self.voltha_sim_rest, device_id
+ ))
+
+ complete_send_simulated_upstream_eapol = VolthaCli.complete_device
+
+ def do_inject_eapol_start(self, line):
+ """
+        Send out an EAPOL start message on the given Unix interface
+ """
+ pass
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+
+    _help = '<hostname>:<port> of the Consul agent (default: %s)' % defs['consul']
+ parser.add_argument(
+ '-C', '--consul', action='store', default=defs['consul'], help=_help)
+
+ _help = 'Lookup Voltha endpoints based on service entries in Consul'
+ parser.add_argument(
+ '-L', '--lookup', action='store_true', help=_help)
+
+ _help = 'All requests to the Voltha gRPC service are global'
+ parser.add_argument(
+ '-G', '--global_request', action='store_true', help=_help)
+
+ _help = '<hostname>:<port> of Voltha gRPC service (default={})'.format(
+ defs['voltha_grpc_endpoint'])
+ parser.add_argument('-g', '--grpc-endpoint', action='store',
+ default=defs['voltha_grpc_endpoint'], help=_help)
+
+ _help = '<hostname>:<port> of Voltha simulated adapter backend for ' \
+ 'testing (default={})'.format(
+ defs['voltha_sim_rest_endpoint'])
+ parser.add_argument('-s', '--sim-rest-endpoint', action='store',
+ default=defs['voltha_sim_rest_endpoint'], help=_help)
+
+ args = parser.parse_args()
+
+ if args.lookup:
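+        # Resolve the Voltha endpoints from Consul's service catalog instead
+        # of using the command-line or default endpoint values.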
+ host = args.consul.split(':')[0].strip()
+ port = int(args.consul.split(':')[1].strip())
+ consul = Consul(host=host, port=port)
+
+ _, services = consul.catalog.service('voltha-grpc')
+ if not services:
+ print('No voltha-grpc service registered in consul; exiting')
+ sys.exit(1)
+ args.grpc_endpoint = '{}:{}'.format(services[0]['ServiceAddress'],
+ services[0]['ServicePort'])
+
+ _, services = consul.catalog.service('voltha-sim-rest')
+ if not services:
+ print('No voltha-sim-rest service registered in consul; exiting')
+ sys.exit(1)
+ args.sim_rest_endpoint = '{}:{}'.format(services[0]['ServiceAddress'],
+ services[0]['ServicePort'])
+
+ c = VolthaCli(args.grpc_endpoint, args.sim_rest_endpoint,
+ args.global_request)
+ c.poutput(banner)
+ c.load_history()
+ c.cmdloop()
+ c.save_history()
diff --git a/python/cli/omci.py b/python/cli/omci.py
new file mode 100644
index 0000000..d8b8334
--- /dev/null
+++ b/python/cli/omci.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OpenOMCI level CLI commands
+"""
+from optparse import make_option
+from cmd2 import Cmd, options
+from datetime import datetime
+from google.protobuf.empty_pb2 import Empty
+from table import print_pb_list_as_table
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.omci_mib_db_pb2 import MibDeviceData, MibClassData, \
+ MibInstanceData
+from os import linesep
+
+_ = third_party
+
+
+class OmciCli(Cmd):
+ CREATED_KEY = 'created'
+ MODIFIED_KEY = 'modified'
+ MDS_KEY = 'mib_data_sync'
+ LAST_SYNC_KEY = 'last_mib_sync'
+ VERSION_KEY = 'version'
+ DEVICE_ID_KEY = 'device_id'
+ CLASS_ID_KEY = 'class_id'
+ INSTANCE_ID_KEY = 'instance_id'
+ ATTRIBUTES_KEY = 'attributes'
+ TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+ ME_KEY = 'managed_entities'
+ MSG_TYPE_KEY = 'message_types'
+
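+    # OMCI message type codes mapped to human-readable names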
+ MSG_TYPE_TO_NAME = {
+ 4: 'Create',
+ 5: 'Create Complete',
+ 6: 'Delete',
+ 8: 'Set',
+ 9: 'Get',
+ 10: 'Get Complete',
+ 11: 'Get All Alarms',
+ 12: 'Get All Alarms Next',
+ 13: 'Mib Upload',
+ 14: 'Mib Upload Next',
+ 15: 'Mib Reset',
+ 16: 'Alarm Notification',
+ 17: 'Attribute Value Change',
+ 18: 'Test',
+ 19: 'Start Software Download',
+ 20: 'Download Section',
+ 21: 'End Software Download',
+ 22: 'Activate Software',
+ 23: 'Commit Software',
+ 24: 'Synchronize Time',
+ 25: 'Reboot',
+ 26: 'Get Next',
+ 27: 'Test Result',
+ 28: 'Get Current Data',
+ 29: 'Set Table'
+ }
+
+ def __init__(self, device_id, get_stub):
+ Cmd.__init__(self)
+ self.get_stub = get_stub
+ self.device_id = device_id
+ self.prompt = '(' + self.colorize(
+ self.colorize('omci {}'.format(device_id), 'green'),
+ 'bold') + ') '
+
+ def cmdloop(self, intro=None):
+ self._cmdloop()
+
+ do_exit = Cmd.do_quit
+
+ def do_quit(self, line):
+ return self._STOP_AND_EXIT
+
+ def get_device_mib(self, device_id, depth=-1):
+ stub = self.get_stub()
+
+        # Let any gRPC error (e.g. unknown device ID) propagate to the
+        # caller; each command catches it and prints a friendly message.
+        return stub.GetMibDeviceData(voltha_pb2.ID(id=device_id),
+                                     metadata=(('get-depth', str(depth)), ))
+
+ def help_show_mib(self):
+ self.poutput('show_mib [-d <device-id>] [-c <class-id> [-i <instance-id>]]' +
+ linesep + '-d: <device-id> ONU Device ID' +
+ linesep + '-c: <class-id> Managed Entity Class ID' +
+ linesep + '-i: <instance-id> ME Instance ID')
+
+ @options([
+ make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+ help='ONU Device ID', default=None),
+ make_option('-c', '--class-id', action="store", dest='class_id',
+ type='int', help='Managed Entity Class ID', default=None),
+ make_option('-i', '--instance-id', action="store", dest='instance_id',
+ type='int', help='ME Instance ID', default=None)
+ ])
+ def do_show_mib(self, _line, opts):
+ """
+ Show OMCI MIB Database Information
+ """
+ device_id = opts.device_id or self.device_id
+
+ if opts.class_id is not None and not 1 <= opts.class_id <= 0xFFFF:
+ self.poutput(self.colorize('Error: ', 'red') +
+ self.colorize('Class ID must be 1..65535', 'blue'))
+ return
+
+ if opts.instance_id is not None and opts.class_id is None:
+ self.poutput(self.colorize('Error: ', 'red') +
+ self.colorize('Class ID required if specifying an Instance ID',
+ 'blue'))
+ return
+
+ if opts.instance_id is not None and not 0 <= opts.instance_id <= 0xFFFF:
+ self.poutput(self.colorize('Error: ', 'red') +
+ self.colorize('Instance ID must be 0..65535', 'blue'))
+ return
+
+ try:
+ mib_db = self.get_device_mib(device_id, depth=-1)
+
+        except Exception:  # e.g. gRPC error when the device ID is not in the MIB DB
+ self.poutput(self.colorize('Failed to get MIB database for ONU {}'
+ .format(device_id), 'red'))
+ return
+
+ mib = self._device_to_dict(mib_db)
+
+ self.poutput('OpenOMCI MIB Database for ONU {}'.format(device_id))
+
+ if opts.class_id is None and opts.instance_id is None:
+ self.poutput('Version : {}'.format(mib[OmciCli.VERSION_KEY]))
+ self.poutput('Created : {}'.format(mib[OmciCli.CREATED_KEY]))
+ self.poutput('Last In-Sync Time : {}'.format(mib[OmciCli.LAST_SYNC_KEY]))
+ self.poutput('MIB Data Sync Value: {}'.format(mib[OmciCli.MDS_KEY]))
+
+ class_ids = [k for k in mib.iterkeys()
+ if isinstance(k, int) and
+ (opts.class_id is None or opts.class_id == k)]
+ class_ids.sort()
+
+ if len(class_ids) == 0 and opts.class_id is not None:
+ self.poutput(self.colorize('Class ID {} not found in MIB Database'
+ .format(opts.class_id), 'red'))
+ return
+
+ for cls_id in class_ids:
+ class_data = mib[cls_id]
+ self.poutput(' ----------------------------------------------')
+ self.poutput(' Class ID: {0} - ({0:#x})'.format(cls_id))
+
+ inst_ids = [k for k in class_data.iterkeys()
+ if isinstance(k, int) and
+ (opts.instance_id is None or opts.instance_id == k)]
+ inst_ids.sort()
+
+ if len(inst_ids) == 0 and opts.instance_id is not None:
+                self.poutput(self.colorize(
+                    'Instance ID {} of Class ID {} not found in MIB Database'
+                    .format(opts.instance_id, opts.class_id), 'red'))
+ return
+
+ for inst_id in inst_ids:
+ inst_data = class_data[inst_id]
+ self.poutput(' Instance ID: {0} - ({0:#x})'.format(inst_id))
+ self.poutput(' Created : {}'.format(inst_data[OmciCli.CREATED_KEY]))
+ self.poutput(' Modified : {}'.format(inst_data[OmciCli.MODIFIED_KEY]))
+
+ attributes = inst_data[OmciCli.ATTRIBUTES_KEY]
+                attr_names = attributes.keys()
+                attr_names.sort()
+                max_len = max([len(attr) for attr in attr_names]) if attr_names else 0
+
+ for attr in attr_names:
+ name = self._cleanup_attribute_name(attr).ljust(max_len)
+ value = attributes[attr]
+ try:
+ ivalue = int(value)
+ self.poutput(' {0}: {1} - ({1:#x})'.format(name, ivalue))
+
+ except ValueError:
+ self.poutput(' {}: {}'.format(name, value))
+
+                if inst_id != inst_ids[-1]:
+                    self.poutput(linesep)
+
+ def _cleanup_attribute_name(self, attr):
+        """Replace underscores with spaces and capitalize each word's first character"""
+ return ' '.join([v[0].upper() + v[1:] for v in attr.split('_')])
+
+ def _instance_to_dict(self, instance):
+ if not isinstance(instance, MibInstanceData):
+ raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
+
+ data = {
+ OmciCli.INSTANCE_ID_KEY: instance.instance_id,
+ OmciCli.CREATED_KEY: self._string_to_time(instance.created),
+ OmciCli.MODIFIED_KEY: self._string_to_time(instance.modified),
+ OmciCli.ATTRIBUTES_KEY: dict()
+ }
+ for attribute in instance.attributes:
+ data[OmciCli.ATTRIBUTES_KEY][attribute.name] = str(attribute.value)
+
+ return data
+
+ def _class_to_dict(self, val):
+ if not isinstance(val, MibClassData):
+ raise TypeError('{} is not of type MibClassData'.format(type(val)))
+
+ data = {
+ OmciCli.CLASS_ID_KEY: val.class_id,
+ }
+ for instance in val.instances:
+ data[instance.instance_id] = self._instance_to_dict(instance)
+ return data
+
+ def _device_to_dict(self, val):
+ if not isinstance(val, MibDeviceData):
+ raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
+
+ data = {
+ OmciCli.DEVICE_ID_KEY: val.device_id,
+ OmciCli.CREATED_KEY: self._string_to_time(val.created),
+ OmciCli.LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
+ OmciCli.MDS_KEY: val.mib_data_sync,
+ OmciCli.VERSION_KEY: val.version,
+ OmciCli.ME_KEY: dict(),
+ OmciCli.MSG_TYPE_KEY: set()
+ }
+ for class_data in val.classes:
+ data[class_data.class_id] = self._class_to_dict(class_data)
+
+ for managed_entity in val.managed_entities:
+ data[OmciCli.ME_KEY][managed_entity.class_id] = managed_entity.name
+
+ for msg_type in val.message_types:
+ data[OmciCli.MSG_TYPE_KEY].add(msg_type.message_type)
+
+ return data
+
+ def _string_to_time(self, time):
+ return datetime.strptime(time, OmciCli.TIME_FORMAT) if len(time) else None
+
+ def help_show_me(self):
+ self.poutput('show_me [-d <device-id>]' +
+ linesep + '-d: <device-id> ONU Device ID')
+
+ @options([
+ make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+ help='ONU Device ID', default=None),
+ ])
+ def do_show_me(self, _line, opts):
+ """ Show supported OMCI Managed Entities"""
+
+ device_id = opts.device_id or self.device_id
+
+ try:
+ mib_db = self.get_device_mib(device_id, depth=1)
+ mib = self._device_to_dict(mib_db)
+
+        except Exception:  # e.g. gRPC error when the device ID is not in the MIB DB
+ self.poutput(self.colorize('Failed to get supported ME information for ONU {}'
+ .format(device_id), 'red'))
+ return
+
+ class_ids = [class_id for class_id in mib[OmciCli.ME_KEY].keys()]
+ class_ids.sort()
+
+ self.poutput('Supported Managed Entities for ONU {}'.format(device_id))
+ for class_id in class_ids:
+ self.poutput(' {0} - ({0:#x}): {1}'.format(class_id,
+ mib[OmciCli.ME_KEY][class_id]))
+
+ def help_show_msg_types(self):
+ self.poutput('show_msg_types [-d <device-id>]' +
+ linesep + '-d: <device-id> ONU Device ID')
+
+ @options([
+ make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+ help='ONU Device ID', default=None),
+ ])
+ def do_show_msg_types(self, _line, opts):
+ """ Show supported OMCI Message Types"""
+ device_id = opts.device_id or self.device_id
+
+ try:
+ mib_db = self.get_device_mib(device_id, depth=1)
+ mib = self._device_to_dict(mib_db)
+
+        except Exception:  # e.g. gRPC error when the device ID is not in the MIB DB
+ self.poutput(self.colorize('Failed to get supported Message Types for ONU {}'
+ .format(device_id), 'red'))
+ return
+
+ msg_types = [msg_type for msg_type in mib[OmciCli.MSG_TYPE_KEY]]
+ msg_types.sort()
+
+ self.poutput('Supported Message Types for ONU {}'.format(device_id))
+ for msg_type in msg_types:
+ self.poutput(' {0} - ({0:#x}): {1}'.
+ format(msg_type,
+ OmciCli.MSG_TYPE_TO_NAME.get(msg_type, 'Unknown')))
+
+ def get_devices(self):
+ stub = self.get_stub()
+ res = stub.ListDevices(Empty())
+ return res.items
+
+ def do_devices(self, line):
+        """List devices registered in Voltha (field set reduced for the OMCI menu)"""
+ devices = self.get_devices()
+ omit_fields = {
+ 'adapter',
+ 'model',
+ 'hardware_version',
+ 'images',
+ 'firmware_version',
+ 'serial_number',
+ 'vlan',
+ 'root',
+ 'extra_args',
+ 'proxy_address',
+ }
+ print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
+ def help_devices(self):
+ self.poutput('TODO: Provide some help')
+
+ def poutput(self, msg):
+ """Convenient shortcut for self.stdout.write(); adds newline if necessary."""
+ if msg:
+ self.stdout.write(msg)
+ if msg[-1] != '\n':
+ self.stdout.write('\n')
diff --git a/python/cli/setup.sh b/python/cli/setup.sh
new file mode 100755
index 0000000..6cab0bf
--- /dev/null
+++ b/python/cli/setup.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+while getopts LGC:g:s: option
+do
+ case "${option}"
+ in
+ L) LOOKUP_OPT="-L";;
+ G) GLOBAL_REQUEST_OPT="-G";;
+ C) CONSUL_OPT="-C ${OPTARG}";;
+ g) GRPC_OPT="-g ${OPTARG}";;
+ s) SIM_OPT="-s ${OPTARG}";;
+ esac
+done
+
+if [ -z "$CONSUL_OPT" ]
+then
+ CONSUL_OPT="-C $DOCKER_HOST_IP:8500"
+fi
+
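+# Write the voltha user's login files so an incoming SSH session drops
+# straight into the CLI and logs out again when the CLI exits.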
+echo "export DOCKER_HOST_IP=$DOCKER_HOST_IP" > /home/voltha/.bashrc
+echo "export PYTHONPATH=/voltha" >> /home/voltha/.bashrc
+echo "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" >> /home/voltha/.bashrc
+echo "export DOCKER_HOST_IP=$DOCKER_HOST_IP" > /home/voltha/.bash_profile
+echo "export PYTHONPATH=/voltha" >> /home/voltha/.bash_profile
+echo "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" >> /home/voltha/.bash_profile
+echo "/voltha/python/cli/main.py $LOOKUP_OPT $GLOBAL_REQUEST_OPT $CONSUL_OPT $GRPC_OPT $SIM_OPT" >> /home/voltha/.bash_profile
+echo "logout" >> /home/voltha/.bash_profile
+chown voltha:voltha /home/voltha/.bash_profile
+/usr/sbin/sshd -D
+
diff --git a/python/cli/table.py b/python/cli/table.py
new file mode 100644
index 0000000..7e6a4d8
--- /dev/null
+++ b/python/cli/table.py
@@ -0,0 +1,204 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import sys
+
+from google.protobuf.message import Message
+from termcolor import colored
+
+_printfn = lambda l: sys.stdout.write(l + '\n')
+
+
+class TablePrinter(object):
+ """Simple tabular data printer utility. For usage, see bottom of file"""
+
+ def __init__(self):
+ self.max_field_lengths = {}
+ self.field_names = {}
+ self.cell_values = {}
+
+ def add_cell(self, row_number, field_key, field_name, value):
+ if not isinstance(value, str):
+ value = str(value)
+ self._add_field_type(field_key, field_name)
+ row = self.cell_values.setdefault(row_number, {})
+ row[field_key] = value
+ self._update_max_length(field_key, value)
+
+ def number_of_rows(self):
+ return len(self.cell_values)
+
+ def print_table(self, header=None, printfn=_printfn, dividers=10):
+
+ if header is not None:
+ printfn(header)
+
+ field_keys = sorted(self.field_names.keys())
+
+ if not field_keys:
+ printfn('table empty')
+ return
+
+ def p_sep():
+ printfn('+' + '+'.join(
+ [(self.max_field_lengths[k] + 2) * '-'
+ for k in field_keys]) + '+')
+
+ p_sep()
+
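+        # '%%%ds' % width produces a '%<width>s' format string, which is then
+        # applied to the cell text to right-justify it to the column width.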
+ printfn('| ' + ' | '.join(
+ '%%%ds' % self.max_field_lengths[k] % self.field_names[k]
+ for k in field_keys) + ' |')
+ p_sep()
+
+ for i in range(len(self.cell_values)):
+ row = self.cell_values[i]
+ printfn(colored('| ' + ' | '.join(
+ '%%%ds' % self.max_field_lengths[k] % row.get(k, '')
+ for k in field_keys
+ ) + ' |'))
+ if not ((i + 1) % dividers):
+ p_sep()
+
+ if (i + 1) % dividers:
+ p_sep()
+
+ def _update_max_length(self, field_key, string):
+ length = len(string)
+ if length > self.max_field_lengths.get(field_key, 0):
+ self.max_field_lengths[field_key] = length
+
+ def _add_field_type(self, field_key, field_name):
+ if field_key not in self.field_names:
+ self.field_names[field_key] = field_name
+ self._update_max_length(field_key, field_name)
+ else:
+ assert self.field_names[field_key] == field_name
+
+
+def print_pb_list_as_table(header, items, fields_to_omit=None,
+ printfn=_printfn, dividers=10, show_nulls=False,
+ presfns={}):
+ from utils import pb2dict
+
+ t = TablePrinter()
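+    # Column keys are derived from protobuf field numbers; nested message
+    # fields are flattened into 'parent.child' columns whose keys are scaled
+    # by 100 per nesting level so columns sort in a stable, grouped order.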
+ for row, obj in enumerate(items):
+ assert isinstance(obj, Message)
+
+ def set_row(pd_dict, _row, field, value, t, prefix,
+ fields_to_omit, number):
+ fname = prefix + field.name
+ if fname in fields_to_omit:
+ return
+ if isinstance(value, Message):
+ add(_row, value, fname + '.',
+ 100 * (number + field.number))
+ else:
+ presentationfn = presfns[fname] if fname in presfns else lambda x: x
+ t.add_cell(_row, number + field.number, fname,
+ presentationfn(pd_dict.get(field.name)))
+
+ def add(_row, pb, prefix='', number=0):
+ d = pb2dict(pb)
+ if show_nulls:
+ fields = pb.DESCRIPTOR.fields
+ for field in fields:
+ set_row(d,
+ _row,
+ field,
+ getattr(pb, field.name),
+ t,
+ prefix,
+ fields_to_omit,
+ number)
+ else:
+ fields = pb.ListFields()
+ for (field, value) in fields:
+ set_row(d,
+ _row,
+ field,
+ value,
+ t,
+ prefix,
+ fields_to_omit,
+ number)
+ add(row, obj)
+
+ t.print_table(header, printfn, dividers)
+
+
+def print_pb_as_table(header, pb, fields_to_omit={}, printfn=_printfn,
+ show_nulls=False):
+
+ from utils import pb2dict
+
+ def is_repeated_item(msg):
+ return hasattr(msg, "extend")
+
+ def set_cell(pb, field, value, t, prefix, fields_to_omit):
+ d = pb2dict(pb)
+ fname = prefix + field.name
+
+ if fname in fields_to_omit:
+ return
+ if isinstance(value, Message):
+ pr(value, fname + '.')
+ elif is_repeated_item(value): # handles any list
+ row = t.number_of_rows()
+ t.add_cell(row, 0, 'field', fname)
+ t.add_cell(row, 1, 'value',
+ '{} item(s)'.format(len(d.get(field.name))))
+ else:
+ row = t.number_of_rows()
+ t.add_cell(row, 0, 'field', fname)
+ t.add_cell(row, 1, 'value', value)
+
+
+ t = TablePrinter()
+
+ def pr(_pb, prefix=''):
+ if show_nulls:
+ fields = _pb.DESCRIPTOR.fields
+ for field in sorted(fields, key=lambda f: f.number):
+ set_cell(_pb,
+ field,
+ getattr(_pb, field.name),
+ t,
+ prefix,
+ fields_to_omit)
+ else:
+ fields = _pb.ListFields()
+ for (field, value) in sorted(fields, key=lambda (f, v): f.number):
+ set_cell(_pb,
+ field,
+ value,
+ t,
+ prefix,
+ fields_to_omit)
+
+ pr(pb)
+
+ t.print_table(header, printfn)
+
+
+if __name__ == '__main__':
+ import random
+
+ t = TablePrinter()
+ for row in range(10):
+ t.add_cell(row, 0, 'id', row + 100)
+ t.add_cell(row, 1, 'name', 'Joe Somebody')
+        t.add_cell(row, 2, 'owes', '${}'.format(random.randint(10, 100000)))
+ t.print_table()
diff --git a/python/cli/utils.py b/python/cli/utils.py
new file mode 100644
index 0000000..38e5ee2
--- /dev/null
+++ b/python/cli/utils.py
@@ -0,0 +1,186 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+
+from google.protobuf.json_format import MessageToDict
+from termcolor import cprint, colored
+
+from table import TablePrinter
+
+
+_printfn = lambda l: sys.stdout.write(l + '\n')
+
+
+def pb2dict(pb_msg):
+ d = MessageToDict(pb_msg, including_default_value_fields=1,
+ preserving_proto_field_name=1)
+ return d
+
+
+def p_cookie(cookie):
+ cookie = '%x' % int(cookie)
+ if len(cookie) > 8:
+ return '~' + cookie[len(cookie)-8:]
+ else:
+ return cookie
+
+'''
+Reserved OpenFlow port numbers:
+    OFPP_NORMAL     = 0x7ffffffa;  /* Forward using non-OpenFlow pipeline. */
+    OFPP_FLOOD      = 0x7ffffffb;  /* Flood using non-OpenFlow pipeline. */
+    OFPP_ALL        = 0x7ffffffc;  /* All standard ports except input port. */
+    OFPP_CONTROLLER = 0x7ffffffd;  /* Send to controller. */
+    OFPP_LOCAL      = 0x7ffffffe;  /* Local openflow "port". */
+    OFPP_ANY        = 0x7fffffff;  /* Special value used in some requests when
+                                      no port is specified (i.e. wildcarded). */
+'''
+
+
+def p_port(port):
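+    """Map reserved OpenFlow port numbers to their symbolic names."""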
+ if port & 0x7fffffff == 0x7ffffffa:
+ return 'NORMAL'
+ elif port & 0x7fffffff == 0x7ffffffb:
+ return 'FLOOD'
+ elif port & 0x7fffffff == 0x7ffffffc:
+ return 'ALL'
+ elif port & 0x7fffffff == 0x7ffffffd:
+ return 'CONTROLLER'
+ elif port & 0x7fffffff == 0x7ffffffe:
+ return 'LOCAL'
+ elif port & 0x7fffffff == 0x7fffffff:
+ return 'ANY'
+ else:
+ return str(port)
+
+
+def p_vlan_vid(vlan_vid):
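+    """Strip the OFPVID_PRESENT bit (4096) and return the plain VLAN ID as a string."""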
+ if vlan_vid == 0:
+ return 'untagged'
+ assert vlan_vid & 4096 == 4096
+ return str(vlan_vid - 4096)
+
+
+def p_ipv4(x):
+ return '.'.join(str(v) for v in [
+ (x >> 24) & 0xff, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff
+ ])
+
+
+field_printers = {
+ 'IN_PORT': lambda f: (100, 'in_port', p_port(f['port'])),
+ 'VLAN_VID': lambda f: (101, 'vlan_vid', p_vlan_vid(f['vlan_vid'])),
+ 'VLAN_PCP': lambda f: (102, 'vlan_pcp', str(f['vlan_pcp'])),
+ 'ETH_TYPE': lambda f: (103, 'eth_type', '%X' % f['eth_type']),
+ 'IP_PROTO': lambda f: (104, 'ip_proto', str(f['ip_proto'])),
+ 'IPV4_DST': lambda f: (105, 'ipv4_dst', p_ipv4(f['ipv4_dst'])),
+ 'UDP_SRC': lambda f: (106, 'udp_src', str(f['udp_src'])),
+ 'UDP_DST': lambda f: (107, 'udp_dst', str(f['udp_dst'])),
+ 'TCP_SRC': lambda f: (108, 'tcp_src', str(f['tcp_src'])),
+ 'TCP_DST': lambda f: (109, 'tcp_dst', str(f['tcp_dst'])),
+ 'METADATA': lambda f: (110, 'metadata', str(f['table_metadata'])),
+}
+
+
+def p_field(field):
+ assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+ ofb = field['ofb_field']
+ assert not ofb['has_mask']
+ type = ofb['type'][len('OFPXMT_OFB_'):]
+ weight, field_name, value = field_printers[type](ofb)
+ return 1000 + weight, 'set_' + field_name, value
+
+
+action_printers = {
+ 'SET_FIELD': lambda a: p_field(a['set_field']['field']),
+ 'POP_VLAN': lambda a: (2000, 'pop_vlan', 'Yes'),
+ 'PUSH_VLAN': lambda a: (2001, 'push_vlan', '%x' % a['push']['ethertype']),
+ 'GROUP': lambda a: (3000, 'group', p_port(a['group']['group_id'])),
+ 'OUTPUT': lambda a: (4000, 'output', p_port(a['output']['port'])),
+}
+
+
+def print_flows(what, id, type, flows, groups, printfn=_printfn):
+
+ header = ''.join([
+ '{} '.format(what),
+ colored(id, color='green', attrs=['bold']),
+ ' (type: ',
+ colored(type, color='blue'),
+ ')'
+ ]) + '\nFlows ({}):'.format(len(flows))
+
+ table = TablePrinter()
+ for i, flow in enumerate(flows):
+
+ table.add_cell(i, 0, 'table_id', value=str(flow['table_id']))
+ table.add_cell(i, 1, 'priority', value=str(flow['priority']))
+ table.add_cell(i, 2, 'cookie', p_cookie(flow['cookie']))
+
+ assert flow['match']['type'] == 'OFPMT_OXM'
+ for field in flow['match']['oxm_fields']:
+ assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+ ofb = field['ofb_field']
+ # see CORD-816 (https://jira.opencord.org/browse/CORD-816)
+ assert not ofb['has_mask'], 'masked match not handled yet'
+ type = ofb['type'][len('OFPXMT_OFB_'):]
+ table.add_cell(i, *field_printers[type](ofb))
+
+ for instruction in flow['instructions']:
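+            # OpenFlow instruction types: 1=GOTO_TABLE, 4=APPLY_ACTIONS,
+            # 5=CLEAR_ACTIONS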
+ itype = instruction['type']
+ if itype == 4:
+ for action in instruction['actions']['actions']:
+ atype = action['type'][len('OFPAT_'):]
+ table.add_cell(i, *action_printers[atype](action))
+ elif itype == 1:
+ table.add_cell(i, 10000, 'goto-table',
+ instruction['goto_table']['table_id'])
+ elif itype == 5:
+ table.add_cell(i, 10000, 'clear-actions', [])
+ else:
+ raise NotImplementedError(
+ 'not handling instruction type {}'.format(itype))
+
+ table.print_table(header, printfn)
+
+
+def print_groups(what, id, type, groups, printfn=_printfn):
+ header = ''.join([
+ '{} '.format(what),
+ colored(id, color='green', attrs=['bold']),
+ ' (type: ',
+ colored(type, color='blue'),
+ ')'
+ ]) + '\nGroups ({}):'.format(len(groups))
+
+ table = TablePrinter()
+ for i, group in enumerate(groups):
+ output_ports = []
+ for bucket in group['desc']['buckets']:
+ for action in bucket['actions']:
+ if action['type'] == 'OFPAT_OUTPUT':
+ output_ports.append(action['output']['port'])
+ table.add_cell(i, 0, 'group_id', value=str(group['desc']['group_id']))
+ table.add_cell(i, 1, 'buckets', value=str(dict(output=output_ports)))
+
+ table.print_table(header, printfn)
+
+def dict2line(d):
+ assert isinstance(d, dict)
+ return ', '.join('{}: {}'.format(k, v) for k, v in sorted(d.items()))
+
+def enum2name(msg_obj, enum_type, enum_value):
+ descriptor = msg_obj.DESCRIPTOR.enum_types_by_name[enum_type]
+ name = descriptor.values_by_number[enum_value].name
+ return name