First, very raw version of a primitive CLI
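
Adds a cmd2-based interactive shell (cli/main.py) with top-level commands
to list devices and logical devices, drill into per-device and
per-logical-device command modes, dump flow tables, and query Voltha
health over gRPC, plus the flow-table rendering helpers in cli/utils.py.

Example session (a sketch, assuming a Voltha instance is reachable at the
default gRPC endpoint localhost:50055 and the CLI is started from the
repository root; the endpoint can also be changed from within the shell
via cmd2's "set voltha_grpc <host>:<port>"):

    $ python -m cli.main
    (voltha) health
    (voltha) devices
    (voltha) device <device-id>
    (device <device-id>) show
    (device <device-id>) flows
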
Change-Id: I61e13297a7c04c357a9d15c4399d137b055c9420
diff --git a/cli/__init__.py b/cli/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cli/__init__.py
diff --git a/cli/main.py b/cli/main.py
new file mode 100755
index 0000000..7b90038
--- /dev/null
+++ b/cli/main.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import sys
+from cmd2 import Cmd, make_option, options
+import readline
+import grpc
+from simplejson import dumps
+
+from voltha.protos import third_party
+from voltha.protos import voltha_pb2
+from google.protobuf.empty_pb2 import Empty
+from google.protobuf.json_format import MessageToDict
+_ = third_party
+from cli.utils import print_flows
+
+
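+# Convert a protobuf message into a plain dict (keeping default-valued fields
+# and the original proto field names) so it can be pretty-printed with
+# simplejson.dumps below.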
+def pb2dict(pb_msg):
+    d = MessageToDict(pb_msg, including_default_value_fields=True,
+                      preserving_proto_field_name=True)
+ return d
+
+
+class VolthaCli(Cmd):
+
+ prompt = 'voltha'
+ history_file_name = '.voltha_cli_history'
+ max_history_lines = 500
+
+ Cmd.settable.update(dict(
+        voltha_grpc='Voltha GRPC endpoint in the form <host>:<port>'
+ ))
+
+ voltha_grpc = 'localhost:50055'
+
+ def __init__(self, *args, **kw):
+ Cmd.__init__(self, *args, **kw)
+ self.prompt = '(' + self.colorize(
+ self.colorize(self.prompt, 'red'), 'bold') + ') '
+ self.channel = None
+
+ def load_history(self):
+ """Load saved command history from local history file"""
+ try:
+            with open(self.history_file_name, 'r') as f:
+ for line in f.readlines():
+ stripped_line = line.strip()
+ self.history.append(stripped_line)
+ readline.add_history(stripped_line)
+ except IOError:
+ pass # ignore if file cannot be read
+
+ def save_history(self):
+ try:
+            with open(self.history_file_name, 'w') as f:
+                f.write('\n'.join(self.history[-self.max_history_lines:]))
+        except IOError as e:
+            print >> sys.stderr, 'Could not save history in {}: {}'.format(
+                self.history_file_name, e)
+ else:
+ print >> sys.stderr, 'History saved as {}'.format(
+ self.history_file_name)
+
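+    # The gRPC channel is created lazily on first use, so the CLI can start
+    # (and the voltha_grpc endpoint can still be re-pointed via cmd2's "set")
+    # before any connection attempt is made.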
+ def get_channel(self):
+ if self.channel is None:
+ self.channel = grpc.insecure_channel(self.voltha_grpc)
+ return self.channel
+
+ def do_reset_history(self, arg):
+ """Reset CLI history"""
+ while self.history:
+ self.history.pop()
+
+ def do_launch(self, arg):
+ """If Voltha is not running yet, launch it"""
+ pass
+
+ def do_restart(self, arg):
+ """Launch Voltha, but if it is already running, terminate it first"""
+ pass
+
+ def do_devices(self, arg):
+ """List devices registered in Voltha"""
+ stub = voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+ res = stub.ListDevices(Empty())
+ for device in res.items:
+ print self.colorize('# ====== device {}'.format(device.id), 'blue')
+ print dumps(pb2dict(device), indent=4, sort_keys=True)
+
+ def do_logical_devices(self, arg):
+ """List logical devices in Voltha"""
+ stub = voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+ res = stub.ListLogicalDevices(Empty())
+ for logical_device in res.items:
+ print self.colorize('# ====== logical device {}'.format(
+ logical_device.id), 'blue')
+ print dumps(pb2dict(logical_device), indent=4, sort_keys=True)
+
+ def do_device(self, arg):
+ """Enter device level command mode"""
+ sub = DeviceCli(self.get_channel, arg)
+ sub.cmdloop()
+
+ def do_logical_device(self, arg):
+ """Enter logical device level command mode"""
+ sub = LogicalDeviceCli(self.get_channel, arg)
+ sub.cmdloop()
+
+ def do_debug(self, arg):
+ """Launch PDB debug prompt in CLI (for CLI development)"""
+ from pdb import set_trace
+ set_trace()
+
+ def do_health(self, arg):
+        """Show connectivity status to the Voltha instance"""
+ stub = voltha_pb2.HealthServiceStub(self.get_channel())
+ res = stub.GetHealthStatus(Empty())
+ print dumps(pb2dict(res), indent=4)
+
+
+class DeviceCli(Cmd):
+
+ def __init__(self, get_channel, device_id):
+ Cmd.__init__(self)
+ self.get_channel = get_channel
+ self.device_id = device_id
+ self.prompt = '(' + self.colorize(
+ self.colorize('device {}'.format(device_id), 'red'), 'bold') + ') '
+
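+    # 'get-depth' is passed as gRPC metadata to control how deeply nested
+    # children are expanded in the response; -1 requests the fully expanded
+    # object (used by do_show and do_flows below).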
+ def get_device(self, depth=0):
+ stub = voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+ res = stub.GetDevice(voltha_pb2.ID(id=self.device_id),
+ metadata=(('get-depth', str(depth)), ))
+ return res
+
+ def do_show(self, arg):
+ """Show detailed device information"""
+ print dumps(pb2dict(self.get_device(depth=-1)),
+ indent=4, sort_keys=True)
+
+ def do_flows(self, arg):
+ """Show flow table for device"""
+ device = pb2dict(self.get_device(-1))
+ print_flows(
+ 'Device',
+ self.device_id,
+ type=device['type'],
+ flows=device['flows']['items'],
+ groups=device['flow_groups']['items']
+ )
+
+
+class LogicalDeviceCli(Cmd):
+
+ def __init__(self, get_channel, logical_device_id):
+ Cmd.__init__(self)
+ self.get_channel = get_channel
+ self.logical_device_id = logical_device_id
+        self.prompt = '(' + self.colorize(
+            self.colorize(
+                'logical device {}'.format(logical_device_id), 'red'),
+            'bold') + ') '
+
+ def get_logical_device(self, depth=0):
+ stub = voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+ res = stub.GetLogicalDevice(voltha_pb2.ID(id=self.logical_device_id),
+ metadata=(('get-depth', str(depth)), ))
+ return res
+
+ def do_show(self, arg):
+ """Show detailed logical device information"""
+ print dumps(pb2dict(self.get_logical_device(depth=-1)),
+ indent=4, sort_keys=True)
+
+ def do_flows(self, arg):
+ """Show flow table for logical device"""
+ logical_device = pb2dict(self.get_logical_device(-1))
+ print_flows(
+ 'Logical Device',
+ self.logical_device_id,
+ type='n/a',
+ flows=logical_device['flows']['items'],
+ groups=logical_device['flow_groups']['items']
+ )
+
+
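+# Entry point: restore saved command history, run the interactive loop, and
+# persist the history again on exit.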
+if __name__ == '__main__':
+ c = VolthaCli()
+ c.load_history()
+ c.cmdloop()
+ c.save_history()
diff --git a/cli/utils.py b/cli/utils.py
new file mode 100644
index 0000000..23fab7f
--- /dev/null
+++ b/cli/utils.py
@@ -0,0 +1,169 @@
+from termcolor import cprint, colored
+
+
+def p_cookie(cookie):
+ cookie = str(cookie)
+ if len(cookie) > 8:
+ return cookie[:6] + '...'
+ else:
+ return cookie
+
+'''
+ OFPP_NORMAL = 0x7ffffffa; /* Forward using non-OpenFlow pipeline. */
+ OFPP_FLOOD = 0x7ffffffb; /* Flood using non-OpenFlow pipeline. */
+ OFPP_ALL = 0x7ffffffc; /* All standard ports except input port. */
+ OFPP_CONTROLLER = 0x7ffffffd; /* Send to controller. */
+ OFPP_LOCAL = 0x7ffffffe; /* Local openflow "port". */
+ OFPP_ANY = 0x7fffffff; /* Special value used in some requests when
+                                      no port is specified (i.e. wildcarded). */
+'''
+
+
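+# Map reserved OpenFlow port numbers to symbolic names. Masking with
+# 0x7fffffff clears the top bit, so the comparisons match both the raw
+# 32-bit spec values (0xfffffffa..0xffffffff) and the int32-friendly
+# values listed above.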
+def p_port(port):
+ if port & 0x7fffffff == 0x7ffffffa:
+ return 'NORMAL'
+ elif port & 0x7fffffff == 0x7ffffffb:
+ return 'FLOOD'
+ elif port & 0x7fffffff == 0x7ffffffc:
+ return 'ALL'
+ elif port & 0x7fffffff == 0x7ffffffd:
+ return 'CONTROLLER'
+ elif port & 0x7fffffff == 0x7ffffffe:
+ return 'LOCAL'
+ elif port & 0x7fffffff == 0x7fffffff:
+ return 'ANY'
+ else:
+ return str(port)
+
+
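+# OpenFlow 1.3 VLAN match values carry the OFPVID_PRESENT bit (0x1000, i.e.
+# 4096) whenever a tag is present; strip it to recover the actual VLAN ID.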
+def p_vlan_vid(vlan_vid):
+ if vlan_vid == 0:
+ return 'untagged'
+ assert vlan_vid & 4096 == 4096
+ return str(vlan_vid - 4096)
+
+
+def p_ipv4(x):
+ return '.'.join(str(v) for v in [
+ (x >> 24) & 0xff, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff
+ ])
+
+
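+# Each printer returns a (weight, column-name, value) tuple; print_flows uses
+# the weight to order the table columns from left (match fields) to right
+# (actions).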
+field_printers = {
+ 'IN_PORT': lambda f: (100, 'in_port', p_port(f['port'])),
+ 'VLAN_VID': lambda f: (101, 'vlan_vid', p_vlan_vid(f['vlan_vid'])),
+ 'VLAN_PCP': lambda f: (102, 'vlan_pcp', str(f['vlan_pcp'])),
+ 'ETH_TYPE': lambda f: (103, 'eth_type', '%X' % f['eth_type']),
+ 'IPV4_DST': lambda f: (104, 'ipv4_dst', p_ipv4(f['ipv4_dst'])),
+ 'IP_PROTO': lambda f: (105, 'ip_proto', str(f['ip_proto']))
+}
+
+
+def p_field(field):
+ assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+ ofb = field['ofb_field']
+ assert not ofb['has_mask']
+ type = ofb['type'][len('OFPXMT_OFB_'):]
+ weight, field_name, value = field_printers[type](ofb)
+ return 1000 + weight, 'set_' + field_name, value
+
+
+action_printers = {
+ 'SET_FIELD': lambda a: p_field(a['set_field']['field']),
+ 'POP_VLAN': lambda a: (2000, 'pop_vlan', 'Yes'),
+ 'PUSH_VLAN': lambda a: (2001, 'push_vlan', '%x' % a['push']['ethertype']),
+ 'GROUP': lambda a: (3000, 'group', p_port(a['group']['group_id'])),
+ 'OUTPUT': lambda a: (4000, 'output', p_port(a['output']['port'])),
+}
+
+
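+# Render the flow table in two passes: first collect per-column names, values
+# and maximum widths, then print a fixed-width ASCII table with a separator
+# row after every third flow.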
+def print_flows(what, id, type, flows, groups):
+
+ print
+ print ''.join([
+ '{} '.format(what),
+ colored(id, color='green', attrs=['bold']),
+ ' (type: ',
+ colored(type, color='blue'),
+ ')'
+ ])
+ print 'Flows ({}):'.format(len(flows))
+
+ max_field_lengths = {}
+ field_names = {}
+
+ def update_max_length(field_key, string):
+ length = len(string)
+ if length > max_field_lengths.get(field_key, 0):
+ max_field_lengths[field_key] = length
+
+ def add_field_type(field_key, field_name):
+ if field_key not in field_names:
+ field_names[field_key] = field_name
+ update_max_length(field_key, field_name)
+ else:
+ assert field_names[field_key] == field_name
+
+ cell_values = {}
+
+ # preprocess data
+ if not flows:
+ return
+ for i, flow in enumerate(flows):
+
+ def add_field(field_key, field_name, value):
+ add_field_type(field_key, field_name)
+ row = cell_values.setdefault(i, {})
+ row[field_key] = value
+ update_max_length(field_key, value)
+
+ add_field(0, 'table_id', value=str(flow['table_id']))
+ add_field(1, 'priority', value=str(flow['priority']))
+ add_field(2, 'cookie', p_cookie(flow['cookie']))
+
+ assert flow['match']['type'] == 'OFPMT_OXM'
+ for field in flow['match']['oxm_fields']:
+ assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+ ofb = field['ofb_field']
+ assert not ofb['has_mask'], 'masked match not handled yet' # TODO
+ type = ofb['type'][len('OFPXMT_OFB_'):]
+ add_field(*field_printers[type](ofb))
+
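+        # Instruction type 4 is OFPIT_APPLY_ACTIONS in OpenFlow 1.3; only
+        # apply-actions instructions are rendered as columns here.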
+ for instruction in flow['instructions']:
+ if instruction['type'] == 4:
+ for action in instruction['actions']['actions']:
+ type = action['type'][len('OFPAT_'):]
+ add_field(*action_printers[type](action))
+
+ # print header
+ field_keys = sorted(field_names.keys())
+ def p_sep():
+ print '+' + '+'.join(
+ [(max_field_lengths[k] + 2) * '-' for k in field_keys]) + '+'
+
+ p_sep()
+ print '| ' + ' | '.join(
+ '%%%ds' % max_field_lengths[k] % field_names[k]
+ for k in field_keys) + ' |'
+ p_sep()
+
+ # print values
+ for i in xrange(len(flows)):
+ row = cell_values[i]
+ cprint('| ' + ' | '.join(
+ '%%%ds' % max_field_lengths[k] % row.get(k, '')
+ for k in field_keys
+ ) + ' |')
+ if not ((i + 1) % 3):
+ p_sep()
+
+ if ((i + 1) % 3):
+ p_sep()
+
+ # TODO groups TBF
+ assert len(groups) == 0
+
+
diff --git a/requirements.txt b/requirements.txt
index 13a7c0a..e7aea50 100755
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,6 @@
argparse==1.2.1
arrow>=0.10.0
+cmd2>=0.6.9
colorama>=0.2.5
cython==0.24.1
decorator>=3.4.0