Merge branch 'master' into configure_refactored_containers
diff --git a/containers/xos/Dockerfile b/containers/xos/Dockerfile
index 9e69cac..dc9a638 100644
--- a/containers/xos/Dockerfile
+++ b/containers/xos/Dockerfile
@@ -86,6 +86,7 @@
# Set environment variables.
ENV HOME /root
+ENV PYTHONPATH /usr/local/lib/python2.7/site-packages:/usr/lib/python2.7/dist-packages
# Define working directory.
WORKDIR /opt/xos
diff --git a/containers/xos/Dockerfile.templ b/containers/xos/Dockerfile.templ
index f29305b..216ceb5 100644
--- a/containers/xos/Dockerfile.templ
+++ b/containers/xos/Dockerfile.templ
@@ -80,6 +80,7 @@
# Set environment variables.
ENV HOME /root
+ENV PYTHONPATH /usr/local/lib/python2.7/site-packages:/usr/lib/python2.7/dist-packages
# Define working directory.
WORKDIR /root
diff --git a/xos/configurations/common/Makefile.cloudlab b/xos/configurations/common/Makefile.cloudlab
index 8a73e8e..2cf9fcb 100644
--- a/xos/configurations/common/Makefile.cloudlab
+++ b/xos/configurations/common/Makefile.cloudlab
@@ -1,4 +1,7 @@
MYFLATLANIF:=$(shell netstat -i |grep "flat"|awk '{print $$1}' )
+ifndef MYFLATLANIF
+$(error MYFLATLANIF is empty)
+endif
MYFLATLANIP:=$(shell ifconfig $(MYFLATLANIF) | grep "inet addr" | awk -F: '{print $$2}' | awk '{print $$1}' )
SETUPDIR:=./setup
diff --git a/xos/configurations/cord/README-VTN.md b/xos/configurations/cord/README-VTN.md
index c8bdfa0..ffb5d56 100644
--- a/xos/configurations/cord/README-VTN.md
+++ b/xos/configurations/cord/README-VTN.md
@@ -1,8 +1,13 @@
vtn notes:
+see also: https://github.com/hyunsun/documentations/wiki/Neutron-ONOS-Integration-for-CORD-VTN#onos-setup
+
inside the xos container:
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/samples/vtn.yaml
+ emacs /opt/xos/xos_configuration/xos_common_config
+ [networking]
+ use_vtn=True
ctl node:
@@ -30,6 +35,12 @@
# files. Maybe it can be restarted using systemctl instead...
/usr/bin/neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini --config-file /usr/local/etc/neutron/plugins/ml2/conf_onos.ini
+Neutron driver arg-parsing issue
+
+    # Workaround: the VTN Neutron ML2 driver does not pick up its configuration arguments from the neutron config files
+ emacs /usr/local/lib/python2.7/dist-packages/networking_onos/plugins/ml2/driver.py
+ hard-code self.onos_path and self.onos_auth
+
Compute node that has the ONOS Container
# we need NAT rule so the neutron vtn plugin can talk to onos
@@ -45,6 +56,11 @@
service openvswitch-switch restart
ovs-vsctl del-br br-int
+nm node:
+
+ # neutron-dhcp-agent causes VTN app to throw port errors, because XOS uses --no-gateway
+ systemctl stop neutron-dhcp-agent.service
+
VTN doesn't seem to like cloudlab's networks (flat-net-1, ext-net, etc). You might have to delete them all. I've placed a script in xos/scripts/ called destroy-all-networks.sh that will automate tearing down all of cloudlab's neutron networks.
For development, I suggest using the bash configuration (remember to start the ONOS observer manually) so that
@@ -52,3 +68,6 @@
Problems:
* If you have more than one compute node, then the node that isn't running ONOS VTN will report as incomplete in VTN. This is because the openvswitch is trying to contact VTN on 172.17.0.2:6653.
+
+Notes:
+* Adding use_vtn=True to the [networking] section in the XOS config file has two effects: 1) it sets the gateway in sync_controller_networks, and 2) it disables automatic creation of nat-net for new slices. This is because VTN will fail if there is no gateway on a network, and because we don't have nat-net under the VTN configuration.
diff --git a/xos/model_policies/model_policy_Slice.py b/xos/model_policies/model_policy_Slice.py
index 308d8cb..dfdcb4f 100644
--- a/xos/model_policies/model_policy_Slice.py
+++ b/xos/model_policies/model_policy_Slice.py
@@ -1,3 +1,5 @@
+from xos.config import Config
+
def handle_delete(slice):
from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
from collections import defaultdict
@@ -15,6 +17,9 @@
from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
from collections import defaultdict
+ # only create nat_net if not using VTN
+ support_nat_net = not getattr(Config(), "networking_use_vtn", False)
+
print "MODEL POLICY: slice", slice
# slice = Slice.get(slice_id)
@@ -49,8 +54,8 @@
public_nets.append(network)
elif network.template.name == 'Private':
private_nets.append(network)
- if not public_nets:
- # ensure there is at least one public network, and default it to dedicated
+ if support_nat_net and (not public_nets):
+ # ensure there is at least one public network, and default it to dedicated
nat_net = Network(
name = slice.name+'-nat',
template = NetworkTemplate.objects.get(name='Public shared IPv4'),
@@ -78,7 +83,7 @@
public_net_slice = net_slice
elif net_slice.network in private_nets:
private_net_slice = net_slice
- if not public_net_slice:
+ if support_nat_net and (not public_net_slice):
public_net_slice = NetworkSlice(slice=slice, network=public_nets[0])
public_net_slice.save()
print "MODEL POLICY: slice", slice, "made public_net_slice"
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
index 5bb8250..373178c 100644
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -179,6 +179,44 @@
o.save()
- def delete_record(self, m):
- pass
+ def delete_record(self, o):
+ try:
+ controller = o.get_controller()
+ controller_register = json.loads(o.node.site_deployment.controller.backend_register)
+
+ if (controller_register.get('disabled',False)):
+ raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
+ except AttributeError:
+ pass
+
+ instance = self.get_instance(o)
+ if isinstance(instance, basestring):
+ # sync to some external host
+
+ # XXX - this probably needs more work...
+
+ fields = { "hostname": instance,
+ "instance_id": "ubuntu", # this is the username to log into
+ "private_key": service.key,
+ }
+ else:
+ # sync to an XOS instance
+ fields = self.get_ansible_fields(instance)
+
+ fields["ansible_tag"] = o.__class__.__name__ + "_" + str(o.id)
+
+ # If 'o' defines a 'sync_attributes' list, then we'll copy those
+ # attributes into the Ansible recipe's field list automatically.
+ if hasattr(o, "sync_attributes"):
+ for attribute_name in o.sync_attributes:
+ fields[attribute_name] = getattr(o, attribute_name)
+
+ fields.update(self.map_delete_inputs(o))
+
+ fields['delete']=True
+ res = self.run_playbook(o,fields)
+ try:
+ self.map_delete_outputs(o,res)
+ except AttributeError:
+ pass
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
index ad6564e..78ad45e 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.py
@@ -70,5 +70,7 @@
#o.last_ansible_hash = ansible_hash
- def delete_record(self, m):
- pass
+ def map_delete_inputs(self, o):
+ fields = {"unique_id": o.id,
+ "delete": True}
+ return fields
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index 3fbd569..c2705ba 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -22,6 +22,14 @@
{% endfor %}
tasks:
+{% if delete %}
+ - name: Remove tenant
+# FIXME: Adding dummy template action to avoid "action attribute missing in task" error
+ template: src=/opt/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 dest=/usr/local/share/monitoring-channel-{{ unique_id }}_ceilometer_proxy_config mode=0777
+ notify:
+ - stop monitoring-channel
+ - remove container
+{% else %}
{% if full_setup %}
- name: Docker repository
copy: src=/opt/xos/observers/monitoring_channel/files/docker.list
@@ -75,16 +83,17 @@
- name: Make sure Monitoring channel service is running
service: name=monitoring-channel-{{ unique_id }} state=started
+{% endif %}
handlers:
- name: restart monitoring-channel
- shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service vcpe-{{ unique_id }} start
+ shell: service monitoring-channel-{{ unique_id }} stop; sleep 1; service monitoring-channel-{{ unique_id }} start
- name: stop monitoring-channel
service: name=monitoring-channel-{{ unique_id }} state=stopped
- name: remove container
- docker: name=monitoring-channel-{{ unique_id }} state=absent image=docker-vcpe
+ docker: name=monitoring-channel-{{ unique_id }} state=absent image=monitoring-channel
- name: start monitoring-channel
service: name=monitoring-channel-{{ unique_id }} state=started
diff --git a/xos/openstack_observer/steps/sync_controller_networks.py b/xos/openstack_observer/steps/sync_controller_networks.py
index b646936..ad1604b 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.py
+++ b/xos/openstack_observer/steps/sync_controller_networks.py
@@ -12,6 +12,7 @@
from util.logger import observer_logger as logger
from observer.ansible import *
from openstack.driver import OpenStackDriver
+from xos.config import Config
import json
import pdb
@@ -33,6 +34,17 @@
cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
return cidr
+ def alloc_gateway(self, uuid):
+ # 16 bits only
+ uuid_masked = uuid & 0xffff
+ a = 10
+ b = uuid_masked >> 8
+ c = uuid_masked & 0xff
+ d = 1
+
+ gateway = '%d.%d.%d.%d'%(a,b,c,d)
+ return gateway
+
def save_controller_network(self, controller_network):
network_name = controller_network.network.name
@@ -51,6 +63,8 @@
'subnet_name':subnet_name,
'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
'cidr':cidr,
+ 'gateway':self.alloc_gateway(controller_network.pk),
+ 'use_vtn':getattr(Config(), "networking_use_vtn", False),
'delete':False
}
return network_fields
diff --git a/xos/openstack_observer/steps/sync_controller_networks.yaml b/xos/openstack_observer/steps/sync_controller_networks.yaml
index bbf8ec2..b885516 100644
--- a/xos/openstack_observer/steps/sync_controller_networks.yaml
+++ b/xos/openstack_observer/steps/sync_controller_networks.yaml
@@ -28,7 +28,11 @@
state=absent
{% else %}
state=present
+ {% if use_vtn %}
+ gateway_ip={{ gateway }}
+ {% else %}
no_gateway=true
+ {% endif %}
dns_nameservers=8.8.8.8
cidr={{ cidr }}
{% endif %}