CORD-1908: cleanup VSG service
Change-Id: Ib7fa0a68dbba185b30d7cd1ea1b425ad3d33a8d3
diff --git a/xos/synchronizer/files/etc/rc.local b/xos/synchronizer/files/etc/rc.local
deleted file mode 100755
index 49ee927..0000000
--- a/xos/synchronizer/files/etc/rc.local
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh -e
-#
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-
-ufw enable
-ufw allow bootps
-ufw allow from 192.168.0.0/24
-ufw route allow in on eth1 out on eth0
-ufw route allow in on eth1 out on eth2
-
-BWLIMIT=/usr/local/sbin/bwlimit.sh
-[ -e $BWLIMIT ] && $BWLIMIT start || true
-
-exit 0
diff --git a/xos/synchronizer/files/vcpe.conf b/xos/synchronizer/files/vcpe.conf
deleted file mode 100644
index 752c57a..0000000
--- a/xos/synchronizer/files/vcpe.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
- /usr/local/sbin/start-vcpe.sh
-end script
diff --git a/xos/synchronizer/files/vm-resolv.conf b/xos/synchronizer/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/synchronizer/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/synchronizer/files/vcpe.dnsmasq b/xos/synchronizer/files/vsg.dnsmasq
similarity index 100%
rename from xos/synchronizer/files/vcpe.dnsmasq
rename to xos/synchronizer/files/vsg.dnsmasq
diff --git a/xos/synchronizer/model_policies/model_policy_vsgtenant.py b/xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
similarity index 62%
rename from xos/synchronizer/model_policies/model_policy_vsgtenant.py
rename to xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
index 33b8877..e5a0562 100644
--- a/xos/synchronizer/model_policies/model_policy_vsgtenant.py
+++ b/xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
@@ -18,56 +18,56 @@
from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import TenantWithContainerPolicy, LeastLoadedNodeScheduler
from synchronizers.new_base.exceptions import *
-class VSGTenantPolicy(TenantWithContainerPolicy):
- model_name = "VSGTenant"
+class VSGServiceInstancePolicy(TenantWithContainerPolicy):
+ model_name = "VSGServiceInstance"
- def handle_create(self, tenant):
- return self.handle_update(tenant)
+ def handle_create(self, service_instance):
+ return self.handle_update(service_instance)
- def handle_update(self, tenant):
- if (tenant.link_deleted_count>0) and (not tenant.provided_links.exists()):
+ def handle_update(self, service_instance):
+ if (service_instance.link_deleted_count>0) and (not service_instance.provided_links.exists()):
# if the last provided_link has just gone away, then self-destruct
- self.logger.info("The last provided link has been deleted -- self-destructing.");
+ self.logger.info("The last provided link has been deleted -- self-destructing.")
# TODO: We shouldn't have to call handle_delete ourselves. The model policy framework should handle this
- # for us, but it isn't. I think that's happening is that tenant.delete() isn't setting a new
+ # for us, but it isn't. I think what's happening is that serviceinstance.delete() isn't setting a new
# updated timestamp, since there's no way to pass `always_update_timestamp`, and therefore the
# policy framework doesn't know that the object has changed and needs new policies. For now, the
# workaround is to just call handle_delete ourselves.
- self.handle_delete(tenant)
- # Note that if we deleted the Instance in handle_delete, then django may have cascade-deleted the tenant
- # by now. Thus we have to guard our delete, to check that the tenant still exists.
- if VSGTenant.objects.filter(id=tenant.id).exists():
- tenant.delete()
+ self.handle_delete(service_instance)
+ # Note that if we deleted the Instance in handle_delete, then django may have cascade-deleted the service
+ # instance by now. Thus we have to guard our delete, to check that the service instance still exists.
+ if VSGServiceInstance.objects.filter(id=service_instance.id).exists():
+ service_instance.delete()
else:
- self.logger.info("Tenant %s is already deleted" % tenant)
+ self.logger.info("Tenant %s is already deleted" % service_instance)
return
- self.manage_container(tenant)
- self.manage_address_service_instance(tenant)
- self.cleanup_orphans(tenant)
+ self.manage_container(service_instance)
+ self.manage_address_service_instance(service_instance)
+ self.cleanup_orphans(service_instance)
- def handle_delete(self, tenant):
- if tenant.instance and (not tenant.instance.deleted):
- all_tenants_this_instance = VSGTenant.objects.filter(instance_id=tenant.instance.id)
- other_tenants_this_instance = [x for x in all_tenants_this_instance if x.id != tenant.id]
- if (not other_tenants_this_instance):
- self.logger.info("VSG Instance %s is now unused -- deleting" % tenant.instance)
- self.delete_instance(tenant, tenant.instance)
+ def handle_delete(self, service_instance):
+ if service_instance.instance and (not service_instance.instance.deleted):
+ all_service_instances_this_instance = VSGServiceInstance.objects.filter(instance_id=service_instance.instance.id)
+ other_service_instances_this_instance = [x for x in all_service_instances_this_instance if x.id != service_instance.id]
+ if (not other_service_instances_this_instance):
+ self.logger.info("VSG Instance %s is now unused -- deleting" % service_instance.instance)
+ self.delete_instance(service_instance, service_instance.instance)
else:
- self.logger.info("VSG Instance %s has %d other service instances attached" % (tenant.instance, len(other_tenants_this_instance)))
+ self.logger.info("VSG Instance %s has %d other service instances attached" % (service_instance.instance, len(other_service_instances_this_instance)))
- def manage_address_service_instance(self, tenant):
- if tenant.deleted:
+ def manage_address_service_instance(self, service_instance):
+ if service_instance.deleted:
return
- if tenant.address_service_instance is None:
- address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=tenant)
+ if service_instance.address_service_instance is None:
+ address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=service_instance)
address_service_instance.save()
- def cleanup_orphans(self, tenant):
+ def cleanup_orphans(self, service_instance):
# ensure vSG only has one AddressManagerServiceInstance
- cur_asi = tenant.address_service_instance
- for link in tenant.subscribed_links.all():
+ cur_asi = service_instance.address_service_instance
+ for link in service_instance.subscribed_links.all():
# TODO: hardcoded dependency
# cast from ServiceInstance to AddressManagerServiceInstance
asis = AddressManagerServiceInstance.objects.filter(id = link.provider_service_instance.id)
@@ -75,8 +75,8 @@
if (not cur_asi) or (asi.id != cur_asi.id):
asi.delete()
- def get_vsg_service(self, tenant):
- return VSGService.objects.get(id=tenant.owner.id)
+ def get_vsg_service(self, service_instance):
+ return VSGService.objects.get(id=service_instance.owner.id)
def find_instance_for_s_tag(self, s_tag):
tags = Tag.objects.filter(name="s_tag", value=s_tag)
@@ -85,59 +85,59 @@
return None
- def find_or_make_instance_for_s_tag(self, tenant, s_tag):
- instance = self.find_instance_for_s_tag(tenant.volt.s_tag)
+ def find_or_make_instance_for_s_tag(self, service_instance):
+ instance = self.find_instance_for_s_tag(service_instance.volt.s_tag)
if instance:
if instance.no_sync:
# if no_sync is still set, then perhaps we failed while saving it and need to retry.
- self.save_instance(tenant, instance)
+ self.save_instance(service_instance, instance)
return instance
- desired_image = self.get_image(tenant)
+ desired_image = self.get_image(service_instance)
flavors = Flavor.objects.filter(name="m1.small")
if not flavors:
raise SynchronizerConfigurationError("No m1.small flavor")
- slice = tenant.owner.slices.first()
+ slice = service_instance.owner.slices.first()
- (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(tenant).node_label).pick()
+ (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(service_instance).node_label).pick()
assert (slice is not None)
assert (node is not None)
assert (desired_image is not None)
- assert (tenant.creator is not None)
+ assert (service_instance.creator is not None)
assert (node.site_deployment.deployment is not None)
assert (desired_image is not None)
instance = Instance(slice=slice,
node=node,
image=desired_image,
- creator=tenant.creator,
+ creator=service_instance.creator,
deployment=node.site_deployment.deployment,
flavor=flavors[0],
isolation=slice.default_isolation,
parent=parent)
- self.save_instance(tenant, instance)
+ self.save_instance(service_instance, instance)
return instance
- def manage_container(self, tenant):
- if tenant.deleted:
+ def manage_container(self, service_instance):
+ if service_instance.deleted:
return
- if not tenant.volt:
+ if not service_instance.volt:
raise SynchronizerConfigurationError("This VSG container has no volt")
- if tenant.instance:
+ if service_instance.instance:
# We're good.
return
- instance = self.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
- tenant.instance = instance
+ instance = self.find_or_make_instance_for_s_tag(service_instance)
+ service_instance.instance = instance
# TODO: possible for partial failure here?
- tenant.save()
+ service_instance.save()
def find_or_make_port(self, instance, network, **kwargs):
port = Port.objects.filter(instance_id=instance.id, network_id=network.id)
@@ -148,8 +148,8 @@
port.save()
return port
- def get_lan_network(self, tenant, instance):
- slice = tenant.owner.slices.all()[0]
+ def get_lan_network(self, service_instance, instance):
+ slice = service_instance.owner.slices.all()[0]
# there should only be one network private network, and its template should not be the management template
lan_networks = [x for x in slice.networks.all() if
x.template.visibility == "private" and (not "management" in x.template.name)]
@@ -171,9 +171,9 @@
p = NetworkParameter(parameter=pt, content_type=port.self_content_type_id, object_id=port.id, value=str(value))
p.save()
- def delete_instance(self, tenant, instance):
+ def delete_instance(self, service_instance, instance):
# delete the `s_tag` tags
- tags = Tag.objects.filter(service_id=tenant.owner.id, content_type=instance.self_content_type_id,
+ tags = Tag.objects.filter(service_id=service_instance.owner.id, content_type=instance.self_content_type_id,
object_id=instance.id, name="s_tag")
for tag in tags:
tag.delete()
@@ -192,28 +192,28 @@
instance.delete()
- def save_instance(self, tenant, instance):
+ def save_instance(self, service_instance, instance):
instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
instance.no_sync = True # prevent instance from being synced until we're done with it
- super(VSGTenantPolicy, self).save_instance(instance)
+ super(VSGServiceInstancePolicy, self).save_instance(instance)
try:
if instance.isolation in ["container", "container_vm"]:
raise Exception("Not supported")
if instance.isolation in ["vm"]:
- lan_network = self.get_lan_network(tenant, instance)
+ lan_network = self.get_lan_network(service_instance, instance)
port = self.find_or_make_port(instance, lan_network)
- self.port_set_parameter(port, "c_tag", tenant.volt.c_tag)
- self.port_set_parameter(port, "s_tag", tenant.volt.s_tag)
- self.port_set_parameter(port, "neutron_port_name", "stag-%s" % tenant.volt.s_tag)
+ self.port_set_parameter(port, "c_tag", service_instance.volt.c_tag)
+ self.port_set_parameter(port, "s_tag", service_instance.volt.s_tag)
+ self.port_set_parameter(port, "neutron_port_name", "stag-%s" % service_instance.volt.s_tag)
port.save()
# tag the instance with the s-tag, so we can easily find the
# instance later
- if tenant.volt and tenant.volt.s_tag:
- tags = Tag.objects.filter(name="s_tag", value=tenant.volt.s_tag)
+ if service_instance.volt and service_instance.volt.s_tag:
+ tags = Tag.objects.filter(name="s_tag", value=service_instance.volt.s_tag)
if not tags:
- tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(tenant.volt.s_tag))
+ tag = Tag(service=service_instance.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(service_instance.volt.s_tag))
tag.save()
# VTN-CORD needs a WAN address for the VM, so that the VM can
@@ -221,15 +221,15 @@
tags = Tag.objects.filter(content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant")
if not tags:
address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg",
- subscriber_service=tenant.owner)
+ subscriber_service=service_instance.owner)
address_service_instance.set_attribute("tenant_for_instance_id", instance.id)
address_service_instance.save()
# TODO: potential partial failure
- tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % address_service_instance.id)
+ tag = Tag(service=service_instance.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % address_service_instance.id)
tag.save()
instance.no_sync = False # allow the synchronizer to run now
- super(VSGTenantPolicy, self).save_instance(instance)
+ super(VSGServiceInstancePolicy, self).save_instance(instance)
except:
# need to clean up any failures here
raise
diff --git a/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py b/xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
similarity index 87%
rename from xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
rename to xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
index 4fb4542..365604a 100644
--- a/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
+++ b/xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
@@ -27,8 +27,8 @@
import synchronizers.new_base.modelaccessor
import synchronizers.new_base.model_policies.model_policy_tenantwithcontainer
-import model_policy_vsgtenant
-from model_policy_vsgtenant import VSGTenantPolicy
+import model_policy_vsgserviceinstance
+from model_policy_vsgserviceinstance import VSGServiceInstancePolicy
from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import LeastLoadedNodeScheduler
# ---------------------------------------------------------------------------------------------------------------------
@@ -227,9 +227,9 @@
def __init__(self, **kwargs):
super(MockVSGService, self).__init__(**kwargs)
-class MockVSGTenantObjects(MockObjectStore): pass
-class MockVSGTenant(MockObject):
- objects = get_MockObjectStore("VSGTenant")
+class MockVSGServiceInstanceObjects(MockObjectStore): pass
+class MockVSGServiceInstance(MockObject):
+ objects = get_MockObjectStore("VSGServiceInstance")
owner = None
deleted = False
instance = None
@@ -247,15 +247,15 @@
class TestModelPolicyVsgTenant(unittest.TestCase):
def setUp(self):
- # Some of the functions we call have side-effects. For example, creating a VSGTenant may lead to creation of
+ # Some of the functions we call have side-effects. For example, creating a VSGServiceInstance may lead to creation of
# tags. Ideally, this wouldn't happen, but it does. So make sure we reset the world.
for store in AllMockObjectStores:
store.items = []
- self.policy = VSGTenantPolicy()
- self.tenant = MockVSGTenant()
+ self.policy = VSGServiceInstancePolicy()
+ self.tenant = MockVSGServiceInstance()
self.user = MockUser(email="testadmin@test.org")
- self.tenant = MockVSGTenant(creator=self.user, id=1)
+ self.tenant = MockVSGServiceInstance(creator=self.user, id=1)
self.flavor = MockFlavor(name="m1.small")
self.npt_ctag = MockNetworkParameterType(name="c_tag", id=1)
self.npt_stag = MockNetworkParameterType(name="s_tag", id=2)
@@ -270,32 +270,32 @@
synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Flavor = MockFlavor
synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Tag = MockTag
synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Node = MockNode
- model_policy_vsgtenant.Instance = MockInstance
- model_policy_vsgtenant.Flavor = MockFlavor
- model_policy_vsgtenant.Tag = MockTag
- model_policy_vsgtenant.VSGService = MockVSGService
- model_policy_vsgtenant.VSGTenant = MockVSGTenant
- model_policy_vsgtenant.Node = MockNode
- model_policy_vsgtenant.Port = MockPort
- model_policy_vsgtenant.NetworkParameterType = MockNetworkParameterType
- model_policy_vsgtenant.NetworkParameter = MockNetworkParameter
- model_policy_vsgtenant.ServiceInstance = MockServiceInstance
- model_policy_vsgtenant.AddressManagerServiceInstance = MockAddressManagerServiceInstance
+ model_policy_vsgserviceinstance.Instance = MockInstance
+ model_policy_vsgserviceinstance.Flavor = MockFlavor
+ model_policy_vsgserviceinstance.Tag = MockTag
+ model_policy_vsgserviceinstance.VSGService = MockVSGService
+ model_policy_vsgserviceinstance.VSGServiceInstance = MockVSGServiceInstance
+ model_policy_vsgserviceinstance.Node = MockNode
+ model_policy_vsgserviceinstance.Port = MockPort
+ model_policy_vsgserviceinstance.NetworkParameterType = MockNetworkParameterType
+ model_policy_vsgserviceinstance.NetworkParameter = MockNetworkParameter
+ model_policy_vsgserviceinstance.ServiceInstance = MockServiceInstance
+ model_policy_vsgserviceinstance.AddressManagerServiceInstance = MockAddressManagerServiceInstance
MockTag.objects.item_list = []
- @patch.object(VSGTenantPolicy, "manage_container")
- @patch.object(VSGTenantPolicy, "manage_address_service_instance")
- @patch.object(VSGTenantPolicy, "cleanup_orphans")
+ @patch.object(VSGServiceInstancePolicy, "manage_container")
+ @patch.object(VSGServiceInstancePolicy, "manage_address_service_instance")
+ @patch.object(VSGServiceInstancePolicy, "cleanup_orphans")
def test_handle_create(self, cleanup_orphans, manage_address_service_instance, manage_container):
self.policy.handle_create(self.tenant)
manage_container.assert_called_with(self.tenant)
manage_address_service_instance.assert_called_with(self.tenant)
cleanup_orphans.assert_called_with(self.tenant)
- @patch.object(VSGTenantPolicy, "manage_container")
- @patch.object(VSGTenantPolicy, "manage_address_service_instance")
- @patch.object(VSGTenantPolicy, "cleanup_orphans")
+ @patch.object(VSGServiceInstancePolicy, "manage_container")
+ @patch.object(VSGServiceInstancePolicy, "manage_address_service_instance")
+ @patch.object(VSGServiceInstancePolicy, "cleanup_orphans")
def test_handle_update(self, cleanup_orphans, manage_address_service_instance, manage_container):
self.policy.handle_create(self.tenant)
manage_container.assert_called_with(self.tenant)
@@ -317,10 +317,10 @@
self.policy.handle_delete(self.tenant)
amsi_delete.assert_not_called()
- @patch.object(MockVSGTenantObjects, "get_items")
+ @patch.object(MockVSGServiceInstanceObjects, "get_items")
@patch.object(MockInstanceObjects, "get_items")
@patch.object(MockInstance, "delete")
- def test_handle_delete_cleanup_instance(self, instance_delete, instance_objects, vsgtenant_objects):
+ def test_handle_delete_cleanup_instance(self, instance_delete, instance_objects, vsgserviceinstance_objects):
vsg_service = MockVSGService()
instance = MockInstance(id=1)
instance_objects.return_value = [instance]
@@ -328,14 +328,14 @@
self.tenant.instance = instance
self.tenant.instance_id = instance.id
self.tenant.owner = vsg_service
- vsgtenant_objects.return_value = [self.tenant]
+ vsgserviceinstance_objects.return_value = [self.tenant]
self.policy.handle_delete(self.tenant)
instance_delete.assert_called()
- @patch.object(MockVSGTenantObjects, "get_items")
+ @patch.object(MockVSGServiceInstanceObjects, "get_items")
@patch.object(MockInstanceObjects, "get_items")
@patch.object(MockInstance, "delete")
- def test_handle_delete_cleanup_instance_live(self, instance_delete, instance_objects, vsgtenant_objects):
+ def test_handle_delete_cleanup_instance_live(self, instance_delete, instance_objects, vsgserviceinstance_objects):
# Make sure if an Instance still has active VSG Tenants, that we don't clean it up
vsg_service = MockVSGService()
instance = MockInstance(id=1)
@@ -345,13 +345,13 @@
self.tenant.instance_id = instance.id
self.tenant.owner = vsg_service
- other_tenant = MockVSGTenant()
+ other_tenant = MockVSGServiceInstance()
other_tenant.address_service_instance = None
other_tenant.instance = instance
other_tenant.instance_id = instance.id
other_tenant.owner = vsg_service
- vsgtenant_objects.return_value = [self.tenant, other_tenant]
+ vsgserviceinstance_objects.return_value = [self.tenant, other_tenant]
self.policy.handle_delete(self.tenant)
instance_delete.assert_not_called()
@@ -359,13 +359,13 @@
@patch.object(MockServiceInstanceObjects, "get_items")
@patch.object(MockAddressManagerServiceInstanceObjects, "get_items")
@patch.object(MockTagObjects, "get_items")
- @patch.object(MockVSGTenantObjects, "get_items")
+ @patch.object(MockVSGServiceInstanceObjects, "get_items")
@patch.object(MockInstanceObjects, "get_items")
@patch.object(MockAddressManagerServiceInstance, "delete")
@patch.object(MockTag, "delete")
@patch.object(MockInstance, "delete")
def test_handle_delete_cleanup_instance_and_tags_and_stuff(self, instance_delete, tag_delete, amsi_delete,
- instance_objects, vsgtenant_objects, tag_objects,
+ instance_objects, vsgserviceinstance_objects, tag_objects,
amsi_objects, si_objects):
vsg_service = MockVSGService()
am_instance = MockAddressManagerServiceInstance()
@@ -377,7 +377,7 @@
self.tenant.instance = instance
self.tenant.instance_id = instance.id
self.tenant.owner = vsg_service
- vsgtenant_objects.return_value = [self.tenant]
+ vsgserviceinstance_objects.return_value = [self.tenant]
stag_tag = MockTag(service_id=self.tenant.owner.id, content_type=instance.self_content_type_id,
object_id=instance.id, name="s_tag")
vrouter_tag = MockTag(service_id=self.tenant.owner.id, content_type=instance.self_content_type_id,
@@ -420,9 +420,9 @@
self.policy.manage_container(self.tenant)
self.assertEqual(e.exception.message, "This VSG container has no volt")
- @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
- @patch.object(MockVSGTenant, "save")
- @patch.object(MockVSGTenant, "volt")
+ @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+ @patch.object(MockVSGServiceInstance, "save")
+ @patch.object(MockVSGServiceInstance, "volt")
def test_manage_container_noinstance(self, volt, tenant_save, find_or_make_instance_for_s_tag):
instance = MockInstance()
volt.s_tag=222
@@ -432,9 +432,9 @@
self.assertEqual(self.tenant.instance, instance)
tenant_save.assert_called()
- @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
- @patch.object(MockVSGTenant, "save")
- @patch.object(MockVSGTenant, "volt")
+ @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+ @patch.object(MockVSGServiceInstance, "save")
+ @patch.object(MockVSGServiceInstance, "volt")
def test_manage_container_hasinstance(self, volt, tenant_save, find_or_make_instance_for_s_tag):
instance = MockInstance()
volt.s_tag=222
@@ -445,9 +445,9 @@
self.assertEqual(self.tenant.instance, instance)
tenant_save.assert_not_called()
- @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
- @patch.object(MockVSGTenant, "save")
- @patch.object(MockVSGTenant, "volt")
+ @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+ @patch.object(MockVSGServiceInstance, "save")
+ @patch.object(MockVSGServiceInstance, "volt")
def test_manage_container_deleted(self, volt, tenant_save, find_or_make_instance_for_s_tag):
self.tenant.deleted = True
self.policy.manage_container(self.tenant)
@@ -535,15 +535,15 @@
@patch.object(MockNodeObjects, "get_items")
@patch.object(MockFlavorObjects, "get_items")
@patch.object(MockVSGServiceObjects, "get_items")
- @patch.object(MockVSGTenant, "volt")
- @patch.object(MockVSGTenant, "save")
- @patch.object(VSGTenantPolicy, "get_image")
- @patch.object(VSGTenantPolicy, "allocate_public_service_instance")
+ @patch.object(MockVSGServiceInstance, "volt")
+ @patch.object(MockVSGServiceInstance, "save")
+ @patch.object(VSGServiceInstancePolicy, "get_image")
+ @patch.object(VSGServiceInstancePolicy, "allocate_public_service_instance")
@patch.object(LeastLoadedNodeScheduler, "pick")
@patch.object(MockNode, "site_deployment")
@patch.object(MockInstance, "save")
@patch.object(MockInstance, "delete")
- @patch.object(VSGTenantPolicy, "port_set_parameter")
+ @patch.object(VSGServiceInstancePolicy, "port_set_parameter")
def test_find_or_make_instance_for_s_tag(self, port_set_parameter, instance_delete, instance_save, site_deployment,
pick, get_psi, get_image, tenant_save, volt,
vsgservice_objects, flavor_objects, node_objects, npt_objects):
@@ -565,7 +565,7 @@
# done setup mocks
# call the function under test
- instance = self.policy.find_or_make_instance_for_s_tag(self.tenant, self.tenant.volt.s_tag)
+ instance = self.policy.find_or_make_instance_for_s_tag(self.tenant)
# make sure Instance was created
self.assertNotEqual(instance, None)
@@ -600,7 +600,7 @@
# Allocate_public_service_instance should have been called
get_psi.assert_called()
- @patch.object(VSGTenantPolicy, "allocate_public_service_instance")
+ @patch.object(VSGServiceInstancePolicy, "allocate_public_service_instance")
def test_manage_address_service_instance(self, get_psi):
vrtenant = MockAddressManagerServiceInstance(public_ip="1.2.3.4", public_mac="01:02:03:04:05:06")
get_psi.return_value = vrtenant
diff --git a/xos/synchronizer/run-from-api.sh b/xos/synchronizer/run-from-api.sh
index 310065d..d52b264 100755
--- a/xos/synchronizer/run-from-api.sh
+++ b/xos/synchronizer/run-from-api.sh
@@ -14,4 +14,4 @@
# limitations under the License.
-python vcpe-synchronizer.py
+python vsg-synchronizer.py
diff --git a/xos/synchronizer/run-vtn.sh b/xos/synchronizer/run-vtn.sh
deleted file mode 100755
index b6564ac..0000000
--- a/xos/synchronizer/run-vtn.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#if [[ ! -e ./vcpe-observer.py ]]; then
-# ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-cp /root/setup/node_key $XOS_DIR/synchronizers/vsg/node_key
-chmod 0600 $XOS_DIR/synchronizers/vsg/node_key
-python vcpe-synchronizer.py -C $XOS_DIR/synchronizers/vsg/vsg_synchronizer_config
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
index e617504..677c4c7 100644
--- a/xos/synchronizer/steps/sync_monitoring_agent.yaml
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -27,8 +27,8 @@
rabbit_host: {{ rabbit_host }}
tasks:
- - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
+ - name: Verify if vsg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+ shell: pgrep -f [v]sg_stats_notifier | wc -l
register: cron_job_pids_count
- name: DEBUG
@@ -40,8 +40,8 @@
when: cron_job_pids_count.stdout == "0"
- name: Copy cron job to destination
- copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
- dest=/usr/local/share/vsg_monitoring_agent/vcpe_stats_notifier.py
+ copy: src=/opt/xos/synchronizers/vsg/vsg_stats_notifier.py
+ dest=/usr/local/share/vsg_monitoring_agent/vsg_stats_notifier.py
become: yes
when: cron_job_pids_count.stdout == "0"
@@ -50,8 +50,8 @@
become: yes
when: cron_job_pids_count.stdout == "0"
- - name: Initiate vcpe_stats_notifier cron job
- command: sudo python /usr/local/share/vsg_monitoring_agent/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+ - name: Initiate vsg_stats_notifier cron job
+ command: sudo python /usr/local/share/vsg_monitoring_agent/vsg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
async: 9999999999999999
poll: 0
become: yes
diff --git a/xos/synchronizer/steps/sync_vcpetenant.yaml b/xos/synchronizer/steps/sync_vcpetenant.yaml
deleted file mode 100644
index d3109e2..0000000
--- a/xos/synchronizer/steps/sync_vcpetenant.yaml
+++ /dev/null
@@ -1,195 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- become_user: ubuntu
-
- vars:
- cdn_enable: {{ cdn_enable }}
- dnsdemux_ip: {{ dnsdemux_ip }}
- firewall_enable: {{ firewall_enable }}
- url_filter_enable: {{ url_filter_enable }}
- c_tags:
- {% for c_tag in c_tags %}
- - {{ c_tag }}
- {% endfor %}
- s_tags:
- {% for s_tag in s_tags %}
- - {{ s_tag }}
- {% endfor %}
- firewall_rules:
- {% for firewall_rule in firewall_rules.split("\n") %}
- - {{ firewall_rule }}
- {% endfor %}
- cdn_prefixes:
- {% for prefix in cdn_prefixes %}
- - {{ prefix }}
- {% endfor %}
- bbs_addrs:
- {% for bbs_addr in bbs_addrs %}
- - {{ bbs_addr }}
- {% endfor %}
- dns_servers:
- {% for dns_server in dns_servers %}
- - {{ dns_server }}
- {% endfor %}
- nat_ip: {{ nat_ip }}
- nat_mac: {{ nat_mac }}
- lan_ip: {{ lan_ip }}
- lan_mac: {{ lan_mac }}
- wan_ip: {{ wan_ip }}
- wan_mac: {{ wan_mac }}
- wan_container_mac: {{ wan_container_mac }}
- wan_next_hop: 10.0.1.253 # FIX ME
- private_ip: {{ private_ip }}
- private_mac: {{ private_mac }}
- hpc_client_ip: {{ hpc_client_ip }}
- hpc_client_mac: {{ hpc_client_mac }}
- keystone_tenant_id: {{ keystone_tenant_id }}
- keystone_user_id: {{ keystone_user_id }}
- rabbit_user: {{ rabbit_user }}
- rabbit_password: {{ rabbit_password }}
- rabbit_host: {{ rabbit_host }}
- safe_browsing:
- {% for mac in safe_browsing_macs %}
- - {{ mac }}
- {% endfor %}
- uplink_speed: {{ uplink_speed }}
- downlink_speed: {{ downlink_speed }}
- status: {{ status }}
- enable_uverse: {{ enable_uverse }}
- url_filter_kind: {{ url_filter_kind }}
-
- tasks:
-{% if full_setup %}
- - name: Docker repository
- copy: src=/opt/xos/synchronizers/vsg/files/docker.list
- dest=/etc/apt/sources.list.d/docker.list
-
- - name: Import the repository key
- apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-
- - name: install Docker
- apt: name=lxc-docker state=present update_cache=yes
-
- - name: install python-setuptools
- apt: name=python-setuptools state=present
-
- - name: install pip
- easy_install: name=pip
-
- - name: install docker-py
- pip: name=docker-py version=0.5.3
-
- - name: install Pipework
- get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
- dest=/usr/local/bin/pipework
- mode=0755
-
- - name: make sure /etc/dnsmasq.d exists
- file: path=/etc/dnsmasq.d state=directory owner=root group=root
-
- - name: Disable resolvconf service
- shell: service resolvconf stop
- shell: echo manual > /etc/init/resolvconf.override
- shell: rm -f /etc/resolv.conf
-
- - name: Install resolv.conf
- copy: src=/opt/xos/synchronizers/vsg/files/vm-resolv.conf
- dest=/etc/resolv.conf
-
- - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
- register: cron_job_pids_count
-
-# - name: DEBUG
-# debug: var=cron_job_pids_count.stdout
-
-# - name: make sure ~/bin exists
-# file: path=~/bin state=directory owner=root group=root
-# when: cron_job_pids_count.stdout == "0"
-
- - name: Copy cron job to destination
- copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
- dest=/usr/local/sbin/vcpe_stats_notifier.py
- when: cron_job_pids_count.stdout == "0"
-
- - name: install python-kombu
- apt: name=python-kombu state=present
- when: cron_job_pids_count.stdout == "0"
-
- - name: Initiate vcpe_stats_notifier cron job
- command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
- async: 9999999999999999
- poll: 0
- when: cron_job_pids_count.stdout == "0"
-{% endif %}
-
- - name: vCPE upstart
- template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
-
- - name: vCPE startup script
- template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
- notify:
-# - restart vcpe
- - stop vcpe
- - remove container
- - start vcpe
-
- - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
- file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
-
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
-# These are samples, not necessary for correct function of demo
-
-# - name: networking info
-# template: src=/opt/xos/synchronizers/vsg/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
-
-# - name: firewall info
-# template: src=/opt/xos/synchronizers/vsg/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
-
- - name: Make sure vCPE service is running
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
- handlers:
- # Dnsmasq is automatically restarted in the container
- - name: restart dnsmasq
- shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
-
- - name: restart vcpe
- shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
-
- - name: stop vcpe
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
-
- - name: remove container
- docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
-
- - name: start vcpe
- service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
diff --git a/xos/synchronizer/steps/sync_vcpetenant_new.yaml b/xos/synchronizer/steps/sync_vcpetenant_new.yaml
deleted file mode 100644
index 723c1d9..0000000
--- a/xos/synchronizer/steps/sync_vcpetenant_new.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- become_user: {{ username }}
-
- vars:
- container_name: {{ container_name }}
- cdn_enable: {{ cdn_enable }}
- dnsdemux_ip: {{ dnsdemux_ip }}
- firewall_enable: {{ firewall_enable }}
- url_filter_enable: {{ url_filter_enable }}
- c_tags:
- {% for c_tag in c_tags %}
- - {{ c_tag }}
- {% endfor %}
- s_tags:
- {% for s_tag in s_tags %}
- - {{ s_tag }}
- {% endfor %}
- firewall_rules:
- {% for firewall_rule in firewall_rules.split("\n") %}
- - {{ firewall_rule }}
- {% endfor %}
- cdn_prefixes:
- {% for prefix in cdn_prefixes %}
- - {{ prefix }}
- {% endfor %}
- bbs_addrs:
- {% for bbs_addr in bbs_addrs %}
- - {{ bbs_addr }}
- {% endfor %}
- dns_servers:
- {% for dns_server in dns_servers %}
- - {{ dns_server }}
- {% endfor %}
- nat_ip: {{ nat_ip }}
- nat_mac: {{ nat_mac }}
- lan_ip: {{ lan_ip }}
- lan_mac: {{ lan_mac }}
- wan_ip: {{ wan_ip }}
- wan_mac: {{ wan_mac }}
- wan_container_mac: {{ wan_container_mac }}
- wan_next_hop: 10.0.1.253 # FIX ME
- private_ip: {{ private_ip }}
- private_mac: {{ private_mac }}
- hpc_client_ip: {{ hpc_client_ip }}
- hpc_client_mac: {{ hpc_client_mac }}
- keystone_tenant_id: {{ keystone_tenant_id }}
- keystone_user_id: {{ keystone_user_id }}
- rabbit_user: {{ rabbit_user }}
- rabbit_password: {{ rabbit_password }}
- rabbit_host: {{ rabbit_host }}
- safe_browsing:
- {% for mac in safe_browsing_macs %}
- - {{ mac }}
- {% endfor %}
- uplink_speed: {{ uplink_speed }}
- downlink_speed: {{ downlink_speed }}
- status: {{ status }}
- enable_uverse: {{ enable_uverse }}
- url_filter_kind: {{ url_filter_kind }}
-
- tasks:
- - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
- register: cron_job_pids_count
-
-# - name: DEBUG
-# debug: var=cron_job_pids_count.stdout
-
- - name: make sure ~/bin exists
- file: path=~/bin state=directory owner=root group=root
- when: cron_job_pids_count.stdout == "0"
-
- - name: Copy cron job to destination
- copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
- dest=~/bin/vcpe_stats_notifier.py
- when: cron_job_pids_count.stdout == "0"
-
- - name: install python-kombu
- apt: name=python-kombu state=present
- when: cron_job_pids_count.stdout == "0"
-
- - name: Initiate vcpe_stats_notifier cron job
- command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
- async: 9999999999999999
- poll: 0
- when: cron_job_pids_count.stdout == "0"
-
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: dnsmasq config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: create directory for "safe" config
- file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
-
- - name: dnsmasq "safe" config
- template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
- notify:
- - restart dnsmasq
-
- - name: copy base ufw files
- synchronize: src=/opt/xos/synchronizers/vsg/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
- notify:
- - reload ufw
-
- - name: redirection rules for safe DNS
- template: src=/opt/xos/synchronizers/vsg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
- notify:
- - reload ufw
-
- - name: base ufw setup uses /etc/rc.local
- copy: src=/opt/xos/synchronizers/vsg/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
- notify:
- - copy in /etc/rc.local
-
- handlers:
- # Dnsmasq is automatically restarted in the container
- - name: restart dnsmasq
- shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
-
- - name: reload ufw
- shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
-
- # Use docker cp instead of single-file volume
- # The reason is that changes to external file volume don't show up inside the container
- # Probably Ansible deletes and then recreates the external file, and container has old version
- - name: copy in /etc/rc.local
- shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
diff --git a/xos/synchronizer/steps/sync_vcpetenant.py b/xos/synchronizer/steps/sync_vsgserviceinstance.py
similarity index 79%
rename from xos/synchronizer/steps/sync_vcpetenant.py
rename to xos/synchronizer/steps/sync_vsgserviceinstance.py
index a27c8ce..b063065 100644
--- a/xos/synchronizer/steps/sync_vcpetenant.py
+++ b/xos/synchronizer/steps/sync_vsgserviceinstance.py
@@ -34,34 +34,34 @@
ENABLE_QUICK_UPDATE=False
-class SyncVSGTenant(SyncInstanceUsingAnsible):
- provides=[VSGTenant]
- observes=VSGTenant
+class SyncVSGServiceInstance(SyncInstanceUsingAnsible):
+ provides=[VSGServiceInstance]
+ observes=VSGServiceInstance
requested_interval=0
- template_name = "sync_vcpetenant.yaml"
+ template_name = "sync_vsgserviceinstance.yaml"
watches = [ModelLink(ServiceDependency,via='servicedependency'), ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
def __init__(self, *args, **kwargs):
- super(SyncVSGTenant, self).__init__(*args, **kwargs)
+ super(SyncVSGServiceInstance, self).__init__(*args, **kwargs)
- def get_vcpe_service(self, o):
+ def get_vsg_service(self, o):
if not o.owner:
return None
- vcpes = VSGService.objects.filter(id=o.owner.id)
- if not vcpes:
+ vsg_services = VSGService.objects.filter(id=o.owner.id)
+ if not vsg_services:
return None
- return vcpes[0]
+ return vsg_services[0]
def get_extra_attributes(self, o):
# This is a place to include extra attributes that aren't part of the
- # object itself. In the case of vCPE, we need to know:
- # 1) the addresses of dnsdemux, to setup dnsmasq in the vCPE
+ # object itself. In the case of vSG, we need to know:
+ # 1) the addresses of dnsdemux, to setup dnsmasq in the vSG
# 2) CDN prefixes, so we know what URLs to send to dnsdemux
- # 4) vlan_ids, for setting up networking in the vCPE VM
+ # 4) vlan_ids, for setting up networking in the vSG VM
- vcpe_service = self.get_vcpe_service(o)
+ vsg_service = self.get_vsg_service(o)
dnsdemux_ip = None
cdn_prefixes = []
@@ -87,7 +87,7 @@
full_setup = True
safe_macs=[]
- if vcpe_service.url_filter_kind == "safebrowsing":
+ if vsg_service.url_filter_kind == "safebrowsing":
if o.volt and o.volt.subscriber:
for user in o.volt.subscriber.devices:
level = user.get("level",None)
@@ -97,23 +97,23 @@
safe_macs.append(mac)
docker_opts = []
- if vcpe_service.docker_insecure_registry:
- reg_name = vcpe_service.docker_image_name.split("/",1)[0]
+ if vsg_service.docker_insecure_registry:
+ reg_name = vsg_service.docker_image_name.split("/",1)[0]
docker_opts.append("--insecure-registry " + reg_name)
fields = {"s_tags": s_tags,
"c_tags": c_tags,
- "docker_remote_image_name": vcpe_service.docker_image_name,
- "docker_local_image_name": vcpe_service.docker_image_name,
+ "docker_remote_image_name": vsg_service.docker_image_name,
+ "docker_local_image_name": vsg_service.docker_image_name,
"docker_opts": " ".join(docker_opts),
"dnsdemux_ip": dnsdemux_ip,
"cdn_prefixes": cdn_prefixes,
"full_setup": full_setup,
"isolation": o.instance.isolation,
"safe_browsing_macs": safe_macs,
- "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
- "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
- "url_filter_kind": vcpe_service.url_filter_kind }
+ "container_name": "vsg-%s-%s" % (s_tags[0], c_tags[0]),
+ "dns_servers": [x.strip() for x in vsg_service.dns_servers.split(",")],
+ "url_filter_kind": vsg_service.url_filter_kind }
# add in the sync_attributes that come from the SubscriberRoot object
@@ -125,7 +125,7 @@
def sync_fields(self, o, fields):
# the super causes the playbook to be run
- super(SyncVSGTenant, self).sync_fields(o, fields)
+ super(SyncVSGServiceInstance, self).sync_fields(o, fields)
def run_playbook(self, o, fields):
ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
@@ -135,17 +135,16 @@
logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
else:
if o.instance.isolation in ["container", "container_vm"]:
- raise Exception("probably not implemented")
- super(SyncVSGTenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
+ raise Exception("Not implemented")
else:
- super(SyncVSGTenant, self).run_playbook(o, fields, template_name="sync_vcpetenant_vtn.yaml")
+ super(SyncVSGServiceInstance, self).run_playbook(o, fields)
o.last_ansible_hash = ansible_hash
def sync_record(self, o):
if (not o.policed) or (o.policed<o.updated):
self.defer_sync(o, "waiting on model policy")
- super(SyncVSGTenant, self).sync_record(o)
+ super(SyncVSGServiceInstance, self).sync_record(o)
def delete_record(self, o):
if (not o.policed) or (o.policed<o.updated):
@@ -161,7 +160,7 @@
logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
return
- objs = VSGTenant.objects.all()
+ objs = VSGServiceInstance.objects.all()
for obj in objs:
if obj.owner.id != monitoring_agent_info.service.id:
logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
@@ -172,7 +171,7 @@
logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
return
- logger.info("handling watch notification for monitoring agent info:%s for VSGTenant object:%s" % (monitoring_agent_info, obj))
+ logger.info("handling watch notification for monitoring agent info:%s for VSGServiceInstance object:%s" % (monitoring_agent_info, obj))
#Run ansible playbook to update the routing table entries in the instance
fields = self.get_ansible_fields(instance)
@@ -187,5 +186,5 @@
fields["rabbit_host"] = url.hostname
template_name = "sync_monitoring_agent.yaml"
- super(SyncVSGTenant, self).run_playbook(obj, fields, template_name)
+ super(SyncVSGServiceInstance, self).run_playbook(obj, fields, template_name)
diff --git a/xos/synchronizer/steps/sync_vcpetenant_vtn.yaml b/xos/synchronizer/steps/sync_vsgserviceinstance.yaml
similarity index 86%
rename from xos/synchronizer/steps/sync_vcpetenant_vtn.yaml
rename to xos/synchronizer/steps/sync_vsgserviceinstance.yaml
index 81c9252..b7968b2 100644
--- a/xos/synchronizer/steps/sync_vcpetenant_vtn.yaml
+++ b/xos/synchronizer/steps/sync_vsgserviceinstance.yaml
@@ -125,14 +125,14 @@
stat: path=/root/environment_is_setup
register: environment_is_setup
-# Everything here is now baked into the vCPE image
+# Everything here is now baked into the vSG image
# Leave this spot in place for future temporary setup stuff
- name: Remember that the environment is setup, so we never do the above again
shell: touch /root/environment_is_setup
- - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
- shell: pgrep -f [v]cpe_stats_notifier | wc -l
+ - name: Verify if vsg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+ shell: pgrep -f [v]sg_stats_notifier | wc -l
register: cron_job_pids_count
# - name: DEBUG
@@ -143,16 +143,16 @@
# when: cron_job_pids_count.stdout == "0"
# - name: Copy cron job to destination
-# copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-# dest=/usr/local/sbin/vcpe_stats_notifier.py
+# copy: src=/opt/xos/synchronizers/vsg/vsg_stats_notifier.py
+# dest=/usr/local/sbin/vsg_stats_notifier.py
# when: cron_job_pids_count.stdout == "0"
# - name: install python-kombu
# apt: name=python-kombu state=present
# when: cron_job_pids_count.stdout == "0"
-# - name: Initiate vcpe_stats_notifier cron job
-# command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+# - name: Initiate vsg_stats_notifier cron job
+# command: sudo python /usr/local/sbin/vsg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
# async: 9999999999999999
# poll: 0
# when: cron_job_pids_count.stdout == "0"
@@ -163,22 +163,22 @@
notify:
- restart docker
- - name: vCPE upstart
- template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/{{ container_name }}.conf
+ - name: vSG upstart
+ template: src=/opt/xos/synchronizers/vsg/templates/vsg.conf.j2 dest=/etc/init/{{ container_name }}.conf
- - name: vCPE startup script
- template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
+ - name: vSG startup script
+ template: src=/opt/xos/synchronizers/vsg/templates/start-vsg-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
notify:
-# - restart vcpe
- - stop vcpe
+# - restart vsg
+ - stop vsg
- remove container
- - start vcpe
+ - start vsg
- name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
- - name: vCPE basic dnsmasq config
- copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
+ - name: vSG basic dnsmasq config
+ copy: src=/opt/xos/synchronizers/vsg/files/vsg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vsg.conf owner=root group=root
notify:
- restart dnsmasq
@@ -227,14 +227,14 @@
- name: generate the message page
template: src=/opt/xos/synchronizers/vsg/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
when: status != "enabled"
- #notify: restart vcpe
+ #notify: restart vsg
- name: remove simple webserver
file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
when: status == "enabled"
- #notify: restart vcpe
+ #notify: restart vsg
- - name: Make sure vCPE service is running
+ - name: Make sure vSG service is running
service: name={{ container_name }} state=started
handlers:
@@ -242,13 +242,13 @@
- name: restart dnsmasq
shell: docker exec {{ container_name }} killall dnsmasq
- - name: stop vcpe
+ - name: stop vsg
service: name={{ container_name }} state=stopped
- name: remove container
- docker: name={{ container_name }} state=absent image=docker-vcpe
+ docker: name={{ container_name }} state=absent image=docker-vsg
- - name: start vcpe
+ - name: start vsg
service: name={{ container_name }} state=started
- name: reload ufw
@@ -260,7 +260,7 @@
- name: reset bwlimits
shell: docker exec {{ container_name }} bash -c "/usr/local/sbin/bwlimit.sh restart"
- - name: restart vcpe
+ - name: restart vsg
shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
- name: restart docker
diff --git a/xos/synchronizer/templates/dnsmasq_safe_servers.j2 b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
index fdcaf4d..4082195 100644
--- a/xos/synchronizer/templates/dnsmasq_safe_servers.j2
+++ b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
@@ -13,7 +13,7 @@
# limitations under the License.
-# This file autogenerated by vCPE observer
+# This file autogenerated by vSG observer
# It contains a list of DNS servers for dnsmasq to use.
no-resolv
diff --git a/xos/synchronizer/templates/dnsmasq_servers.j2 b/xos/synchronizer/templates/dnsmasq_servers.j2
index f1b5578..36a9f54 100644
--- a/xos/synchronizer/templates/dnsmasq_servers.j2
+++ b/xos/synchronizer/templates/dnsmasq_servers.j2
@@ -13,7 +13,7 @@
# limitations under the License.
-# This file autogenerated by vCPE observer
+# This file autogenerated by vSG observer
# It contains a list of DNS servers for dnsmasq to use.
no-resolv
diff --git a/xos/synchronizer/templates/start-vcpe.sh.j2 b/xos/synchronizer/templates/start-vcpe.sh.j2
deleted file mode 100755
index 76d2d32..0000000
--- a/xos/synchronizer/templates/start-vcpe.sh.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-
-function mac_to_iface {
- MAC=$1
- ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-STAG={{ s_tags[0] }}
-CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
-
-docker inspect $VCPE > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
- docker pull andybavier/docker-vcpe
- docker run -d --name=$VCPE --privileged=true --net=none -v /etc/$VCPE/dnsmasq.d:/etc/dnsmasq.d andybavier/docker-vcpe
-else
- docker start $VCPE
-fi
-
-# Set up networking via pipework
-WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-
-# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
-# Assumes that br-lan has been set up appropriately by a previous step
-LAN_IFACE=br-lan
-ifconfig $LAN_IFACE >> /dev/null
-if [ "$?" == 0 ]
-then
- ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
- ifconfig $LAN_IFACE.$STAG up
- docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
-fi
-
-#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now can start up dnsmasq
-docker exec $VCPE service dnsmasq start
-
-# Attach to container
-docker start -a $VCPE
diff --git a/xos/synchronizer/templates/start-vcpe-vtn.sh.j2 b/xos/synchronizer/templates/start-vsg-vtn.sh.j2
similarity index 61%
rename from xos/synchronizer/templates/start-vcpe-vtn.sh.j2
rename to xos/synchronizer/templates/start-vsg-vtn.sh.j2
index 730d4ad..ff0b4b0 100644
--- a/xos/synchronizer/templates/start-vcpe-vtn.sh.j2
+++ b/xos/synchronizer/templates/start-vsg-vtn.sh.j2
@@ -24,25 +24,25 @@
STAG={{ s_tags[0] }}
CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
+VSG=vsg-$STAG-$CTAG
-docker inspect $VCPE > /dev/null 2>&1
+docker inspect $VSG > /dev/null 2>&1
if [ "$?" == 1 ]
then
docker pull {{ docker_remote_image_name }}
- docker run -d --name=$VCPE --privileged=true --net=none \
- -v /var/container_volumes/$VCPE/mount:/mount:ro \
- -v /var/container_volumes/$VCPE/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
- -v /var/container_volumes/$VCPE/etc/service/message:/etc/service/message \
- -v /var/container_volumes/$VCPE/usr/local/sbin:/usr/local/sbin:ro \
+ docker run -d --name=$VSG --privileged=true --net=none \
+ -v /var/container_volumes/$VSG/mount:/mount:ro \
+ -v /var/container_volumes/$VSG/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
+ -v /var/container_volumes/$VSG/etc/service/message:/etc/service/message \
+ -v /var/container_volumes/$VSG/usr/local/sbin:/usr/local/sbin:ro \
{{ docker_local_image_name }}
else
- docker start $VCPE
+ docker start $VSG
fi
# Set up networking via pipework
WAN_IFACE=br-wan
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
+docker exec $VSG ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VSG {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
LAN_IFACE=eth0
ifconfig $LAN_IFACE >> /dev/null
@@ -50,14 +50,14 @@
then
ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
ifconfig $LAN_IFACE.$STAG up
- docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+ docker exec $VSG ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VSG 192.168.0.1/24 @$CTAG
fi
#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#docker exec $VSG ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VSG {{ hpc_client_ip }}/24
# Make sure VM's eth0 (hpc_client) has no IP address
#ifconfig $HPC_IFACE 0.0.0.0
# Attach to container
-docker start -a $VCPE
+docker start -a $VSG
diff --git a/xos/synchronizer/templates/vlan_sample.j2 b/xos/synchronizer/templates/vlan_sample.j2
index 404a539..e1ab857 100644
--- a/xos/synchronizer/templates/vlan_sample.j2
+++ b/xos/synchronizer/templates/vlan_sample.j2
@@ -13,7 +13,7 @@
# limitations under the License.
-# below is a list of all vlan_ids associated with this vcpe
+# below is a list of all vlan_ids associated with this vsg
{% for vlan_id in c_tags %}
{{ vlan_id }}
diff --git a/xos/synchronizer/templates/vcpe.conf.j2 b/xos/synchronizer/templates/vsg.conf.j2
similarity index 82%
rename from xos/synchronizer/templates/vcpe.conf.j2
rename to xos/synchronizer/templates/vsg.conf.j2
index 39e74ae..797f645 100644
--- a/xos/synchronizer/templates/vcpe.conf.j2
+++ b/xos/synchronizer/templates/vsg.conf.j2
@@ -13,13 +13,13 @@
# limitations under the License.
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
+# Upstart script for vSG
+description "vSG container"
+author "andy@opennetworking.org"
start on filesystem and started docker
stop on runlevel [!2345]
respawn
script
- /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
+ /usr/local/sbin/start-vsg-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
end script
diff --git a/xos/synchronizer/vcpe-synchronizer.py b/xos/synchronizer/vsg-synchronizer.py
similarity index 100%
rename from xos/synchronizer/vcpe-synchronizer.py
rename to xos/synchronizer/vsg-synchronizer.py
diff --git a/xos/synchronizer/vsg_from_api_config b/xos/synchronizer/vsg_from_api_config
deleted file mode 100644
index 65f021f..0000000
--- a/xos/synchronizer/vsg_from_api_config
+++ /dev/null
@@ -1,20 +0,0 @@
-# Sets options for the synchronizer
-[observer]
-name=vsg
-dependency_graph=/opt/xos/synchronizers/vsg/model-deps
-steps_dir=/opt/xos/synchronizers/vsg/steps
-sys_dir=/opt/xos/synchronizers/vsg/sys
-#logfile=/var/log/xos_backend.log
-log_file=console
-log_level=debug
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-proxy_ssh=True
-proxy_ssh_key=/opt/cord_profile/node_key
-proxy_ssh_user=root
-accessor_kind=api
-accessor_password=@/opt/xos/services/vsg/credentials/xosadmin@opencord.org
-
-[networking]
-use_vtn=True
diff --git a/xos/synchronizer/vcpe_stats_notifier.py b/xos/synchronizer/vsg_stats_notifier.py
similarity index 100%
rename from xos/synchronizer/vcpe_stats_notifier.py
rename to xos/synchronizer/vsg_stats_notifier.py