CORD-1908: cleanup VSG service

Change-Id: Ib7fa0a68dbba185b30d7cd1ea1b425ad3d33a8d3
diff --git a/xos/admin.py b/xos/admin.py
deleted file mode 100644
index 169d58a..0000000
--- a/xos/admin.py
+++ /dev/null
@@ -1,121 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from django.contrib import admin
-
-from services.vsg.models import *
-from django import forms
-from django.utils.safestring import mark_safe
-from django.contrib.auth.admin import UserAdmin
-from django.contrib.admin.widgets import FilteredSelectMultiple
-from django.contrib.auth.forms import ReadOnlyPasswordHashField
-from django.contrib.auth.signals import user_logged_in
-from django.utils import timezone
-from django.contrib.contenttypes import generic
-from suit.widgets import LinkedSelect
-from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, SubscriberLinkInline, ProviderLinkInline, ProviderDependencyInline,SubscriberDependencyInline
-from core.middleware import get_request
-
-from functools import update_wrapper
-from django.contrib.admin.views.main import ChangeList
-from django.core.urlresolvers import reverse
-from django.contrib.admin.utils import quote
-
-class VSGServiceAdmin(ReadOnlyAwareAdmin):
-    model = VSGService
-    verbose_name = "vSG Service"
-    verbose_name_plural = "vSG Service"
-    list_display = ("backend_status_icon", "name", "enabled")
-    list_display_links = ('backend_status_icon', 'name', )
-    fieldsets = [(None,             {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "view_url", "icon_url", "service_specific_attribute", "node_label"],
-                                     'classes':['suit-tab suit-tab-general']}),
-                 ("backend config", {'fields': [ "url_filter_kind" ],
-                                     'classes':['suit-tab suit-tab-backend']}),
-                 ("vSG config", {'fields': ["dns_servers", "docker_image_name", "docker_insecure_registry"],
-                                     'classes':['suit-tab suit-tab-vsg']}) ]
-    readonly_fields = ('backend_status_text', "service_specific_attribute")
-    inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline, ProviderDependencyInline,SubscriberDependencyInline]
-
-    extracontext_registered_admins = True
-
-    user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
-
-    suit_form_tabs =(('general', 'Service Details'),
-        ('backend', 'Backend Config'),
-        ('vsg', 'vSG Config'),
-        ('administration', 'Administration'),
-        #('tools', 'Tools'),
-        ('slices','Slices'),
-        ('serviceattrs','Additional Attributes'),
-        ('servicetenants', 'Dependencies'),
-        ('serviceprivileges','Privileges') ,
-    )
-
-    suit_form_includes = (('vcpeadmin.html', 'top', 'administration'),
-                           ) #('hpctools.html', 'top', 'tools') )
-
-    def get_queryset(self, request):
-        return VSGService.select_by_user(request.user)
-
-class VSGTenantForm(forms.ModelForm):
-    last_ansible_hash = forms.CharField(required=False)
-    wan_container_ip = forms.CharField(required=False)
-    wan_container_mac = forms.CharField(required=False)
-
-    def __init__(self,*args,**kwargs):
-        super (VSGTenantForm,self ).__init__(*args,**kwargs)
-        self.fields['owner'].queryset = VSGService.objects.all()
-        if self.instance:
-            # fields for the attributes
-            self.fields['last_ansible_hash'].initial = self.instance.last_ansible_hash
-            self.fields['wan_container_ip'].initial = self.instance.wan_container_ip
-            self.fields['wan_container_mac'].initial = self.instance.wan_container_mac
-        if (not self.instance) or (not self.instance.pk):
-            # default fields for an 'add' form
-            self.fields['creator'].initial = get_request().user
-            if VSGService.objects.exists():
-               self.fields["owner"].initial = VSGService.objects.all()[0]
-
-    def save(self, commit=True):
-        self.instance.creator = self.cleaned_data.get("creator")
-        self.instance.instance = self.cleaned_data.get("instance")
-        self.instance.last_ansible_hash = self.cleaned_data.get("last_ansible_hash")
-        return super(VSGTenantForm, self).save(commit=commit)
-
-    class Meta:
-        model = VSGTenant
-        fields = '__all__'
-
-class VSGTenantAdmin(ReadOnlyAwareAdmin):
-    list_display = ('backend_status_icon', 'id', )
-    list_display_links = ('backend_status_icon', 'id')
-    fieldsets = [ (None, {'fields': ['backend_status_text', 'owner', 'service_specific_id',
-                                     'wan_container_ip', 'wan_container_mac', 'creator', 'instance', 'last_ansible_hash'],
-                          'classes':['suit-tab suit-tab-general']})]
-    readonly_fields = ('backend_status_text', 'service_specific_attribute', 'wan_container_ip', 'wan_container_mac')
-    inlines = (ProviderLinkInline, SubscriberLinkInline)
-    form = VSGTenantForm
-
-    suit_form_tabs = (('general','Details'),
-                      ('servicelinks','Links'),)
-
-    def get_queryset(self, request):
-        return VSGTenant.select_by_user(request.user)
-
-
-admin.site.register(VSGService, VSGServiceAdmin)
-admin.site.register(VSGTenant, VSGTenantAdmin)
-
diff --git a/xos/attic/header.py b/xos/attic/header.py
index 7883c2d..0068eb3 100644
--- a/xos/attic/header.py
+++ b/xos/attic/header.py
@@ -24,6 +24,7 @@
 from operator import itemgetter, attrgetter, methodcaller
 from core.models import Tag
 from core.models.service import LeastLoadedNodeScheduler
+# TODO: Break hardcoded dependencies / See if the addressmanager can be removed
 from services.addressmanager.models import AddressManagerService, AddressManagerServiceInstance
 import traceback
 from xos.exceptions import *
@@ -32,6 +33,5 @@
 class ConfigurationError(Exception):
     pass
 
-VCPE_KIND = "vCPE"
-CORD_SUBSCRIBER_KIND = "CordSubscriberRoot"
+
 
diff --git a/xos/attic/vsgserviceinstance_model.py b/xos/attic/vsgserviceinstance_model.py
new file mode 100644
index 0000000..08ede12
--- /dev/null
+++ b/xos/attic/vsgserviceinstance_model.py
@@ -0,0 +1,29 @@
+
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def __init__(self, *args, **kwargs):
+    super(VSGServiceInstance, self).__init__(*args, **kwargs)
+
+def __xos_save_base(self, *args, **kwargs):
+    if not self.creator:
+        if not getattr(self, "caller", None):
+            # caller must be set when creating a vCPE since it creates a slice
+            raise XOSProgrammingError("VSGServiceInstance's self.caller was not set")
+        self.creator = self.caller
+        if not self.creator:
+            raise XOSProgrammingError("VSGServiceInstance's self.creator was not set")
+
+    return False
diff --git a/xos/attic/vsgtenant_model.py b/xos/attic/vsgtenant_model.py
deleted file mode 100644
index 0ef8284..0000000
--- a/xos/attic/vsgtenant_model.py
+++ /dev/null
@@ -1,129 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-sync_attributes = ("wan_container_ip", "wan_container_mac", "wan_container_netbits",
-                   "wan_container_gateway_ip", "wan_container_gateway_mac",
-                   "wan_vm_ip", "wan_vm_mac")
-
-def __init__(self, *args, **kwargs):
-    super(VSGTenant, self).__init__(*args, **kwargs)
-    self.cached_address_service_instance=None
-
-@property
-def address_service_instance(self):
-    address_service_instance = self.get_newest_subscribed_tenant(AddressManagerServiceInstance)
-    if not address_service_instance:
-        return None
-
-    # always return the same object when possible
-    if (self.cached_address_service_instance) and (self.cached_address_service_instance.id == address_service_instance.id):
-        return self.cached_address_service_instance
-
-    address_service_instance.caller = self.creator
-    self.cached_address_service_instance = address_service_instance
-    return address_service_instance
-
-@address_service_instance.setter
-def address_service_instance(self, value):
-    raise XOSConfigurationError("VSGTenant.address_service_instance setter is not implemented")
-
-@property
-def volt(self):
-    from services.volt.models import VOLTTenant
-    if not self.subscriber_tenant:
-        return None
-    volts = VOLTTenant.objects.filter(id=self.subscriber_tenant.id)
-    if not volts:
-        return None
-    return volts[0]
-
-@volt.setter
-def volt(self, value):
-    raise XOSConfigurationError("VSGTenant.volt setter is not implemented")
-
-@property
-def ssh_command(self):
-    if self.instance:
-        return self.instance.get_ssh_command()
-    else:
-        return "no-instance"
-
-def get_address_service_instance_field(self, name, default=None):
-    if self.address_service_instance:
-        return getattr(self.address_service_instance, name, default)
-    else:
-        return default
-
-@property
-def wan_container_ip(self):
-    return self.get_address_service_instance_field("public_ip", None)
-
-@property
-def wan_container_mac(self):
-    return self.get_address_service_instance_field("public_mac", None)
-
-@property
-def wan_container_netbits(self):
-    return self.get_address_service_instance_field("netbits", None)
-
-@property
-def wan_container_gateway_ip(self):
-    return self.get_address_service_instance_field("gateway_ip", None)
-
-@property
-def wan_container_gateway_mac(self):
-    return self.get_address_service_instance_field("gateway_mac", None)
-
-@property
-def wan_vm_ip(self):
-    tags = Tag.objects.filter(content_type=self.instance.get_content_type_key(), object_id=self.instance.id, name="vm_vrouter_tenant")
-    if tags:
-        tenant = AddressManagerServiceInstance.objects.get(id=tags[0].value)
-        return tenant.public_ip
-    else:
-        raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
-
-@property
-def wan_vm_mac(self):
-    tags = Tag.objects.filter(content_type=self.instance.get_content_type_key(), object_id=self.instance.id, name="vm_vrouter_tenant")
-    if tags:
-        tenant = AddressManagerServiceInstance.objects.get(id=tags[0].value)
-        return tenant.public_mac
-    else:
-        raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
-
-@property
-def is_synced(self):
-    return (self.enacted is not None) and (self.enacted >= self.updated)
-
-@is_synced.setter
-def is_synced(self, value):
-    pass
-
-def __xos_save_base(self, *args, **kwargs):
-    if not self.creator:
-        if not getattr(self, "caller", None):
-            # caller must be set when creating a vCPE since it creates a slice
-            raise XOSProgrammingError("VSGTenant's self.caller was not set")
-        self.creator = self.caller
-        if not self.creator:
-            raise XOSProgrammingError("VSGTenant's self.creator was not set")
-
-    return False
-
-def delete(self, *args, **kwargs):
-    super(VSGTenant, self).delete(*args, **kwargs)
-
diff --git a/xos/synchronizer/files/etc/rc.local b/xos/synchronizer/files/etc/rc.local
deleted file mode 100755
index 49ee927..0000000
--- a/xos/synchronizer/files/etc/rc.local
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh -e
-#
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# 
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-#
-# By default this script does nothing.
-
-ufw enable
-ufw allow bootps
-ufw allow from 192.168.0.0/24
-ufw route allow in on eth1 out on eth0
-ufw route allow in on eth1 out on eth2
-
-BWLIMIT=/usr/local/sbin/bwlimit.sh
-[ -e $BWLIMIT ] && $BWLIMIT start || true
-
-exit 0
diff --git a/xos/synchronizer/files/vcpe.conf b/xos/synchronizer/files/vcpe.conf
deleted file mode 100644
index 752c57a..0000000
--- a/xos/synchronizer/files/vcpe.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-# Upstart script for vCPE 
-description "vCPE container"
-author "andy@onlab.us"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
-  /usr/local/sbin/start-vcpe.sh
-end script
diff --git a/xos/synchronizer/files/vm-resolv.conf b/xos/synchronizer/files/vm-resolv.conf
deleted file mode 100644
index cae093a..0000000
--- a/xos/synchronizer/files/vm-resolv.conf
+++ /dev/null
@@ -1 +0,0 @@
-nameserver 8.8.8.8
diff --git a/xos/synchronizer/files/vcpe.dnsmasq b/xos/synchronizer/files/vsg.dnsmasq
similarity index 100%
rename from xos/synchronizer/files/vcpe.dnsmasq
rename to xos/synchronizer/files/vsg.dnsmasq
diff --git a/xos/synchronizer/model_policies/model_policy_vsgtenant.py b/xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
similarity index 62%
rename from xos/synchronizer/model_policies/model_policy_vsgtenant.py
rename to xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
index 33b8877..e5a0562 100644
--- a/xos/synchronizer/model_policies/model_policy_vsgtenant.py
+++ b/xos/synchronizer/model_policies/model_policy_vsgserviceinstance.py
@@ -18,56 +18,56 @@
 from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import TenantWithContainerPolicy, LeastLoadedNodeScheduler
 from synchronizers.new_base.exceptions import *
 
-class VSGTenantPolicy(TenantWithContainerPolicy):
-    model_name = "VSGTenant"
+class VSGServiceInstancePolicy(TenantWithContainerPolicy):
+    model_name = "VSGServiceInstance"
 
-    def handle_create(self, tenant):
-        return self.handle_update(tenant)
+    def handle_create(self, service_instance):
+        return self.handle_update(service_instance)
 
-    def handle_update(self, tenant):
-        if (tenant.link_deleted_count>0) and (not tenant.provided_links.exists()):
+    def handle_update(self, service_instance):
+        if (service_instance.link_deleted_count>0) and (not service_instance.provided_links.exists()):
             # if the last provided_link has just gone away, then self-destruct
-            self.logger.info("The last provided link has been deleted -- self-destructing.");
+            self.logger.info("The last provided link has been deleted -- self-destructing.")
             # TODO: We shouldn't have to call handle_delete ourselves. The model policy framework should handle this
-            #       for us, but it isn't. I think that's happening is that tenant.delete() isn't setting a new
+            #       for us, but it isn't. I think what's happening is that serviceinstance.delete() isn't setting a new
             #       updated timestamp, since there's no way to pass `always_update_timestamp`, and therefore the
             #       policy framework doesn't know that the object has changed and needs new policies. For now, the
             #       workaround is to just call handle_delete ourselves.
-            self.handle_delete(tenant)
-            # Note that if we deleted the Instance in handle_delete, then django may have cascade-deleted the tenant
-            # by now. Thus we have to guard our delete, to check that the tenant still exists.
-            if VSGTenant.objects.filter(id=tenant.id).exists():
-                tenant.delete()
+            self.handle_delete(service_instance)
+            # Note that if we deleted the Instance in handle_delete, then django may have cascade-deleted the service
+            # instance by now. Thus we have to guard our delete, to check that the service instance still exists.
+            if VSGServiceInstance.objects.filter(id=service_instance.id).exists():
+                service_instance.delete()
             else:
-                self.logger.info("Tenant %s is already deleted" % tenant)
+                self.logger.info("Tenant %s is already deleted" % service_instance)
             return
 
-        self.manage_container(tenant)
-        self.manage_address_service_instance(tenant)
-        self.cleanup_orphans(tenant)
+        self.manage_container(service_instance)
+        self.manage_address_service_instance(service_instance)
+        self.cleanup_orphans(service_instance)
 
-    def handle_delete(self, tenant):
-        if tenant.instance and (not tenant.instance.deleted):
-            all_tenants_this_instance = VSGTenant.objects.filter(instance_id=tenant.instance.id)
-            other_tenants_this_instance = [x for x in all_tenants_this_instance if x.id != tenant.id]
-            if (not other_tenants_this_instance):
-                self.logger.info("VSG Instance %s is now unused -- deleting" % tenant.instance)
-                self.delete_instance(tenant, tenant.instance)
+    def handle_delete(self, service_instance):
+        if service_instance.instance and (not service_instance.instance.deleted):
+            all_service_instances_this_instance = VSGServiceInstance.objects.filter(instance_id=service_instance.instance.id)
+            other_service_instances_this_instance = [x for x in all_service_instances_this_instance if x.id != service_instance.id]
+            if (not other_service_instances_this_instance):
+                self.logger.info("VSG Instance %s is now unused -- deleting" % service_instance.instance)
+                self.delete_instance(service_instance, service_instance.instance)
             else:
-                self.logger.info("VSG Instance %s has %d other service instances attached" % (tenant.instance, len(other_tenants_this_instance)))
+                self.logger.info("VSG Instance %s has %d other service instances attached" % (service_instance.instance, len(other_service_instances_this_instance)))
 
-    def manage_address_service_instance(self, tenant):
-        if tenant.deleted:
+    def manage_address_service_instance(self, service_instance):
+        if service_instance.deleted:
             return
 
-        if tenant.address_service_instance is None:
-            address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=tenant)
+        if service_instance.address_service_instance is None:
+            address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=service_instance)
             address_service_instance.save()
 
-    def cleanup_orphans(self, tenant):
+    def cleanup_orphans(self, service_instance):
         # ensure vSG only has one AddressManagerServiceInstance
-        cur_asi = tenant.address_service_instance
-        for link in tenant.subscribed_links.all():
+        cur_asi = service_instance.address_service_instance
+        for link in service_instance.subscribed_links.all():
             # TODO: hardcoded dependency
             # cast from ServiceInstance to AddressManagerServiceInstance
             asis = AddressManagerServiceInstance.objects.filter(id = link.provider_service_instance.id)
@@ -75,8 +75,8 @@
                 if (not cur_asi) or (asi.id != cur_asi.id):
                     asi.delete()
 
-    def get_vsg_service(self, tenant):
-        return VSGService.objects.get(id=tenant.owner.id)
+    def get_vsg_service(self, service_instance):
+        return VSGService.objects.get(id=service_instance.owner.id)
 
     def find_instance_for_s_tag(self, s_tag):
         tags = Tag.objects.filter(name="s_tag", value=s_tag)
@@ -85,59 +85,59 @@
 
         return None
 
-    def find_or_make_instance_for_s_tag(self, tenant, s_tag):
-        instance = self.find_instance_for_s_tag(tenant.volt.s_tag)
+    def find_or_make_instance_for_s_tag(self, service_instance):
+        instance = self.find_instance_for_s_tag(service_instance.volt.s_tag)
         if instance:
             if instance.no_sync:
                 # if no_sync is still set, then perhaps we failed while saving it and need to retry.
-                self.save_instance(tenant, instance)
+                self.save_instance(service_instance, instance)
             return instance
 
-        desired_image = self.get_image(tenant)
+        desired_image = self.get_image(service_instance)
 
         flavors = Flavor.objects.filter(name="m1.small")
         if not flavors:
             raise SynchronizerConfigurationError("No m1.small flavor")
 
-        slice = tenant.owner.slices.first()
+        slice = service_instance.owner.slices.first()
 
-        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(tenant).node_label).pick()
+        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(service_instance).node_label).pick()
 
         assert (slice is not None)
         assert (node is not None)
         assert (desired_image is not None)
-        assert (tenant.creator is not None)
+        assert (service_instance.creator is not None)
         assert (node.site_deployment.deployment is not None)
         assert (desired_image is not None)
 
         instance = Instance(slice=slice,
                             node=node,
                             image=desired_image,
-                            creator=tenant.creator,
+                            creator=service_instance.creator,
                             deployment=node.site_deployment.deployment,
                             flavor=flavors[0],
                             isolation=slice.default_isolation,
                             parent=parent)
 
-        self.save_instance(tenant, instance)
+        self.save_instance(service_instance, instance)
 
         return instance
 
-    def manage_container(self, tenant):
-        if tenant.deleted:
+    def manage_container(self, service_instance):
+        if service_instance.deleted:
             return
 
-        if not tenant.volt:
+        if not service_instance.volt:
             raise SynchronizerConfigurationError("This VSG container has no volt")
 
-        if tenant.instance:
+        if service_instance.instance:
             # We're good.
             return
 
-        instance = self.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
-        tenant.instance = instance
+        instance = self.find_or_make_instance_for_s_tag(service_instance)
+        service_instance.instance = instance
         # TODO: possible for partial failure here?
-        tenant.save()
+        service_instance.save()
 
     def find_or_make_port(self, instance, network, **kwargs):
         port = Port.objects.filter(instance_id=instance.id, network_id=network.id)
@@ -148,8 +148,8 @@
             port.save()
         return port
 
-    def get_lan_network(self, tenant, instance):
-        slice = tenant.owner.slices.all()[0]
+    def get_lan_network(self, service_instance, instance):
+        slice = service_instance.owner.slices.all()[0]
         # there should only be one network private network, and its template should not be the management template
         lan_networks = [x for x in slice.networks.all() if
                         x.template.visibility == "private" and (not "management" in x.template.name)]
@@ -171,9 +171,9 @@
             p = NetworkParameter(parameter=pt, content_type=port.self_content_type_id, object_id=port.id, value=str(value))
             p.save()
 
-    def delete_instance(self, tenant, instance):
+    def delete_instance(self, service_instance, instance):
         # delete the `s_tag` tags
-        tags = Tag.objects.filter(service_id=tenant.owner.id, content_type=instance.self_content_type_id,
+        tags = Tag.objects.filter(service_id=service_instance.owner.id, content_type=instance.self_content_type_id,
                                   object_id=instance.id, name="s_tag")
         for tag in tags:
             tag.delete()
@@ -192,28 +192,28 @@
 
         instance.delete()
 
-    def save_instance(self, tenant, instance):
+    def save_instance(self, service_instance, instance):
         instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
         instance.no_sync = True   # prevent instance from being synced until we're done with it
-        super(VSGTenantPolicy, self).save_instance(instance)
+        super(VSGServiceInstancePolicy, self).save_instance(instance)
         try:
             if instance.isolation in ["container", "container_vm"]:
                 raise Exception("Not supported")
 
             if instance.isolation in ["vm"]:
-                lan_network = self.get_lan_network(tenant, instance)
+                lan_network = self.get_lan_network(service_instance, instance)
                 port = self.find_or_make_port(instance, lan_network)
-                self.port_set_parameter(port, "c_tag", tenant.volt.c_tag)
-                self.port_set_parameter(port, "s_tag", tenant.volt.s_tag)
-                self.port_set_parameter(port, "neutron_port_name", "stag-%s" % tenant.volt.s_tag)
+                self.port_set_parameter(port, "c_tag", service_instance.volt.c_tag)
+                self.port_set_parameter(port, "s_tag", service_instance.volt.s_tag)
+                self.port_set_parameter(port, "neutron_port_name", "stag-%s" % service_instance.volt.s_tag)
                 port.save()
 
             # tag the instance with the s-tag, so we can easily find the
             # instance later
-            if tenant.volt and tenant.volt.s_tag:
-                tags = Tag.objects.filter(name="s_tag", value=tenant.volt.s_tag)
+            if service_instance.volt and service_instance.volt.s_tag:
+                tags = Tag.objects.filter(name="s_tag", value=service_instance.volt.s_tag)
                 if not tags:
-                    tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(tenant.volt.s_tag))
+                    tag = Tag(service=service_instance.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(service_instance.volt.s_tag))
                     tag.save()
 
             # VTN-CORD needs a WAN address for the VM, so that the VM can
@@ -221,15 +221,15 @@
             tags = Tag.objects.filter(content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant")
             if not tags:
                 address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_vsg",
-                                                                                 subscriber_service=tenant.owner)
+                                                                                 subscriber_service=service_instance.owner)
                 address_service_instance.set_attribute("tenant_for_instance_id", instance.id)
                 address_service_instance.save()
                 # TODO: potential partial failure
-                tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % address_service_instance.id)
+                tag = Tag(service=service_instance.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % address_service_instance.id)
                 tag.save()
 
             instance.no_sync = False   # allow the synchronizer to run now
-            super(VSGTenantPolicy, self).save_instance(instance)
+            super(VSGServiceInstancePolicy, self).save_instance(instance)
         except:
             # need to clean up any failures here
             raise
diff --git a/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py b/xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
similarity index 87%
rename from xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
rename to xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
index 4fb4542..365604a 100644
--- a/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
+++ b/xos/synchronizer/model_policies/test_model_policy_vsgserviceinstance.py
@@ -27,8 +27,8 @@
 import synchronizers.new_base.modelaccessor
 
 import synchronizers.new_base.model_policies.model_policy_tenantwithcontainer
-import model_policy_vsgtenant
-from model_policy_vsgtenant import VSGTenantPolicy
+import model_policy_vsgserviceinstance
+from model_policy_vsgserviceinstance import VSGServiceInstancePolicy
 from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import LeastLoadedNodeScheduler
 
 # ---------------------------------------------------------------------------------------------------------------------
@@ -227,9 +227,9 @@
     def __init__(self, **kwargs):
         super(MockVSGService, self).__init__(**kwargs)
 
-class MockVSGTenantObjects(MockObjectStore): pass
-class MockVSGTenant(MockObject):
-    objects = get_MockObjectStore("VSGTenant")
+class MockVSGServiceInstanceObjects(MockObjectStore): pass
+class MockVSGServiceInstance(MockObject):
+    objects = get_MockObjectStore("VSGServiceInstance")
     owner = None
     deleted = False
     instance = None
@@ -247,15 +247,15 @@
 
 class TestModelPolicyVsgTenant(unittest.TestCase):
     def setUp(self):
-        # Some of the functions we call have side-effects. For example, creating a VSGTenant may lead to creation of
+        # Some of the functions we call have side-effects. For example, creating a VSGServiceInstance may lead to creation of
         # tags. Ideally, this wouldn't happen, but it does. So make sure we reset the world.
         for store in AllMockObjectStores:
             store.items = []
 
-        self.policy = VSGTenantPolicy()
-        self.tenant = MockVSGTenant()
+        self.policy = VSGServiceInstancePolicy()
+        self.tenant = MockVSGServiceInstance()
         self.user = MockUser(email="testadmin@test.org")
-        self.tenant = MockVSGTenant(creator=self.user, id=1)
+        self.tenant = MockVSGServiceInstance(creator=self.user, id=1)
         self.flavor = MockFlavor(name="m1.small")
         self.npt_ctag = MockNetworkParameterType(name="c_tag", id=1)
         self.npt_stag = MockNetworkParameterType(name="s_tag", id=2)
@@ -270,32 +270,32 @@
         synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Flavor = MockFlavor
         synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Tag = MockTag
         synchronizers.new_base.model_policies.model_policy_tenantwithcontainer.Node = MockNode
-        model_policy_vsgtenant.Instance = MockInstance
-        model_policy_vsgtenant.Flavor = MockFlavor
-        model_policy_vsgtenant.Tag = MockTag
-        model_policy_vsgtenant.VSGService = MockVSGService
-        model_policy_vsgtenant.VSGTenant = MockVSGTenant
-        model_policy_vsgtenant.Node = MockNode
-        model_policy_vsgtenant.Port = MockPort
-        model_policy_vsgtenant.NetworkParameterType = MockNetworkParameterType
-        model_policy_vsgtenant.NetworkParameter = MockNetworkParameter
-        model_policy_vsgtenant.ServiceInstance = MockServiceInstance
-        model_policy_vsgtenant.AddressManagerServiceInstance = MockAddressManagerServiceInstance
+        model_policy_vsgserviceinstance.Instance = MockInstance
+        model_policy_vsgserviceinstance.Flavor = MockFlavor
+        model_policy_vsgserviceinstance.Tag = MockTag
+        model_policy_vsgserviceinstance.VSGService = MockVSGService
+        model_policy_vsgserviceinstance.VSGServiceInstance = MockVSGServiceInstance
+        model_policy_vsgserviceinstance.Node = MockNode
+        model_policy_vsgserviceinstance.Port = MockPort
+        model_policy_vsgserviceinstance.NetworkParameterType = MockNetworkParameterType
+        model_policy_vsgserviceinstance.NetworkParameter = MockNetworkParameter
+        model_policy_vsgserviceinstance.ServiceInstance = MockServiceInstance
+        model_policy_vsgserviceinstance.AddressManagerServiceInstance = MockAddressManagerServiceInstance
 
         MockTag.objects.item_list = []
 
-    @patch.object(VSGTenantPolicy, "manage_container")
-    @patch.object(VSGTenantPolicy, "manage_address_service_instance")
-    @patch.object(VSGTenantPolicy, "cleanup_orphans")
+    @patch.object(VSGServiceInstancePolicy, "manage_container")
+    @patch.object(VSGServiceInstancePolicy, "manage_address_service_instance")
+    @patch.object(VSGServiceInstancePolicy, "cleanup_orphans")
     def test_handle_create(self, cleanup_orphans, manage_address_service_instance, manage_container):
         self.policy.handle_create(self.tenant)
         manage_container.assert_called_with(self.tenant)
         manage_address_service_instance.assert_called_with(self.tenant)
         cleanup_orphans.assert_called_with(self.tenant)
 
-    @patch.object(VSGTenantPolicy, "manage_container")
-    @patch.object(VSGTenantPolicy, "manage_address_service_instance")
-    @patch.object(VSGTenantPolicy, "cleanup_orphans")
+    @patch.object(VSGServiceInstancePolicy, "manage_container")
+    @patch.object(VSGServiceInstancePolicy, "manage_address_service_instance")
+    @patch.object(VSGServiceInstancePolicy, "cleanup_orphans")
     def test_handle_update(self, cleanup_orphans, manage_address_service_instance, manage_container):
         self.policy.handle_create(self.tenant)
         manage_container.assert_called_with(self.tenant)
@@ -317,10 +317,10 @@
         self.policy.handle_delete(self.tenant)
         amsi_delete.assert_not_called()
 
-    @patch.object(MockVSGTenantObjects, "get_items")
+    @patch.object(MockVSGServiceInstanceObjects, "get_items")
     @patch.object(MockInstanceObjects, "get_items")
     @patch.object(MockInstance, "delete")
-    def test_handle_delete_cleanup_instance(self, instance_delete, instance_objects, vsgtenant_objects):
+    def test_handle_delete_cleanup_instance(self, instance_delete, instance_objects, vsgserviceinstance_objects):
         vsg_service = MockVSGService()
         instance = MockInstance(id=1)
         instance_objects.return_value = [instance]
@@ -328,14 +328,14 @@
         self.tenant.instance = instance
         self.tenant.instance_id = instance.id
         self.tenant.owner = vsg_service
-        vsgtenant_objects.return_value = [self.tenant]
+        vsgserviceinstance_objects.return_value = [self.tenant]
         self.policy.handle_delete(self.tenant)
         instance_delete.assert_called()
 
-    @patch.object(MockVSGTenantObjects, "get_items")
+    @patch.object(MockVSGServiceInstanceObjects, "get_items")
     @patch.object(MockInstanceObjects, "get_items")
     @patch.object(MockInstance, "delete")
-    def test_handle_delete_cleanup_instance_live(self, instance_delete, instance_objects, vsgtenant_objects):
+    def test_handle_delete_cleanup_instance_live(self, instance_delete, instance_objects, vsgserviceinstance_objects):
         # Make sure if an Instance still has active VSG Tenants, that we don't clean it up
         vsg_service = MockVSGService()
         instance = MockInstance(id=1)
@@ -345,13 +345,13 @@
         self.tenant.instance_id = instance.id
         self.tenant.owner = vsg_service
 
-        other_tenant = MockVSGTenant()
+        other_tenant = MockVSGServiceInstance()
         other_tenant.address_service_instance = None
         other_tenant.instance = instance
         other_tenant.instance_id = instance.id
         other_tenant.owner = vsg_service
 
-        vsgtenant_objects.return_value = [self.tenant, other_tenant]
+        vsgserviceinstance_objects.return_value = [self.tenant, other_tenant]
 
         self.policy.handle_delete(self.tenant)
         instance_delete.assert_not_called()
@@ -359,13 +359,13 @@
     @patch.object(MockServiceInstanceObjects, "get_items")
     @patch.object(MockAddressManagerServiceInstanceObjects, "get_items")
     @patch.object(MockTagObjects, "get_items")
-    @patch.object(MockVSGTenantObjects, "get_items")
+    @patch.object(MockVSGServiceInstanceObjects, "get_items")
     @patch.object(MockInstanceObjects, "get_items")
     @patch.object(MockAddressManagerServiceInstance, "delete")
     @patch.object(MockTag, "delete")
     @patch.object(MockInstance, "delete")
     def test_handle_delete_cleanup_instance_and_tags_and_stuff(self, instance_delete, tag_delete, amsi_delete,
-                                                            instance_objects, vsgtenant_objects, tag_objects,
+                                                            instance_objects, vsgserviceinstance_objects, tag_objects,
                                                             amsi_objects, si_objects):
         vsg_service = MockVSGService()
         am_instance = MockAddressManagerServiceInstance()
@@ -377,7 +377,7 @@
         self.tenant.instance = instance
         self.tenant.instance_id = instance.id
         self.tenant.owner = vsg_service
-        vsgtenant_objects.return_value = [self.tenant]
+        vsgserviceinstance_objects.return_value = [self.tenant]
         stag_tag = MockTag(service_id=self.tenant.owner.id, content_type=instance.self_content_type_id,
                        object_id=instance.id, name="s_tag")
         vrouter_tag = MockTag(service_id=self.tenant.owner.id, content_type=instance.self_content_type_id,
@@ -420,9 +420,9 @@
             self.policy.manage_container(self.tenant)
         self.assertEqual(e.exception.message, "This VSG container has no volt")
 
-    @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
-    @patch.object(MockVSGTenant, "save")
-    @patch.object(MockVSGTenant, "volt")
+    @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+    @patch.object(MockVSGServiceInstance, "save")
+    @patch.object(MockVSGServiceInstance, "volt")
     def test_manage_container_noinstance(self, volt, tenant_save, find_or_make_instance_for_s_tag):
         instance = MockInstance()
         volt.s_tag=222
@@ -432,9 +432,9 @@
         self.assertEqual(self.tenant.instance, instance)
         tenant_save.assert_called()
 
-    @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
-    @patch.object(MockVSGTenant, "save")
-    @patch.object(MockVSGTenant, "volt")
+    @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+    @patch.object(MockVSGServiceInstance, "save")
+    @patch.object(MockVSGServiceInstance, "volt")
     def test_manage_container_hasinstance(self, volt, tenant_save, find_or_make_instance_for_s_tag):
         instance = MockInstance()
         volt.s_tag=222
@@ -445,9 +445,9 @@
         self.assertEqual(self.tenant.instance, instance)
         tenant_save.assert_not_called()
 
-    @patch.object(VSGTenantPolicy, "find_or_make_instance_for_s_tag")
-    @patch.object(MockVSGTenant, "save")
-    @patch.object(MockVSGTenant, "volt")
+    @patch.object(VSGServiceInstancePolicy, "find_or_make_instance_for_s_tag")
+    @patch.object(MockVSGServiceInstance, "save")
+    @patch.object(MockVSGServiceInstance, "volt")
     def test_manage_container_deleted(self, volt, tenant_save, find_or_make_instance_for_s_tag):
         self.tenant.deleted = True
         self.policy.manage_container(self.tenant)
@@ -535,15 +535,15 @@
     @patch.object(MockNodeObjects, "get_items")
     @patch.object(MockFlavorObjects, "get_items")
     @patch.object(MockVSGServiceObjects, "get_items")
-    @patch.object(MockVSGTenant, "volt")
-    @patch.object(MockVSGTenant, "save")
-    @patch.object(VSGTenantPolicy, "get_image")
-    @patch.object(VSGTenantPolicy, "allocate_public_service_instance")
+    @patch.object(MockVSGServiceInstance, "volt")
+    @patch.object(MockVSGServiceInstance, "save")
+    @patch.object(VSGServiceInstancePolicy, "get_image")
+    @patch.object(VSGServiceInstancePolicy, "allocate_public_service_instance")
     @patch.object(LeastLoadedNodeScheduler, "pick")
     @patch.object(MockNode, "site_deployment")
     @patch.object(MockInstance, "save")
     @patch.object(MockInstance, "delete")
-    @patch.object(VSGTenantPolicy, "port_set_parameter")
+    @patch.object(VSGServiceInstancePolicy, "port_set_parameter")
     def test_find_or_make_instance_for_s_tag(self, port_set_parameter, instance_delete, instance_save, site_deployment,
                               pick, get_psi, get_image, tenant_save, volt,
                               vsgservice_objects, flavor_objects, node_objects, npt_objects):
@@ -565,7 +565,7 @@
         # done setup mocks
 
         # call the function under test
-        instance = self.policy.find_or_make_instance_for_s_tag(self.tenant, self.tenant.volt.s_tag)
+        instance = self.policy.find_or_make_instance_for_s_tag(self.tenant)
 
         # make sure Instance was created
         self.assertNotEqual(instance, None)
@@ -600,7 +600,7 @@
         # Allocate_public_service_instance should have been called
         get_psi.assert_called()
 
-    @patch.object(VSGTenantPolicy, "allocate_public_service_instance")
+    @patch.object(VSGServiceInstancePolicy, "allocate_public_service_instance")
     def test_manage_address_service_instance(self, get_psi):
         vrtenant = MockAddressManagerServiceInstance(public_ip="1.2.3.4", public_mac="01:02:03:04:05:06")
         get_psi.return_value = vrtenant
diff --git a/xos/synchronizer/run-from-api.sh b/xos/synchronizer/run-from-api.sh
index 310065d..d52b264 100755
--- a/xos/synchronizer/run-from-api.sh
+++ b/xos/synchronizer/run-from-api.sh
@@ -14,4 +14,4 @@
 # limitations under the License.
 
 
-python vcpe-synchronizer.py
+python vsg-synchronizer.py
diff --git a/xos/synchronizer/run-vtn.sh b/xos/synchronizer/run-vtn.sh
deleted file mode 100755
index b6564ac..0000000
--- a/xos/synchronizer/run-vtn.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#if [[ ! -e ./vcpe-observer.py ]]; then
-#    ln -s ../../xos-observer.py vcpe-observer.py
-#fi
-
-export XOS_DIR=/opt/xos
-cp /root/setup/node_key $XOS_DIR/synchronizers/vsg/node_key
-chmod 0600 $XOS_DIR/synchronizers/vsg/node_key
-python vcpe-synchronizer.py  -C $XOS_DIR/synchronizers/vsg/vsg_synchronizer_config
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
index e617504..677c4c7 100644
--- a/xos/synchronizer/steps/sync_monitoring_agent.yaml
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -27,8 +27,8 @@
       rabbit_host: {{ rabbit_host }}
 
   tasks:
-  - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
+  - name: Verify if vsg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]sg_stats_notifier | wc -l
     register: cron_job_pids_count
 
   - name: DEBUG
@@ -40,8 +40,8 @@
     when: cron_job_pids_count.stdout == "0"
 
   - name: Copy cron job to destination
-    copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-      dest=/usr/local/share/vsg_monitoring_agent/vcpe_stats_notifier.py
+    copy: src=/opt/xos/synchronizers/vsg/vsg_stats_notifier.py
+      dest=/usr/local/share/vsg_monitoring_agent/vsg_stats_notifier.py
     become: yes
     when: cron_job_pids_count.stdout == "0"
 
@@ -50,8 +50,8 @@
     become: yes
     when: cron_job_pids_count.stdout == "0"
 
-  - name: Initiate vcpe_stats_notifier cron job
-    command: sudo python /usr/local/share/vsg_monitoring_agent/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+  - name: Initiate vsg_stats_notifier cron job
+    command: sudo python /usr/local/share/vsg_monitoring_agent/vsg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
     async: 9999999999999999
     poll: 0
     become: yes
diff --git a/xos/synchronizer/steps/sync_vcpetenant.yaml b/xos/synchronizer/steps/sync_vcpetenant.yaml
deleted file mode 100644
index d3109e2..0000000
--- a/xos/synchronizer/steps/sync_vcpetenant.yaml
+++ /dev/null
@@ -1,195 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  become_user: ubuntu
-
-  vars:
-      cdn_enable: {{ cdn_enable }}
-      dnsdemux_ip: {{ dnsdemux_ip }}
-      firewall_enable: {{ firewall_enable }}
-      url_filter_enable: {{ url_filter_enable }}
-      c_tags:
-        {% for c_tag in c_tags %}
-        - {{ c_tag }}
-        {% endfor %}
-      s_tags:
-        {% for s_tag in s_tags %}
-        - {{ s_tag }}
-        {% endfor %}
-      firewall_rules:
-        {% for firewall_rule in firewall_rules.split("\n") %}
-        - {{ firewall_rule }}
-        {% endfor %}
-      cdn_prefixes:
-        {% for prefix in cdn_prefixes %}
-        - {{ prefix }}
-        {% endfor %}
-      bbs_addrs:
-        {% for bbs_addr in bbs_addrs %}
-        - {{ bbs_addr }}
-        {% endfor %}
-      dns_servers:
-        {% for dns_server in dns_servers %}
-        - {{ dns_server }}
-        {% endfor %}
-      nat_ip: {{ nat_ip }}
-      nat_mac: {{ nat_mac }}
-      lan_ip: {{ lan_ip }}
-      lan_mac: {{ lan_mac }}
-      wan_ip: {{ wan_ip }}
-      wan_mac: {{ wan_mac }}
-      wan_container_mac: {{ wan_container_mac }}
-      wan_next_hop: 10.0.1.253   # FIX ME
-      private_ip: {{ private_ip }}
-      private_mac: {{ private_mac }}
-      hpc_client_ip: {{ hpc_client_ip }}
-      hpc_client_mac: {{ hpc_client_mac }}
-      keystone_tenant_id: {{ keystone_tenant_id }}
-      keystone_user_id: {{ keystone_user_id }}
-      rabbit_user: {{ rabbit_user }}
-      rabbit_password: {{ rabbit_password }}
-      rabbit_host: {{ rabbit_host }}
-      safe_browsing:
-        {% for mac in safe_browsing_macs %}
-        - {{ mac }}
-        {% endfor %}
-      uplink_speed: {{ uplink_speed }}
-      downlink_speed: {{ downlink_speed }}
-      status: {{ status }}
-      enable_uverse: {{ enable_uverse }}
-      url_filter_kind: {{ url_filter_kind }}
-
-  tasks:
-{% if full_setup %}
-  - name: Docker repository
-    copy: src=/opt/xos/synchronizers/vsg/files/docker.list
-      dest=/etc/apt/sources.list.d/docker.list
-
-  - name: Import the repository key
-    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
-
-  - name: install Docker
-    apt: name=lxc-docker state=present update_cache=yes
-
-  - name: install python-setuptools
-    apt: name=python-setuptools state=present
-
-  - name: install pip
-    easy_install: name=pip
-
-  - name: install docker-py
-    pip: name=docker-py version=0.5.3
-
-  - name: install Pipework
-    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
-       dest=/usr/local/bin/pipework
-       mode=0755
-
-  - name: make sure /etc/dnsmasq.d exists
-    file: path=/etc/dnsmasq.d state=directory owner=root group=root
-
-  - name: Disable resolvconf service
-    shell: service resolvconf stop
-    shell: echo manual > /etc/init/resolvconf.override
-    shell: rm -f /etc/resolv.conf
-
-  - name: Install resolv.conf
-    copy: src=/opt/xos/synchronizers/vsg/files/vm-resolv.conf
-      dest=/etc/resolv.conf
-
-  - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
-    register: cron_job_pids_count
-
-#  - name: DEBUG
-#    debug: var=cron_job_pids_count.stdout
-
-#  - name: make sure ~/bin exists
-#    file: path=~/bin state=directory owner=root group=root
-#    when: cron_job_pids_count.stdout == "0"
-
-  - name: Copy cron job to destination
-    copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-      dest=/usr/local/sbin/vcpe_stats_notifier.py
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: install python-kombu
-    apt: name=python-kombu state=present
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Initiate vcpe_stats_notifier cron job
-    command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
-    async: 9999999999999999
-    poll: 0
-    when: cron_job_pids_count.stdout == "0"
-{% endif %}
-
-  - name: vCPE upstart
-    template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
-
-  - name: vCPE startup script
-    template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
-    notify:
-#    - restart vcpe
-     - stop vcpe
-     - remove container
-     - start vcpe
-
-  - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
-    file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
-
-  - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: dnsmasq config
-    template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-# These are samples, not necessary for correct function of demo
-
-#  - name: networking info
-#    template: src=/opt/xos/synchronizers/vsg/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
-
-#  - name: firewall info
-#    template: src=/opt/xos/synchronizers/vsg/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
-
-  - name: Make sure vCPE service is running
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
-  handlers:
-  # Dnsmasq is automatically restarted in the container
-  - name: restart dnsmasq
-    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
-
-  - name: restart vcpe
-    shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
-
-  - name: stop vcpe
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
-
-  - name: remove container
-    docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
-
-  - name: start vcpe
-    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
-
diff --git a/xos/synchronizer/steps/sync_vcpetenant_new.yaml b/xos/synchronizer/steps/sync_vcpetenant_new.yaml
deleted file mode 100644
index 723c1d9..0000000
--- a/xos/synchronizer/steps/sync_vcpetenant_new.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
----
-- hosts: {{ instance_name }}
-  gather_facts: False
-  connection: ssh
-  become_user: {{ username }}
-
-  vars:
-      container_name: {{ container_name }}
-      cdn_enable: {{ cdn_enable }}
-      dnsdemux_ip: {{ dnsdemux_ip }}
-      firewall_enable: {{ firewall_enable }}
-      url_filter_enable: {{ url_filter_enable }}
-      c_tags:
-        {% for c_tag in c_tags %}
-        - {{ c_tag }}
-        {% endfor %}
-      s_tags:
-        {% for s_tag in s_tags %}
-        - {{ s_tag }}
-        {% endfor %}
-      firewall_rules:
-        {% for firewall_rule in firewall_rules.split("\n") %}
-        - {{ firewall_rule }}
-        {% endfor %}
-      cdn_prefixes:
-        {% for prefix in cdn_prefixes %}
-        - {{ prefix }}
-        {% endfor %}
-      bbs_addrs:
-        {% for bbs_addr in bbs_addrs %}
-        - {{ bbs_addr }}
-        {% endfor %}
-      dns_servers:
-        {% for dns_server in dns_servers %}
-        - {{ dns_server }}
-        {% endfor %}
-      nat_ip: {{ nat_ip }}
-      nat_mac: {{ nat_mac }}
-      lan_ip: {{ lan_ip }}
-      lan_mac: {{ lan_mac }}
-      wan_ip: {{ wan_ip }}
-      wan_mac: {{ wan_mac }}
-      wan_container_mac: {{ wan_container_mac }}
-      wan_next_hop: 10.0.1.253   # FIX ME
-      private_ip: {{ private_ip }}
-      private_mac: {{ private_mac }}
-      hpc_client_ip: {{ hpc_client_ip }}
-      hpc_client_mac: {{ hpc_client_mac }}
-      keystone_tenant_id: {{ keystone_tenant_id }}
-      keystone_user_id: {{ keystone_user_id }}
-      rabbit_user: {{ rabbit_user }}
-      rabbit_password: {{ rabbit_password }}
-      rabbit_host: {{ rabbit_host }}
-      safe_browsing:
-        {% for mac in safe_browsing_macs %}
-        - {{ mac }}
-        {% endfor %}
-      uplink_speed: {{ uplink_speed }}
-      downlink_speed: {{ downlink_speed }}
-      status: {{ status }}
-      enable_uverse: {{ enable_uverse }}
-      url_filter_kind: {{ url_filter_kind }}
-
-  tasks:
-  - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
-    register: cron_job_pids_count
-
-#  - name: DEBUG
-#    debug: var=cron_job_pids_count.stdout
-
-  - name: make sure ~/bin exists
-    file: path=~/bin state=directory owner=root group=root
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Copy cron job to destination
-    copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-      dest=~/bin/vcpe_stats_notifier.py
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: install python-kombu
-    apt: name=python-kombu state=present
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: Initiate vcpe_stats_notifier cron job
-    command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
-    async: 9999999999999999
-    poll: 0
-    when: cron_job_pids_count.stdout == "0"
-
-  - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: dnsmasq config
-    template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: create directory for "safe" config
-    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
-
-  - name: dnsmasq "safe" config
-    template: src=/opt/xos/synchronizers/vsg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
-    notify:
-    - restart dnsmasq
-
-  - name: copy base ufw files
-    synchronize: src=/opt/xos/synchronizers/vsg/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
-    notify:
-    - reload ufw
-
-  - name: redirection rules for safe DNS
-    template: src=/opt/xos/synchronizers/vsg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
-    notify:
-    - reload ufw
-
-  - name: base ufw setup uses /etc/rc.local
-    copy: src=/opt/xos/synchronizers/vsg/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
-    notify:
-    - copy in /etc/rc.local
-
-  handlers:
-  # Dnsmasq is automatically restarted in the container
-  - name: restart dnsmasq
-    shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
-
-  - name: reload ufw
-    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
-
-  # Use docker cp instead of single-file volume
-  # The reason is that changes to external file volume don't show up inside the container
-  # Probably Ansible deletes and then recreates the external file, and container has old version
-  - name: copy in /etc/rc.local
-    shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
diff --git a/xos/synchronizer/steps/sync_vcpetenant.py b/xos/synchronizer/steps/sync_vsgserviceinstance.py
similarity index 79%
rename from xos/synchronizer/steps/sync_vcpetenant.py
rename to xos/synchronizer/steps/sync_vsgserviceinstance.py
index a27c8ce..b063065 100644
--- a/xos/synchronizer/steps/sync_vcpetenant.py
+++ b/xos/synchronizer/steps/sync_vsgserviceinstance.py
@@ -34,34 +34,34 @@
 
 ENABLE_QUICK_UPDATE=False
 
-class SyncVSGTenant(SyncInstanceUsingAnsible):
-    provides=[VSGTenant]
-    observes=VSGTenant
+class SyncVSGServiceInstance(SyncInstanceUsingAnsible):
+    provides=[VSGServiceInstance]
+    observes=VSGServiceInstance
     requested_interval=0
-    template_name = "sync_vcpetenant.yaml"
+    template_name = "sync_vsgserviceinstance.yaml"
     watches = [ModelLink(ServiceDependency,via='servicedependency'), ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
 
     def __init__(self, *args, **kwargs):
-        super(SyncVSGTenant, self).__init__(*args, **kwargs)
+        super(SyncVSGServiceInstance, self).__init__(*args, **kwargs)
 
-    def get_vcpe_service(self, o):
+    def get_vsg_service(self, o):
         if not o.owner:
             return None
 
-        vcpes = VSGService.objects.filter(id=o.owner.id)
-        if not vcpes:
+        vsg_services = VSGService.objects.filter(id=o.owner.id)
+        if not vsg_services:
             return None
 
-        return vcpes[0]
+        return vsg_services[0]
 
     def get_extra_attributes(self, o):
         # This is a place to include extra attributes that aren't part of the
-        # object itself. In the case of vCPE, we need to know:
-        #   1) the addresses of dnsdemux, to setup dnsmasq in the vCPE
+        # object itself. In the case of vSG, we need to know:
+        #   1) the addresses of dnsdemux, to setup dnsmasq in the vSG
         #   2) CDN prefixes, so we know what URLs to send to dnsdemux
-        #   4) vlan_ids, for setting up networking in the vCPE VM
+        #   4) vlan_ids, for setting up networking in the vSG VM
 
-        vcpe_service = self.get_vcpe_service(o)
+        vsg_service = self.get_vsg_service(o)
 
         dnsdemux_ip = None
         cdn_prefixes = []
@@ -87,7 +87,7 @@
         full_setup = True
 
         safe_macs=[]
-        if vcpe_service.url_filter_kind == "safebrowsing":
+        if vsg_service.url_filter_kind == "safebrowsing":
             if o.volt and o.volt.subscriber:
                 for user in o.volt.subscriber.devices:
                     level = user.get("level",None)
@@ -97,23 +97,23 @@
                             safe_macs.append(mac)
 
         docker_opts = []
-        if vcpe_service.docker_insecure_registry:
-            reg_name = vcpe_service.docker_image_name.split("/",1)[0]
+        if vsg_service.docker_insecure_registry:
+            reg_name = vsg_service.docker_image_name.split("/",1)[0]
             docker_opts.append("--insecure-registry " + reg_name)
 
         fields = {"s_tags": s_tags,
                 "c_tags": c_tags,
-                "docker_remote_image_name": vcpe_service.docker_image_name,
-                "docker_local_image_name": vcpe_service.docker_image_name,
+                "docker_remote_image_name": vsg_service.docker_image_name,
+                "docker_local_image_name": vsg_service.docker_image_name,
                 "docker_opts": " ".join(docker_opts),
                 "dnsdemux_ip": dnsdemux_ip,
                 "cdn_prefixes": cdn_prefixes,
                 "full_setup": full_setup,
                 "isolation": o.instance.isolation,
                 "safe_browsing_macs": safe_macs,
-                "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
-                "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
-                "url_filter_kind": vcpe_service.url_filter_kind }
+                "container_name": "vsg-%s-%s" % (s_tags[0], c_tags[0]),
+                "dns_servers": [x.strip() for x in vsg_service.dns_servers.split(",")],
+                "url_filter_kind": vsg_service.url_filter_kind }
 
         # add in the sync_attributes that come from the SubscriberRoot object
 
@@ -125,7 +125,7 @@
 
     def sync_fields(self, o, fields):
         # the super causes the playbook to be run
-        super(SyncVSGTenant, self).sync_fields(o, fields)
+        super(SyncVSGServiceInstance, self).sync_fields(o, fields)
 
     def run_playbook(self, o, fields):
         ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
@@ -135,17 +135,16 @@
             logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
         else:
             if o.instance.isolation in ["container", "container_vm"]:
-                raise Exception("probably not implemented")
-                super(SyncVSGTenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
+                raise NotImplementedError("container isolation is not implemented")
             else:
-                super(SyncVSGTenant, self).run_playbook(o, fields, template_name="sync_vcpetenant_vtn.yaml")
+                super(SyncVSGServiceInstance, self).run_playbook(o, fields)
 
         o.last_ansible_hash = ansible_hash
 
     def sync_record(self, o):
         if (not o.policed) or (o.policed<o.updated):
             self.defer_sync(o, "waiting on model policy")
-        super(SyncVSGTenant, self).sync_record(o)
+        super(SyncVSGServiceInstance, self).sync_record(o)
 
     def delete_record(self, o):
         if (not o.policed) or (o.policed<o.updated):
@@ -161,7 +160,7 @@
             logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
             return
 
-        objs = VSGTenant.objects.all()
+        objs = VSGServiceInstance.objects.all()
         for obj in objs:
             if obj.owner.id != monitoring_agent_info.service.id:
                 logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
@@ -172,7 +171,7 @@
                logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
                return
 
-            logger.info("handling watch notification for monitoring agent info:%s for VSGTenant object:%s" % (monitoring_agent_info, obj))
+            logger.info("handling watch notification for monitoring agent info:%s for VSGServiceInstance object:%s" % (monitoring_agent_info, obj))
 
             #Run ansible playbook to update the routing table entries in the instance
             fields = self.get_ansible_fields(instance)
@@ -187,5 +186,5 @@
             fields["rabbit_host"] = url.hostname
 
             template_name = "sync_monitoring_agent.yaml"
-            super(SyncVSGTenant, self).run_playbook(obj, fields, template_name)
+            super(SyncVSGServiceInstance, self).run_playbook(obj, fields, template_name)
 
diff --git a/xos/synchronizer/steps/sync_vcpetenant_vtn.yaml b/xos/synchronizer/steps/sync_vsgserviceinstance.yaml
similarity index 86%
rename from xos/synchronizer/steps/sync_vcpetenant_vtn.yaml
rename to xos/synchronizer/steps/sync_vsgserviceinstance.yaml
index 81c9252..b7968b2 100644
--- a/xos/synchronizer/steps/sync_vcpetenant_vtn.yaml
+++ b/xos/synchronizer/steps/sync_vsgserviceinstance.yaml
@@ -125,14 +125,14 @@
     stat: path=/root/environment_is_setup
     register: environment_is_setup
 
-# Everything here is now baked into the vCPE image
+# Everything here is now baked into the vSG image
 # Leave this spot in place for future temporary setup stuff
 
   - name: Remember that the environment is setup, so we never do the above again
     shell: touch /root/environment_is_setup
 
-  - name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
-    shell: pgrep -f [v]cpe_stats_notifier | wc -l
+  - name: Verify if vsg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]sg_stats_notifier | wc -l
     register: cron_job_pids_count
 
 #  - name: DEBUG
@@ -143,16 +143,16 @@
 #    when: cron_job_pids_count.stdout == "0"
 
 #  - name: Copy cron job to destination
-#    copy: src=/opt/xos/synchronizers/vsg/vcpe_stats_notifier.py
-#      dest=/usr/local/sbin/vcpe_stats_notifier.py
+#    copy: src=/opt/xos/synchronizers/vsg/vsg_stats_notifier.py
+#      dest=/usr/local/sbin/vsg_stats_notifier.py
 #    when: cron_job_pids_count.stdout == "0"
 
 #  - name: install python-kombu
 #    apt: name=python-kombu state=present
 #    when: cron_job_pids_count.stdout == "0"
 
-#  - name: Initiate vcpe_stats_notifier cron job
-#    command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+#  - name: Initiate vsg_stats_notifier cron job
+#    command: sudo python /usr/local/sbin/vsg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
 #    async: 9999999999999999
 #    poll: 0
 #    when: cron_job_pids_count.stdout == "0"
@@ -163,22 +163,22 @@
     notify:
      - restart docker
 
-  - name: vCPE upstart
-    template: src=/opt/xos/synchronizers/vsg/templates/vcpe.conf.j2 dest=/etc/init/{{ container_name }}.conf
+  - name: vSG upstart
+    template: src=/opt/xos/synchronizers/vsg/templates/vsg.conf.j2 dest=/etc/init/{{ container_name }}.conf
 
-  - name: vCPE startup script
-    template: src=/opt/xos/synchronizers/vsg/templates/start-vcpe-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
+  - name: vSG startup script
+    template: src=/opt/xos/synchronizers/vsg/templates/start-vsg-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
     notify:
-#    - restart vcpe
-     - stop vcpe
+#    - restart vsg
+     - stop vsg
      - remove container
-     - start vcpe
+     - start vsg
 
   - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
     file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
 
-  - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/synchronizers/vsg/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
+  - name: vSG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/vsg/files/vsg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vsg.conf owner=root group=root
     notify:
     - restart dnsmasq
 
@@ -227,14 +227,14 @@
   - name: generate the message page
     template: src=/opt/xos/synchronizers/vsg/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
     when: status != "enabled"
-    #notify: restart vcpe
+    #notify: restart vsg
 
   - name: remove simple webserver
     file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
     when: status == "enabled"
-    #notify: restart vcpe
+    #notify: restart vsg
 
-  - name: Make sure vCPE service is running
+  - name: Make sure vSG service is running
     service: name={{ container_name }} state=started
 
   handlers:
@@ -242,13 +242,13 @@
   - name: restart dnsmasq
     shell: docker exec {{ container_name }} killall dnsmasq
 
-  - name: stop vcpe
+  - name: stop vsg
     service: name={{ container_name }} state=stopped
 
   - name: remove container
-    docker: name={{ container_name }} state=absent image=docker-vcpe
+    docker: name={{ container_name }} state=absent image={{ docker_local_image_name }}
 
-  - name: start vcpe
+  - name: start vsg
     service: name={{ container_name }} state=started
 
   - name: reload ufw
@@ -260,7 +260,7 @@
   - name: reset bwlimits
     shell: docker exec {{ container_name }} bash -c "/usr/local/sbin/bwlimit.sh restart"
 
-  - name: restart vcpe
+  - name: restart vsg
     shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
 
   - name: restart docker
diff --git a/xos/synchronizer/templates/dnsmasq_safe_servers.j2 b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
index fdcaf4d..4082195 100644
--- a/xos/synchronizer/templates/dnsmasq_safe_servers.j2
+++ b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-# This file autogenerated by vCPE observer
+# This file autogenerated by vSG observer
 # It contains a list of DNS servers for dnsmasq to use.
 no-resolv
 
diff --git a/xos/synchronizer/templates/dnsmasq_servers.j2 b/xos/synchronizer/templates/dnsmasq_servers.j2
index f1b5578..36a9f54 100644
--- a/xos/synchronizer/templates/dnsmasq_servers.j2
+++ b/xos/synchronizer/templates/dnsmasq_servers.j2
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-# This file autogenerated by vCPE observer
+# This file autogenerated by vSG observer
 # It contains a list of DNS servers for dnsmasq to use.
 no-resolv
 
diff --git a/xos/synchronizer/templates/start-vcpe.sh.j2 b/xos/synchronizer/templates/start-vcpe.sh.j2
deleted file mode 100755
index 76d2d32..0000000
--- a/xos/synchronizer/templates/start-vcpe.sh.j2
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/bin/bash
-
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-
-function mac_to_iface {
-    MAC=$1
-    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
-}
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-STAG={{ s_tags[0] }}
-CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
-
-docker inspect $VCPE > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
-    docker pull andybavier/docker-vcpe
-    docker run -d --name=$VCPE --privileged=true --net=none -v /etc/$VCPE/dnsmasq.d:/etc/dnsmasq.d andybavier/docker-vcpe
-else
-    docker start $VCPE
-fi
-
-# Set up networking via pipework
-WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-
-# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
-# Assumes that br-lan has been set up appropriately by a previous step
-LAN_IFACE=br-lan
-ifconfig $LAN_IFACE >> /dev/null
-if [ "$?" == 0 ]
-then
-    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
-    ifconfig $LAN_IFACE.$STAG up
-    docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
-fi
-
-#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
-
-# Make sure VM's eth0 (hpc_client) has no IP address
-#ifconfig $HPC_IFACE 0.0.0.0
-
-# Now can start up dnsmasq
-docker exec $VCPE service dnsmasq start
-
-# Attach to container
-docker start -a $VCPE
diff --git a/xos/synchronizer/templates/start-vcpe-vtn.sh.j2 b/xos/synchronizer/templates/start-vsg-vtn.sh.j2
similarity index 61%
rename from xos/synchronizer/templates/start-vcpe-vtn.sh.j2
rename to xos/synchronizer/templates/start-vsg-vtn.sh.j2
index 730d4ad..ff0b4b0 100644
--- a/xos/synchronizer/templates/start-vcpe-vtn.sh.j2
+++ b/xos/synchronizer/templates/start-vsg-vtn.sh.j2
@@ -24,25 +24,25 @@
 
 STAG={{ s_tags[0] }}
 CTAG={{ c_tags[0] }}
-VCPE=vcpe-$STAG-$CTAG
+VSG=vsg-$STAG-$CTAG
 
-docker inspect $VCPE > /dev/null 2>&1
+docker inspect $VSG > /dev/null 2>&1
 if [ "$?" == 1 ]
 then
     docker pull {{ docker_remote_image_name }}
-    docker run -d --name=$VCPE --privileged=true --net=none \
-    -v /var/container_volumes/$VCPE/mount:/mount:ro \
-    -v /var/container_volumes/$VCPE/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
-    -v /var/container_volumes/$VCPE/etc/service/message:/etc/service/message \
-    -v /var/container_volumes/$VCPE/usr/local/sbin:/usr/local/sbin:ro \
+    docker run -d --name=$VSG --privileged=true --net=none \
+    -v /var/container_volumes/$VSG/mount:/mount:ro \
+    -v /var/container_volumes/$VSG/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
+    -v /var/container_volumes/$VSG/etc/service/message:/etc/service/message \
+    -v /var/container_volumes/$VSG/usr/local/sbin:/usr/local/sbin:ro \
     {{ docker_local_image_name }}
 else
-    docker start $VCPE
+    docker start $VSG
 fi
 
 # Set up networking via pipework
 WAN_IFACE=br-wan
-docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
+docker exec $VSG ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VSG {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
 
 LAN_IFACE=eth0
 ifconfig $LAN_IFACE >> /dev/null
@@ -50,14 +50,14 @@
 then
     ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
     ifconfig $LAN_IFACE.$STAG up
-    docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+    docker exec $VSG ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VSG 192.168.0.1/24 @$CTAG
 fi
 
 #HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#docker exec $VSG ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VSG {{ hpc_client_ip }}/24
 
 # Make sure VM's eth0 (hpc_client) has no IP address
 #ifconfig $HPC_IFACE 0.0.0.0
 
 # Attach to container
-docker start -a $VCPE
+docker start -a $VSG
diff --git a/xos/synchronizer/templates/vlan_sample.j2 b/xos/synchronizer/templates/vlan_sample.j2
index 404a539..e1ab857 100644
--- a/xos/synchronizer/templates/vlan_sample.j2
+++ b/xos/synchronizer/templates/vlan_sample.j2
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 
-# below is a list of all vlan_ids associated with this vcpe
+# below is a list of all vlan_ids associated with this vsg
 
 {% for vlan_id in c_tags %}
 {{ vlan_id }}
diff --git a/xos/synchronizer/templates/vcpe.conf.j2 b/xos/synchronizer/templates/vsg.conf.j2
similarity index 82%
rename from xos/synchronizer/templates/vcpe.conf.j2
rename to xos/synchronizer/templates/vsg.conf.j2
index 39e74ae..797f645 100644
--- a/xos/synchronizer/templates/vcpe.conf.j2
+++ b/xos/synchronizer/templates/vsg.conf.j2
@@ -13,13 +13,13 @@
 # limitations under the License.
 
 
-# Upstart script for vCPE
-description "vCPE container"
-author "andy@onlab.us"
+# Upstart script for vSG
+description "vSG container"
+author "andy@opennetworking.org"
 start on filesystem and started docker
 stop on runlevel [!2345]
 respawn
 
 script
-  /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
+  /usr/local/sbin/start-vsg-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
 end script
diff --git a/xos/synchronizer/vcpe-synchronizer.py b/xos/synchronizer/vsg-synchronizer.py
similarity index 100%
rename from xos/synchronizer/vcpe-synchronizer.py
rename to xos/synchronizer/vsg-synchronizer.py
diff --git a/xos/synchronizer/vsg_from_api_config b/xos/synchronizer/vsg_from_api_config
deleted file mode 100644
index 65f021f..0000000
--- a/xos/synchronizer/vsg_from_api_config
+++ /dev/null
@@ -1,20 +0,0 @@
-# Sets options for the synchronizer
-[observer]
-name=vsg
-dependency_graph=/opt/xos/synchronizers/vsg/model-deps
-steps_dir=/opt/xos/synchronizers/vsg/steps
-sys_dir=/opt/xos/synchronizers/vsg/sys
-#logfile=/var/log/xos_backend.log
-log_file=console
-log_level=debug
-pretend=False
-backoff_disabled=True
-save_ansible_output=True
-proxy_ssh=True
-proxy_ssh_key=/opt/cord_profile/node_key
-proxy_ssh_user=root
-accessor_kind=api
-accessor_password=@/opt/xos/services/vsg/credentials/xosadmin@opencord.org
-
-[networking]
-use_vtn=True
diff --git a/xos/synchronizer/vcpe_stats_notifier.py b/xos/synchronizer/vsg_stats_notifier.py
similarity index 100%
rename from xos/synchronizer/vcpe_stats_notifier.py
rename to xos/synchronizer/vsg_stats_notifier.py
diff --git a/xos/templates/vcpeadmin.html b/xos/templates/vcpeadmin.html
deleted file mode 100644
index 42ad781..0000000
--- a/xos/templates/vcpeadmin.html
+++ /dev/null
@@ -1,27 +0,0 @@
-
-<!--
-Copyright 2017-present Open Networking Foundation
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-
-<div class = "row text-center">
-    <div class="col-xs-6">
-        <a class="btn btn-primary" href="/admin/vsg/vsgtenant/">vSG Tenants</a>
-    </div>
-    <div class="col-xs-6">
-        <a class="btn btn-primary" href="/admin/dashboard/subscribers/">Subscriber View</a>
-    </div>
-</div>
-
diff --git a/xos/tosca/resources/vcpeservice.py b/xos/tosca/resources/vsgservice.py
similarity index 100%
rename from xos/tosca/resources/vcpeservice.py
rename to xos/tosca/resources/vsgservice.py
diff --git a/xos/vsg-onboard.yaml b/xos/vsg-onboard.yaml
index 7be307b..b21dc74 100644
--- a/xos/vsg-onboard.yaml
+++ b/xos/vsg-onboard.yaml
@@ -30,10 +30,7 @@
           # The following will concatenate with base_url automatically, if
           # base_url is non-null.
           xproto: ./
-          admin: admin.py
-          admin_template: templates/vcpeadmin.html
-          #tosca_custom_types: exampleservice.yaml
-          tosca_resource: tosca/resources/vcpeservice.py
+          tosca_resource: tosca/resources/vsgservice.py
           private_key: file:///opt/xos/key_import/vsg_rsa
           public_key: file:///opt/xos/key_import/vsg_rsa.pub
 
diff --git a/xos/vsg.xproto b/xos/vsg.xproto
index 72e7b1b..55985b7 100644
--- a/xos/vsg.xproto
+++ b/xos/vsg.xproto
@@ -13,8 +13,8 @@
 }
 
 
-message VSGTenant (TenantWithContainer){
-    option verbose_name = "vSG Tenant";
+message VSGServiceInstance (TenantWithContainer){
+    option verbose_name = "vSG Service Instance";
     
     optional string last_ansible_hash = 1 [db_index = False, max_length = 128, null = True, content_type = "stripped", blank = True];
 }