SEBA-457 Core xproto cleanup

Change-Id: Ib99680dd81016694094a5c230afdf9dcf2cb307e
diff --git a/VERSION b/VERSION
index 530cdd9..21bb5e1 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.4
+2.2.5
diff --git a/containers/chameleon/Dockerfile.chameleon b/containers/chameleon/Dockerfile.chameleon
index 33f64b0..7cc050d 100644
--- a/containers/chameleon/Dockerfile.chameleon
+++ b/containers/chameleon/Dockerfile.chameleon
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # xosproject/chameleon
-FROM xosproject/xos-base:2.2.4
+FROM xosproject/xos-base:2.2.5
 
 # xos-base already has protoc and dependencies installed
 
diff --git a/containers/xos/Dockerfile.client b/containers/xos/Dockerfile.client
index ea54ab0..e995605 100644
--- a/containers/xos/Dockerfile.client
+++ b/containers/xos/Dockerfile.client
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # xosproject/xos-client
-FROM xosproject/xos-libraries:2.2.4
+FROM xosproject/xos-libraries:2.2.5
 
 # Install XOS client
 COPY lib/xos-api /tmp/xos-api
diff --git a/containers/xos/Dockerfile.libraries b/containers/xos/Dockerfile.libraries
index 6a58d7d..3b963d5 100644
--- a/containers/xos/Dockerfile.libraries
+++ b/containers/xos/Dockerfile.libraries
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # xosproject/xos-libraries
-FROM xosproject/xos-base:2.2.4
+FROM xosproject/xos-base:2.2.5
 
 # Add libraries
 COPY lib /opt/xos/lib
diff --git a/containers/xos/Dockerfile.synchronizer-base b/containers/xos/Dockerfile.synchronizer-base
index 042f7c6..58c5a07 100644
--- a/containers/xos/Dockerfile.synchronizer-base
+++ b/containers/xos/Dockerfile.synchronizer-base
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # xosproject/xos-synchronizer-base
-FROM xosproject/xos-client:2.2.4
+FROM xosproject/xos-client:2.2.5
 
 COPY xos/synchronizers/new_base /opt/xos/synchronizers/new_base
 COPY xos/xos/logger.py /opt/xos/xos/logger.py
diff --git a/containers/xos/Dockerfile.xos-core b/containers/xos/Dockerfile.xos-core
index d0d7605..f8a356d 100644
--- a/containers/xos/Dockerfile.xos-core
+++ b/containers/xos/Dockerfile.xos-core
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 # xosproject/xos-core
-FROM xosproject/xos-libraries:2.2.4
+FROM xosproject/xos-libraries:2.2.5
 
 # Install XOS
 ADD xos /opt/xos
diff --git a/lib/xos-genx/xosgenx/jinja2_extensions/gui.py b/lib/xos-genx/xosgenx/jinja2_extensions/gui.py
index 245bbda..4cb644a 100644
--- a/lib/xos-genx/xosgenx/jinja2_extensions/gui.py
+++ b/lib/xos-genx/xosgenx/jinja2_extensions/gui.py
@@ -96,17 +96,23 @@
 
 
 def xproto_default_to_gui(default):
+    # TODO: Using `eval` here is potentially dangerous as it may allow code injection
     val = "null"
-    if is_number(default):
-        val = str(default)
-    elif eval(default) is True:
-        val = "true"
-    elif eval(default) is False:
-        val = "false"
-    elif eval(default) is None:
+    try:
+        if is_number(default):
+            val = str(default)
+        elif eval(default) is True:
+            val = "true"
+        elif eval(default) is False:
+            val = "false"
+        elif eval(default) is None:
+            val = "null"
+        else:
+            val = str(default)
+    except NameError:
+        # val was a function call, and we can't pass those to the GUI
         val = "null"
-    else:
-        val = str(default)
+
     return val
 
 
diff --git a/lib/xos-genx/xosgenx/targets/mock_classes.xtarget b/lib/xos-genx/xosgenx/targets/mock_classes.xtarget
index 61c2f1f..bc56747 100644
--- a/lib/xos-genx/xosgenx/targets/mock_classes.xtarget
+++ b/lib/xos-genx/xosgenx/targets/mock_classes.xtarget
@@ -225,6 +225,9 @@
 
 model_accessor = ModelAccessor()
 
+def get_first_site():
+    return model_accessor.Site.objects.first().id
+
 class ObjectSet(object):
     def __init__(self, objects):
         self.objects = objects
diff --git a/lib/xos-genx/xosgenx/targets/service.xtarget b/lib/xos-genx/xosgenx/targets/service.xtarget
index 1027680..4c47fab 100644
--- a/lib/xos-genx/xosgenx/targets/service.xtarget
+++ b/lib/xos-genx/xosgenx/targets/service.xtarget
@@ -2,14 +2,14 @@
 {% set legacy_tag = '_decl' %}
 {% set legacy = True %}
 from core.models.xosbase import *
-from core.models import ServiceInstance, TenantWithContainer
+from core.models import ServiceInstance
 {% else %}
 {% set legacy = False %}
 {% set legacy_tag = '' %}
 {% if file_exists('../header.py') or file_exists('header.py')-%}from header import *
 {% else %}
 from core.models.xosbase import *
-from core.models import ServiceInstance, TenantWithContainer
+from core.models import ServiceInstance
 {% endif %}
 {% endif %}
 
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/model-deps b/lib/xos-synchronizer/xos-synchronizer-tests/model-deps
index 247a190..559fa4c 100644
--- a/lib/xos-synchronizer/xos-synchronizer-tests/model-deps
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/model-deps
@@ -7,19 +7,7 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerUser", "controllerusers", "user"],
-        
-        
-        ["Site", "site", "users"],
-        ["DashboardView", "dashboards", "user"]
+        ["Site", "site", "users"]
         
     ], 
     
@@ -29,17 +17,6 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerPrivilege", "controllerprivileges", "privilege"]
-        
-        
         
     ], 
     
@@ -52,165 +29,17 @@
         ["Service", "service", "addresspools"]
         
     ], 
-     
     
-    "ControllerDashboardView": [
+    "ComputeServiceInstance": [
         
         
         
         
         
-        ["Controller", "controller", "controllerdashboardviews"],
-        ["DashboardView", "dashboardView", "controllerdashboardviews"]
-        
-    ], 
-    
-    "ControllerImages": [
-        
-        
-        
-        
-        
-        ["Image", "image", "controllerimages"],
-        ["Controller", "controller", "controllerimages"]
-        
-    ], 
-    
-    "ControllerNetwork": [
-        
-        
-        
-        
-        
-        ["Network", "network", "controllernetworks"],
-        ["Controller", "controller", "controllernetworks"]
-        
-    ], 
-    
-    "ControllerRole": [
-        
-        
-        
-        
-        
-        
-    ], 
-    
-    "ControllerSite": [
-        
-        
-        
-        
-        
-        ["Site", "site", "controllersite"],
-        ["Controller", "controller", "controllersite"]
-        
-    ], 
-    
-    "ControllerPrivilege": [
-        
-        
-        
-        
-        
-        ["Controller", "controller", "controllerprivileges"],
-        ["Privilege", "privilege", "controllerprivileges"]
-        
-    ], 
-    
-    "ControllerSitePrivilege": [
-        
-        
-        
-        
-        
-        ["Controller", "controller", "controllersiteprivileges"],
-        ["SitePrivilege", "site_privilege", "controllersiteprivileges"]
-        
-    ], 
-    
-    "ControllerSlice": [
-        
-        
-        
-        
-        
-        ["Controller", "controller", "controllerslices"],
-        ["Slice", "slice", "controllerslices"]
-        
-    ], 
-    
-    "ControllerSlicePrivilege": [
-        
-        
-        
-        
-        
-        ["Controller", "controller", "controllersliceprivileges"],
-        ["SlicePrivilege", "slice_privilege", "controllersliceprivileges"]
-        
-    ], 
-    
-    "ControllerUser": [
-        
-        
-        
-        
-        
-        ["User", "user", "controllerusers"],
-        ["Controller", "controller", "controllersusers"]
-        
-    ], 
-    
-    "DashboardView": [
-        
-        
-        
-        
-        
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerDashboardView", "controllerdashboardviews", "dashboardView"],
-        
-        
-        ["Controller", "controllers", "dashboardviews"],
-        ["Deployment", "deployments", "dashboardviews"]
-        
-    ], 
-    
-    "Deployment": [
-        
-        
-        
-        
-        
-        
-    ], 
-    
-    "DeploymentPrivilege": [
-        
-        
-        
-        
-        
-        ["User", "user", "deploymentprivileges"],
-        ["Deployment", "deployment", "deploymentprivileges"],
-        ["DeploymentRole", "role", "deploymentprivileges"]
-        
-    ], 
-    
-    "DeploymentRole": [
-        
-        
-        
-        
-        
+        ["Service", "owner", "service_instances"],
+        ["ServiceInstance", "master_serviceinstance", "child_serviceinstances"],
+        ["Slice", "slice", "computeserviceinstances"],
+        ["Image", "image", "computeserviceinstances"]
         
     ], 
     
@@ -229,44 +58,6 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerImages", "controllerimages", "image"]
-        
-        
-        
-    ], 
-    
-    "ImageDeployments": [
-        
-        
-        
-        
-        
-        ["Image", "image", "imagedeployments"],
-        ["Deployment", "deployment", "imagedeployments"]
-        
-    ], 
-    
-    "Instance": [
-        
-        
-        
-        
-        
-        ["Image", "image", "instances"],
-        ["User", "creator", "instances"],
-        ["Slice", "slice", "instances"],
-        ["Deployment", "deployment", "instance_deployment"],
-        ["Node", "node", "instances"],
-        ["Flavor", "flavor", "instance"],
-        ["Instance", "parent", "instance"]
         
     ], 
     
@@ -276,20 +67,10 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerNetwork", "controllernetworks", "network"],
-        
-        
         ["NetworkTemplate", "template", "network"],
         ["Slice", "owner", "ownedNetworks"],
         ["Slice", "permitted_slices", "availableNetworks"]
+        
     ], 
     
     "NetworkParameter": [
@@ -337,7 +118,7 @@
         
         
         
-        ["SiteDeployment", "site_deployment", "nodes"]
+        ["Site", "site", "nodes"]
         
     ], 
     
@@ -358,7 +139,17 @@
         
         
         ["Network", "network", "links"],
-        ["Instance", "instance", "ports"]
+        ["ServiceInstance", "service_instance", "ports"]
+        
+    ], 
+    
+    "Principal": [
+        
+        
+        
+        
+        
+        ["TrustDomain", "trust_domain", "principals"]
         
     ], 
     
@@ -368,11 +159,6 @@
         
         
         
-         
-        
-        
-        
-        
         
     ], 
     
@@ -385,6 +171,16 @@
         
     ], 
     
+    "ServicePort": [
+        
+        
+        
+        
+        
+        ["Service", "service", "serviceports"]
+        
+    ], 
+    
     "ServiceAttribute": [
         
         
@@ -406,67 +202,12 @@
         
     ], 
     
-    "ServiceMonitoringAgentInfo": [
-        
-        
-        
-        
-        
-        ["Service", "service", "servicemonitoringagents"]
-        
-    ], 
-    
-    "ServicePrivilege": [
-        
-        
-        
-        
-        
-        ["User", "user", "serviceprivileges"],
-        ["Service", "service", "serviceprivileges"],
-        ["ServiceRole", "role", "serviceprivileges"]
-        
-    ], 
-    
-    "ServiceRole": [
-        
-        
-        
-        
-        
-        
-    ], 
-    
     "Site": [
         
         
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerSite", "controllersite", "site"],
-        
-        
-        ["Deployment", "deployments", "sites"]
-        
-    ], 
-    
-    "SiteDeployment": [
-        
-        
-        
-        
-        
-        ["Site", "site", "sitedeployments"],
-        ["Deployment", "deployment", "sitedeployments"],
-        ["Controller", "controller", "sitedeployments"]
         
     ], 
     
@@ -476,17 +217,6 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerSitePrivilege", "controllersiteprivileges", "site_privilege"],
-        
-        
         ["User", "user", "siteprivileges"],
         ["Site", "site", "siteprivileges"],
         ["SiteRole", "role", "siteprivileges"]
@@ -508,23 +238,14 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerSlice", "controllerslices", "slice"],
-        
-        
         ["Site", "site", "slices"],
         ["Service", "service", "slices"],
         ["User", "creator", "slices"],
         ["Flavor", "default_flavor", "slices"],
         ["Image", "default_image", "slices"],
-        ["Node", "default_node", "slices"]
+        ["Node", "default_node", "slices"],
+        ["TrustDomain", "trust_domain", "slices"],
+        ["Principal", "principal", "slices"]
         
     ], 
     
@@ -534,17 +255,6 @@
         
         
         
-         
-        
-        
-        
-        
-        
-        
-        
-        ["ControllerSlicePrivilege", "controllersliceprivileges", "slice_privilege"],
-        
-        
         ["User", "user", "sliceprivileges"],
         ["Slice", "slice", "sliceprivileges"],
         ["SliceRole", "role", "sliceprivileges"]
@@ -596,7 +306,8 @@
         
         
         
-        ["Service", "owner", "service_instances"]
+        ["Service", "owner", "service_instances"],
+        ["ServiceInstance", "master_serviceinstance", "child_serviceinstances"]
         
     ], 
     
@@ -624,19 +335,17 @@
         
     ], 
     
-    "TenantWithContainer": [
+    "TrustDomain": [
         
         
         
         
         
-        ["Service", "owner", "service_instances"],
-        ["Instance", "instance", "+"],
-        ["User", "creator", "+"]
+        ["Service", "owner", "owned_trust_domains"]
         
     ], 
     
-    "XOS": [
+    "XOSCore": [
         
         
         
@@ -652,5 +361,14 @@
         
         
         
+    ], 
+    
+    "ServiceGraphConstraint": [
+        
+        
+        
+        
+        
+        
     ]
-}
+}
\ No newline at end of file
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py
deleted file mode 100644
index 47d55f0..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py
+++ /dev/null
@@ -1,244 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
-xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
-
-
-class TestControllerDependencies(unittest.TestCase):
-
-    def setUp(self):
-        global mock_enumerator, event_loop
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from xossynchronizer.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
-
-        import xossynchronizer.event_loop
-        reload(xossynchronizer.event_loop)
-        event_loop = xossynchronizer.event_loop
-
-        import xossynchronizer.backend
-        reload(xossynchronizer.backend)
-
-        from xossynchronizer.modelaccessor import model_accessor
-
-        from mock_modelaccessor import mock_enumerator
-
-        # import all class names to globals
-        for (k, v) in model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_multi_controller_path(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([csl])
-        site.controllersite = mock_enumerator([csi])
-
-        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
-        self.assertTrue(verdict)
-
-        # TODO(smbaker): event_loop.PROXY_EDGE is set to the wrong thing
-        # self.assertEqual(edge_type, event_loop.PROXY_EDGE)
-
-    def test_controller_path_simple(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        p.slice = s
-        s.site = t
-        ct.site = t
-        t.controllersite = mock_enumerator([ct])
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
-        self.assertEqual([ct, p], cohorts[0])
-        cohorts = self.synchronizer.compute_dependent_cohorts([ct, p], False)
-        self.assertEqual([ct, p], cohorts[0])
-
-    def test_controller_deletion_path(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        ct.site = t
-        p.slice = s
-        s.site = t
-
-        t.controllersite = mock_enumerator([ct])
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
-        self.assertIn(t, cohorts[0])
-        self.assertIn(ct, cohorts[0])
-        self.assertIn(s, cohorts[0])
-        self.assertIn(p, cohorts[0])
-        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
-        #self.assertEqual([t, ct, s, p], cohorts[0])
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
-        self.assertIn(t, cohorts[0])
-        self.assertIn(ct, cohorts[0])
-        self.assertIn(s, cohorts[0])
-        self.assertIn(p, cohorts[0])
-        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
-        #self.assertEqual([p, s, ct, t], cohorts[0])
-
-    def test_multi_controller_schedule(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([csl])
-        site.controllersite = mock_enumerator([csi])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertIn(site, cohorts[0])
-        self.assertIn(csi, cohorts[0])
-        self.assertIn(slice, cohorts[0])
-        self.assertIn(csl, cohorts[0])
-        self.assertIn(i, cohorts[0])
-
-        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
-        #self.assertEqual([site, csi, slice, csl, i], cohorts[0])
-
-    def test_multi_controller_path_negative(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-
-        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
-        self.assertFalse(verdict)
-        self.assertEqual(edge_type, None)
-
-    def test_controller_path_simple_negative(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        p.slice = s
-        s.site = t
-        ct.site = t
-        t.controllersite = mock_enumerator([])
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
-        self.assertIn([ct], cohorts)
-        self.assertIn([p], cohorts)
-
-    def test_controller_deletion_path_negative(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        s.site = t
-
-        t.controllersite = mock_enumerator([])
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
-        self.assertIn([t, s], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([ct], cohorts)
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
-        self.assertIn([s, t], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([ct], cohorts)
-
-    def DISABLED_test_multi_controller_deletion_schedule(self):
-        # TODO(smbaker): `csi` is undefined, test is broken as written.
-
-        csl = ControllerSlice()
-        cn = ControllerNetwork()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertIn([site, slice, i], cohorts)
-        self.assertIn([csl], cohorts)
-        self.assertIn([csi], cohorts)
-
-    def test_multi_controller_schedule_negative(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertIn([site, slice, i], cohorts)
-        self.assertIn([csl], cohorts)
-        self.assertIn([csi], cohorts)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py
index e2bbbb0..774bc97 100644
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py
@@ -65,39 +65,22 @@
 
     def test_load_steps(self):
         step_names = [s.__name__ for s in self.steps]
-        self.assertIn("SyncControllerSlices", step_names)
+        self.assertIn("SyncPort", step_names)
 
     def test_load_deps(self):
         self.synchronizer.load_dependency_graph()
         graph = self.synchronizer.model_dependency_graph
-        self.assertTrue(graph[False].has_edge("Instance", "Slice"))
-        self.assertTrue(graph[True].has_edge("Slice", "Instance"))
-        self.assertTrue(graph[False].has_edge("Slice", "ControllerSlice"))
-        self.assertTrue(graph[True].has_edge("ControllerSlice", "Slice"))
-
-    def test_load_dep_accessors(self):
-        self.synchronizer.load_dependency_graph()
-        graph = self.synchronizer.model_dependency_graph
-        self.assertDictContainsSubset(
-            {"src_accessor": "controllerslices"},
-            graph[False]["Slice"]["ControllerSlice"],
-        )
-        self.assertDictContainsSubset(
-            {"src_accessor": "slice", "dst_accessor": "controllerslices"},
-            graph[True]["Slice"]["ControllerSlice"],
-        )
+        self.assertTrue(graph[False].has_edge("Slice", "Site"))
+        self.assertTrue(graph[True].has_edge("Site", "Slice"))
 
     def test_load_sync_steps(self):
         self.synchronizer.load_sync_steps()
         model_to_step = self.synchronizer.model_to_step
         step_lookup = self.synchronizer.step_lookup
         self.assertIn(
-            ("ControllerSlice", ["SyncControllerSlices"]), model_to_step.items()
-        )
-        self.assertIn(
             ("Port", ["SyncPort"]), model_to_step.items()
         )
-        self.assertIn(("SiteRole", ["SyncRoles"]), model_to_step.items())
+        self.assertIn(("Image", ["SyncImages"]), model_to_step.items())
 
         for k, v in model_to_step.items():
             val = v[0]
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py
deleted file mode 100644
index e2659c3..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py
+++ /dev/null
@@ -1,286 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import unittest
-from mock import patch
-import mock
-import pdb
-
-import os
-import sys
-from xosconfig import Config
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
-xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
-
-class TestModelPolicyTenantWithContainer(unittest.TestCase):
-    def setUp(self):
-        global TenantWithContainerPolicy, LeastLoadedNodeScheduler, MockObjectList
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-
-        config = basic_conf = os.path.abspath(
-            os.path.dirname(os.path.realpath(__file__)) + "/test_config.yaml"
-        )
-        Config.clear()  # in case left unclean by a previous test case
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from xossynchronizer.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
-
-        import xossynchronizer.model_policies.model_policy_tenantwithcontainer
-        from xossynchronizer.model_policies.model_policy_tenantwithcontainer import (
-            TenantWithContainerPolicy,
-            LeastLoadedNodeScheduler,
-        )
-
-        from mock_modelaccessor import MockObjectList
-
-        # import all class names to globals
-        for (
-            k,
-            v,
-        ) in xossynchronizer.model_policies.model_policy_tenantwithcontainer.model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        from xossynchronizer.modelaccessor import model_accessor
-
-        self.policy = TenantWithContainerPolicy(model_accessor=model_accessor)
-        self.user = User(email="testadmin@test.org")
-        self.tenant = TenantWithContainer(creator=self.user)
-        self.flavor = Flavor(name="m1.small")
-
-    def tearDown(self):
-        Config.clear()
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_manage_container_no_slices(self):
-        with patch.object(TenantWithContainer, "owner") as owner:
-            owner.slices.count.return_value = 0
-            with self.assertRaises(Exception) as e:
-                self.policy.manage_container(self.tenant)
-            self.assertEqual(e.exception.message, "The service has no slices")
-
-    def test_manage_container(self):
-        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
-            TenantWithContainer, "save"
-        ) as tenant_save, patch.object(
-            Node, "site_deployment"
-        ) as site_deployment, patch.object(
-            Instance, "save"
-        ) as instance_save, patch.object(
-            Instance, "delete"
-        ) as instance_delete, patch.object(
-            TenantWithContainerPolicy, "get_image"
-        ) as get_image, patch.object(
-            LeastLoadedNodeScheduler, "pick"
-        ) as pick:
-            # setup mocks
-            node = Node(hostname="my.node.com")
-            slice = Slice(
-                name="mysite_test1", default_flavor=self.flavor, default_isolation="vm"
-            )
-            image = Image(name="trusty-server-multi-nic")
-            deployment = Deployment(name="testdeployment")
-            owner.slices.count.return_value = 1
-            owner.slices.all.return_value = [slice]
-            owner.slices.first.return_value = slice
-            get_image.return_value = image
-            pick.return_value = (node, None)
-            site_deployment.deployment = deployment
-            # done setup mocks
-
-            # call manage_container
-            self.policy.manage_container(self.tenant)
-
-            # make sure manage_container did what it is supposed to do
-            self.assertNotEqual(self.tenant.instance, None)
-            self.assertEqual(self.tenant.instance.creator.email, "testadmin@test.org")
-            self.assertEqual(self.tenant.instance.image.name, "trusty-server-multi-nic")
-            self.assertEqual(self.tenant.instance.flavor.name, "m1.small")
-            self.assertEqual(self.tenant.instance.isolation, "vm")
-            self.assertEqual(self.tenant.instance.node.hostname, "my.node.com")
-            self.assertEqual(self.tenant.instance.slice.name, "mysite_test1")
-            self.assertEqual(self.tenant.instance.parent, None)
-            instance_save.assert_called()
-            instance_delete.assert_not_called()
-            tenant_save.assert_called()
-
-    def test_manage_container_delete(self):
-        self.tenant.deleted = True
-
-        # call manage_container
-        self.policy.manage_container(self.tenant)
-
-        # make sure manage_container did what it is supposed to do
-        self.assertEqual(self.tenant.instance, None)
-
-    def test_manage_container_no_m1_small(self):
-        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
-            Node, "site_deployment"
-        ) as site_deployment, patch.object(
-            Flavor, "objects"
-        ) as flavor_objects, patch.object(
-            TenantWithContainerPolicy, "get_image"
-        ) as get_image, patch.object(
-            LeastLoadedNodeScheduler, "pick"
-        ) as pick:
-            # setup mocks
-            node = Node(hostname="my.node.com")
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            image = Image(name="trusty-server-multi-nic")
-            deployment = Deployment(name="testdeployment")
-            owner.slices.count.return_value = 1
-            owner.slices.all.return_value = [slice]
-            owner.slices.first.return_value = slice
-            get_image.return_value = image
-            pick.return_value = (node, None)
-            site_deployment.deployment = deployment
-            flavor_objects.filter.return_value = []
-            # done setup mocks
-
-            with self.assertRaises(Exception) as e:
-                self.policy.manage_container(self.tenant)
-            self.assertEqual(e.exception.message, "No m1.small flavor")
-
-    def test_least_loaded_node_scheduler(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            node = Node(hostname="my.node.com", id=4567)
-            node.instances = MockObjectList(initial=[])
-            node_objects.return_value = [node]
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node.id)
-
-    def test_least_loaded_node_scheduler_two_nodes(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the fewest instance (node1)
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node1.id)
-
-    def test_least_loaded_node_scheduler_two_nodes_multi(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            instance2 = Instance(id=2)
-            instance3 = Instance(id=3)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[instance2, instance3])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the fewest instance (node2)
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node2.id)
-
-    def test_least_loaded_node_scheduler_with_label(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
-            # syntax.
-            node1.nodelabels__name = None
-            node2.nodelabels__name = "foo"
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the label, even if it has a greater number of instances
-
-            sched = LeastLoadedNodeScheduler(slice, label="foo")
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node2.id)
-
-    def test_least_loaded_node_scheduler_create_label(self):
-        with patch.object(Node.objects, "get_items") as node_objects, patch.object(
-            NodeLabel, "save", autospec=True
-        ) as nodelabel_save, patch.object(NodeLabel, "node") as nodelabel_node_add:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
-            # syntax.
-            node1.nodelabels__name = None
-            node2.nodelabels__name = None
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the least number of instances
-
-            sched = LeastLoadedNodeScheduler(
-                slice, label="foo", constrain_by_service_instance=True
-            )
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node1.id)
-
-            # NodeLabel should have been created and saved
-
-            self.assertEqual(nodelabel_save.call_count, 1)
-            self.assertEqual(nodelabel_save.call_args[0][0].name, "foo")
-
-            # The NodeLabel's node field should have been added to
-
-            NodeLabel.node.add.assert_called_with(node1)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py
deleted file mode 100644
index ab861c8..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
-xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
-
-ANSIBLE_FILE = "/tmp/payload_test"
-
-log = None
-
-
-def run_fake_ansible_template(*args, **kwargs):
-    opts = args[1]
-    open(ANSIBLE_FILE, "w").write(json.dumps(opts))
-    return [{"rc": 0}]
-
-
-def run_fake_ansible_template_fail(*args, **kwargs):
-    opts = args[1]
-    open(ANSIBLE_FILE, "w").write(json.dumps(opts))
-    return [{"rc": 1}]
-
-
-def get_ansible_output():
-    ansible_str = open(ANSIBLE_FILE).read()
-    return json.loads(ansible_str)
-
-
-class TestPayload(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-
-        global log
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        if not log:
-            from multistructlog import create_logger
-
-            log = create_logger(Config().get("logging"))
-
-    def setUp(self):
-
-        global log, test_steps, event_loop
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from xossynchronizer.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
-
-        import xossynchronizer.event_loop
-
-        reload(xossynchronizer.event_loop)
-        import xossynchronizer.backend
-
-        reload(xossynchronizer.backend)
-        import test_steps.sync_instances
-        import test_steps.sync_controller_slices
-        from xossynchronizer.modelaccessor import model_accessor
-
-        # import all class names to globals
-        for (k, v) in model_accessor.all_model_classes.items():
-            globals()[k] = v
-        b = xossynchronizer.backend.Backend(model_accessor = model_accessor)
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_delete_record(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save:
-            o = Instance()
-            o.name = "Sisi Pascal"
-
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
-            self.synchronizer.delete_record(o, log)
-
-            a = get_ansible_output()
-            self.assertDictContainsSubset({"delete": True, "name": o.name}, a)
-            o.save.assert_called_with(update_fields=["backend_need_reap"])
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template_fail,
-    )
-    def test_delete_record_fail(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save:
-            o = Instance()
-            o.name = "Sisi Pascal"
-
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
-
-            with self.assertRaises(Exception) as e:
-                self.synchronizer.delete_record(o, log)
-
-            self.assertEqual(
-                e.exception.message, "Nonzero rc from Ansible during delete_record"
-            )
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_sync_record(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save:
-            o = Instance()
-            o.name = "Sisi Pascal"
-
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
-            self.synchronizer.sync_record(o, log)
-
-            a = get_ansible_output()
-            self.assertDictContainsSubset({"delete": False, "name": o.name}, a)
-            o.save.assert_called_with(
-                update_fields=[
-                    "enacted",
-                    "backend_status",
-                    "backend_register",
-                    "backend_code",
-                ]
-            )
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_sync_cohort(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save, mock.patch.object(
-            ControllerSlice, "save"
-        ) as controllerslice_save:
-            cs = ControllerSlice()
-            s = Slice(name="SP SP")
-            cs.slice = s
-
-            o = Instance()
-            o.name = "Sisi Pascal"
-            o.slice = s
-
-            cohort = [cs, o]
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
-            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
-                model_accessor = self.synchronizer.model_accessor
-            )
-
-            self.synchronizer.sync_cohort(cohort, False)
-
-            a = get_ansible_output()
-            self.assertDictContainsSubset({"delete": False, "name": o.name}, a)
-            o.save.assert_called_with(
-                update_fields=[
-                    "enacted",
-                    "backend_status",
-                    "backend_register",
-                    "backend_code",
-                ]
-            )
-            cs.save.assert_called_with(
-                update_fields=[
-                    "enacted",
-                    "backend_status",
-                    "backend_register",
-                    "backend_code",
-                ]
-            )
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_deferred_exception(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save:
-            cs = ControllerSlice()
-            s = Slice(name="SP SP")
-            cs.slice = s
-            cs.force_defer = True
-
-            o = Instance()
-            o.name = "Sisi Pascal"
-            o.slice = s
-
-            cohort = [cs, o]
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
-            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
-                model_accessor=self.synchronizer.model_accessor
-            )
-
-            self.synchronizer.sync_cohort(cohort, False)
-            o.save.assert_called_with(
-                always_update_timestamp=True,
-                update_fields=["backend_status", "backend_register"],
-            )
-            self.assertEqual(cs.backend_code, 0)
-
-            self.assertIn("Force", cs.backend_status)
-            self.assertIn("Failed due to", o.backend_status)
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_backend_status(self, mock_run_template):
-        with mock.patch.object(Instance, "save") as instance_save:
-            cs = ControllerSlice()
-            s = Slice(name="SP SP")
-            cs.slice = s
-            cs.force_fail = True
-
-            o = Instance()
-            o.name = "Sisi Pascal"
-            o.slice = s
-
-            cohort = [cs, o]
-            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
-            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
-                model_accessor=self.synchronizer.model_accessor)
-
-            self.synchronizer.sync_cohort(cohort, False)
-            o.save.assert_called_with(
-                always_update_timestamp=True,
-                update_fields=["backend_status", "backend_register"],
-            )
-            self.assertIn("Force", cs.backend_status)
-            self.assertIn("Failed due to", o.backend_status)
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_fetch_pending(self, mock_run_template):
-        pending_objects, pending_steps = self.synchronizer.fetch_pending()
-        pending_objects2 = list(pending_objects)
-
-        any_cs = next(
-            obj for obj in pending_objects if obj.leaf_model_name == "ControllerSlice"
-        )
-        any_instance = next(
-            obj for obj in pending_objects2 if obj.leaf_model_name == "Instance"
-        )
-
-        slice = Slice()
-        any_instance.slice = slice
-        any_cs.slice = slice
-
-        self.synchronizer.external_dependencies = []
-        cohorts = self.synchronizer.compute_dependent_cohorts(pending_objects, False)
-        flat_objects = [item for cohort in cohorts for item in cohort]
-
-        self.assertEqual(set(flat_objects), set(pending_objects))
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_fetch_pending_with_external_dependencies(
-        self, mock_run_template,
-    ):
-        pending_objects, pending_steps = self.synchronizer.fetch_pending()
-        pending_objects2 = list(pending_objects)
-
-        any_cn = next(
-            obj for obj in pending_objects if obj.leaf_model_name == "ControllerNetwork"
-        )
-        any_user = next(
-            obj for obj in pending_objects2 if obj.leaf_model_name == "User"
-        )
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(pending_objects, False)
-
-        flat_objects = [item for cohort in cohorts for item in cohort]
-        self.assertEqual(set(flat_objects), set(pending_objects))
-
-        # These cannot be None, but for documentation purposes
-        self.assertIsNotNone(any_cn)
-        self.assertIsNotNone(any_user)
-
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_external_dependency_exception(self, mock_run_template):
-        cs = ControllerSlice()
-        s = Slice(name="SP SP")
-        cs.slice = s
-
-        o = Instance()
-        o.name = "Sisi Pascal"
-        o.slice = s
-
-        cohort = [cs, o]
-        o.synchronizer_step = None
-        o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
-
-        self.synchronizer.sync_cohort(cohort, False)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py
index fdbff3c..a9c6c19 100644
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py
@@ -86,24 +86,11 @@
         sys.path = self.sys_path_save
         os.chdir(self.cwd_save)
 
-    @mock.patch(
-        "test_steps.sync_instances.ansiblesyncstep.run_template",
-        side_effect=run_fake_ansible_template,
-    )
-    def test_run_once(self, mock_run_template):
+    def test_run_once(self):
         pending_objects, pending_steps = self.synchronizer.fetch_pending()
         pending_objects2 = list(pending_objects)
 
-        any_cs = next(
-            obj for obj in pending_objects if obj.leaf_model_name == "ControllerSlice"
-        )
-        any_instance = next(
-            obj for obj in pending_objects2 if obj.leaf_model_name == "Instance"
-        )
-
         slice = Slice()
-        any_instance.slice = slice
-        any_cs.slice = slice
 
         self.synchronizer.run_once()
 
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py
index 0164c5a..0aa5334 100644
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py
@@ -83,36 +83,6 @@
         same, t = self.synchronizer.same_object(s, t)
         self.assertFalse(same)
 
-    def test_same_object_lst(self):
-        s = Slice(pk=5)
-        t = ControllerSlice(slice=s)
-        u = ControllerSlice(slice=s)
-
-        s.controllerslices = mock_enumerator([t, u])
-
-        same, et = self.synchronizer.same_object(s.controllerslices, u)
-        self.assertTrue(same)
-        self.assertEqual(et, event_loop.PROXY_EDGE)
-
-        same, et = self.synchronizer.same_object(s.controllerslices, t)
-
-        self.assertTrue(same)
-        self.assertEqual(et, event_loop.PROXY_EDGE)
-
-    def test_same_object_lst_dc(self):
-        r = Slice(pk=4)
-        s = Slice(pk=5)
-        t = ControllerSlice(slice=r)
-        u = ControllerSlice(slice=s)
-
-        s.controllerslices = mock_enumerator([u])
-
-        same, et = self.synchronizer.same_object(s.controllerslices, t)
-        self.assertFalse(same)
-
-        same, et = self.synchronizer.same_object(s.controllerslices, u)
-        self.assertTrue(same)
-
     def test_concrete_path_no_model_path(self):
         p = Port()
         n = NetworkParameter()
@@ -120,60 +90,24 @@
         self.assertFalse(verdict)
 
     def test_concrete_no_object_path_adjacent(self):
-        p = Instance()
-        s1 = Slice()
-        s2 = Slice()
-        p.slice = s2
-        verdict, _ = self.synchronizer.concrete_path_exists(p, s1)
-
+        slice = Slice()
+        site1 = Site()
+        site2 = Site()
+        slice.site = site2
+        verdict, _ = self.synchronizer.concrete_path_exists(slice, site1)
         self.assertFalse(verdict)
 
     def test_concrete_object_path_adjacent(self):
-        p = Instance()
-        s = Slice()
-        p.slice = s
-        verdict, edge_type = self.synchronizer.concrete_path_exists(p, s)
+        slice = Slice()
+        site = Site()
+        slice.site = site
+        verdict, edge_type = self.synchronizer.concrete_path_exists(slice, site)
 
         self.assertTrue(verdict)
         self.assertEqual(edge_type, event_loop.DIRECT_EDGE)
 
-    def test_concrete_object_controller_path_adjacent(self):
-        p = Instance()
-        q = Instance()
-        cs = ControllerSlice()
-        cs2 = ControllerSlice()
-        s1 = Slice()
-        s2 = Slice()
-        p.slice = s1
-        q.slice = s2
-        cs.slice = s1
-        s1.controllerslices = mock_enumerator([cs])
-        s2.controllerslices = mock_enumerator([])
-
-        verdict1, edge_type1 = self.synchronizer.concrete_path_exists(p, cs)
-        verdict2, _ = self.synchronizer.concrete_path_exists(q, cs)
-        verdict3, _ = self.synchronizer.concrete_path_exists(p, cs2)
-
-        self.assertTrue(verdict1)
-        self.assertFalse(verdict2)
-        self.assertFalse(verdict3)
-
-        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
-        #self.assertEqual(edge_type1, event_loop.PROXY_EDGE)
-
-    def test_concrete_object_controller_path_distant(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        ct.site = t
-        p.slice = s
-        s.site = t
-        verdict = self.synchronizer.concrete_path_exists(p, ct)
-        self.assertTrue(verdict)
-
     def test_concrete_object_path_distant(self):
-        p = Instance()
+        p = ComputeServiceInstance()
         s = Slice()
         t = Site()
         p.slice = s
@@ -181,93 +115,14 @@
         verdict = self.synchronizer.concrete_path_exists(p, t)
         self.assertTrue(verdict)
 
-    def test_concrete_no_object_path_distant(self):
-        p = Instance()
-        s = Slice()
-        s.controllerslice = mock_enumerator([])
-
-        t = Site()
-        t.controllersite = mock_enumerator([])
-
-        ct = ControllerSite()
-        ct.site = Site()
-        p.slice = s
-        s.site = t
-
-        verdict, _ = self.synchronizer.concrete_path_exists(p, ct)
-        self.assertFalse(verdict)
-
     def test_cohorting_independent(self):
         i = Image()
 
         p = Slice()
-        c = Instance()
-        c.slice = None
-        c.image = None
+        c = Site()
 
         cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c], False)
         self.assertEqual(len(cohorts), 3)
 
-    def test_cohorting_related(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        s = ControllerSlice()
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], False)
-        self.assertIn([i, c], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([s], cohorts)
-
-    def test_cohorting_related_multi(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        cs = ControllerSlice()
-        s = Slice()
-        cs.slice = s
-        s.controllerslices = mock_enumerator([cs])
-        c.slice = s
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], False)
-
-        big_cohort = max(cohorts, key=len)
-        self.assertGreater(big_cohort.index(c), big_cohort.index(i))
-        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
-        #self.assertGreater(big_cohort.index(cs), big_cohort.index(s))
-        self.assertIn([p], cohorts)
-
-    def test_cohorting_related_multi_delete(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        cs = ControllerSlice()
-        s = Slice()
-        cs.slice = s
-        c.slice = s
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], True)
-
-        big_cohort = max(cohorts, key=len)
-        self.assertGreater(big_cohort.index(i), big_cohort.index(c))
-        self.assertGreater(big_cohort.index(s), big_cohort.index(cs))
-        self.assertIn([p], cohorts)
-
-    def test_cohorting_related_delete(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        s = ControllerSlice()
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], True)
-        self.assertIn([c, i], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([s], cohorts)
-
-
 if __name__ == "__main__":
     unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py
deleted file mode 100644
index 8cbabcb..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from xossynchronizer.steps.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from xossynchronizer.steps.syncstep import DeferredException
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__), "..")
-sys.path.insert(0, parentdir)
-
-
-class SyncContainer(SyncInstanceUsingAnsible):
-    observes = "Instance"
-    template_name = "sync_container.yaml"
-
-    def __init__(self, *args, **kwargs):
-        super(SyncContainer, self).__init__(*args, **kwargs)
-
-    def fetch_pending(self, deletion=False):
-        i = self.model_accessor.Instance()
-        i.name = "Spectacular Sponge"
-        j = self.model_accessor.Instance()
-        j.name = "Spontaneous Tent"
-        k = self.model_accessor.Instance()
-        k.name = "Embarrassed Cat"
-
-        objs = [i, j, k]
-        return objs
-
-    def sync_record(self, o):
-        pass
-
-    def delete_record(self, o):
-        pass
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py
deleted file mode 100644
index ef85983..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-from xossynchronizer.steps.syncstep import SyncStep
-
-class SyncControllerImages(SyncStep):
-    observes = "ControllerImages"
-    requested_interval = 0
-    playbook = "sync_controller_images.yaml"
-
-    def fetch_pending(self, deleted):
-        ci = self.model_accessor.ControllerImages()
-        i = self.model_accessor.Image()
-        i.name = "Lush Loss"
-        ci.i = i
-        return [ci]
-
-    def map_sync_inputs(self, controller_image):
-        image_fields = {
-            "endpoint": controller_image.controller.auth_url,
-            "endpoint_v3": controller_image.controller.auth_url_v3,
-            "admin_user": controller_image.controller.admin_user,
-            "admin_password": controller_image.controller.admin_password,
-            "domain": controller_image.controller.domain,
-            "name": controller_image.image.name,
-            "filepath": controller_image.image.path,
-            # name of ansible playbook
-            "ansible_tag": "%s@%s"
-            % (controller_image.image.name, controller_image.controller.name),
-        }
-
-        return image_fields
-
-    def map_sync_outputs(self, controller_image, res):
-        image_id = res[0]["id"]
-        controller_image.glance_image_id = image_id
-        controller_image.backend_status = "1 - OK"
-        controller_image.save()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py
deleted file mode 100644
index 55dfe4e..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-import struct
-import socket
-from netaddr import IPAddress, IPNetwork
-from xossynchronizer.steps.syncstep import SyncStep
-
-class SyncControllerNetworks(SyncStep):
-    requested_interval = 0
-    observes = "ControllerNetwork"
-    external_dependencies = ["User"]
-    playbook = "sync_controller_networks.yaml"
-
-    def fetch_pending(self, deleted):
-        ci = self.model_accessor.ControllerNetwork()
-        i = self.model_accessor.Network()
-        i.name = "Lush Loss"
-        s = self.model_accessor.Slice()
-        s.name = "Ghastly Notebook"
-        i.owner = s
-        ci.i = i
-        return [ci]
-
-    def map_sync_outputs(self, controller_network, res):
-        network_id = res[0]["network"]["id"]
-        subnet_id = res[1]["subnet"]["id"]
-        controller_network.net_id = network_id
-        controller_network.subnet = self.cidr
-        controller_network.subnet_id = subnet_id
-        controller_network.backend_status = "1 - OK"
-        if not controller_network.segmentation_id:
-            controller_network.segmentation_id = str(
-                self.get_segmentation_id(controller_network)
-            )
-        controller_network.save()
-
-    def map_sync_inputs(self, controller_network):
-        pass
-
-    def map_delete_inputs(self, controller_network):
-        network_fields = {"endpoint": None, "delete": True}
-
-        return network_fields
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py
deleted file mode 100644
index e286ef8..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-import json
-from xossynchronizer.steps.syncstep import SyncStep
-
-class SyncControllerSitePrivileges(SyncStep):
-    requested_interval = 0
-    observes = "ControllerSitePrivilege"
-    playbook = "sync_controller_users.yaml"
-
-    def map_sync_inputs(self, controller_site_privilege):
-        controller_register = json.loads(
-            controller_site_privilege.controller.backend_register
-        )
-        if not controller_site_privilege.controller.admin_user:
-            return
-
-        roles = [controller_site_privilege.site_privilege.role.role]
-        # setup user home site roles at controller
-        if not controller_site_privilege.site_privilege.user.site:
-            raise Exception(
-                "Siteless user %s" % controller_site_privilege.site_privilege.user.email
-            )
-        else:
-            # look up tenant id for the user's site at the controller
-            # ctrl_site_deployments = SiteDeployment.objects.filter(
-            #  site_deployment__site=controller_site_privilege.user.site,
-            #  controller=controller_site_privilege.controller)
-
-            # if ctrl_site_deployments:
-            #    # need the correct tenant id for site at the controller
-            #    tenant_id = ctrl_site_deployments[0].tenant_id
-            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
-            user_fields = {
-                "endpoint": controller_site_privilege.controller.auth_url,
-                "endpoint_v3": controller_site_privilege.controller.auth_url_v3,
-                "domain": controller_site_privilege.controller.domain,
-                "name": controller_site_privilege.site_privilege.user.email,
-                "email": controller_site_privilege.site_privilege.user.email,
-                "password": controller_site_privilege.site_privilege.user.remote_password,
-                "admin_user": controller_site_privilege.controller.admin_user,
-                "admin_password": controller_site_privilege.controller.admin_password,
-                "ansible_tag": "%s@%s"
-                % (
-                    controller_site_privilege.site_privilege.user.email.replace(
-                        "@", "-at-"
-                    ),
-                    controller_site_privilege.controller.name,
-                ),
-                "admin_tenant": controller_site_privilege.controller.admin_tenant,
-                "roles": roles,
-                "tenant": controller_site_privilege.site_privilege.site.login_base,
-            }
-
-            return user_fields
-
-    def map_sync_outputs(self, controller_site_privilege, res):
-        # results is an array in which each element corresponds to an
-        # "ok" string received per operation. If we get as many oks as
-        # the number of operations we issued, that means a grand success.
-        # Otherwise, the number of oks tell us which operation failed.
-        controller_site_privilege.role_id = res[0]["id"]
-        controller_site_privilege.save()
-
-    def delete_record(self, controller_site_privilege):
-        controller_register = json.loads(
-            controller_site_privilege.controller.backend_register
-        )
-        if controller_register.get("disabled", False):
-            raise InnocuousException(
-                "Controller %s is disabled" % controller_site_privilege.controller.name
-            )
-
-        if controller_site_privilege.role_id:
-            driver = self.driver.admin_driver(
-                controller=controller_site_privilege.controller
-            )
-            user = ControllerUser.objects.get(
-                controller=controller_site_privilege.controller,
-                user=controller_site_privilege.site_privilege.user,
-            )
-            site = ControllerSite.objects.get(
-                controller=controller_site_privilege.controller,
-                user=controller_site_privilege.site_privilege.user,
-            )
-            driver.delete_user_role(
-                user.kuser_id,
-                site.tenant_id,
-                controller_site_privilege.site_prvilege.role.role,
-            )
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py
deleted file mode 100644
index 24aa76f..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-import json
-from xossynchronizer.steps.syncstep import SyncStep
-
-class SyncControllerSites(SyncStep):
-    requested_interval = 0
-    observes = "ControllerSite"
-    playbook = "sync_controller_sites.yaml"
-
-    def fetch_pending(self, deleted=False):
-        lobjs = super(SyncControllerSites, self).fetch_pending(deleted)
-
-        if not deleted:
-            # filter out objects with null controllers
-            lobjs = [x for x in lobjs if x.controller]
-
-        return lobjs
-
-    def map_sync_inputs(self, controller_site):
-        tenant_fields = {
-            "endpoint": controller_site.controller.auth_url,
-            "endpoint_v3": controller_site.controller.auth_url_v3,
-            "domain": controller_site.controller.domain,
-            "admin_user": controller_site.controller.admin_user,
-            "admin_password": controller_site.controller.admin_password,
-            "admin_tenant": controller_site.controller.admin_tenant,
-            # name of ansible playbook
-            "ansible_tag": "%s@%s"
-            % (controller_site.site.login_base, controller_site.controller.name),
-            "tenant": controller_site.site.login_base,
-            "tenant_description": controller_site.site.name,
-        }
-        return tenant_fields
-
-    def map_sync_outputs(self, controller_site, res):
-        controller_site.tenant_id = res[0]["id"]
-        controller_site.backend_status = "1 - OK"
-        controller_site.save()
-
-    def delete_record(self, controller_site):
-        controller_register = json.loads(controller_site.controller.backend_register)
-        if controller_register.get("disabled", False):
-            raise InnocuousException(
-                "Controller %s is disabled" % controller_site.controller.name
-            )
-
-        if controller_site.tenant_id:
-            driver = self.driver.admin_driver(controller=controller_site.controller)
-            driver.delete_tenant(controller_site.tenant_id)
-
-        """
-        Ansible does not support tenant deletion yet
-
-        import pdb
-        pdb.set_trace()
-        template = os_template_env.get_template('delete_controller_sites.yaml')
-        tenant_fields = {'endpoint':controller_site.controller.auth_url,
-                         'admin_user': controller_site.controller.admin_user,
-                         'admin_password': controller_site.controller.admin_password,
-                         'admin_tenant': 'admin',
-                         'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
-                         'tenant': controller_site.controller_site.site.login_base,
-                         'delete': True}
-
-        rendered = template.render(tenant_fields)
-        res = run_template('sync_controller_sites.yaml', tenant_fields)
-
-        if (len(res)!=1):
-                raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
-        """
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py
deleted file mode 100644
index 09b63e6..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-import json
-from xossynchronizer.steps.syncstep import SyncStep
-
-class SyncControllerSlicePrivileges(SyncStep):
-    requested_interval = 0
-    observes = "ControllerSlicePrivilege"
-    playbook = "sync_controller_users.yaml"
-
-    def map_sync_inputs(self, controller_slice_privilege):
-        if not controller_slice_privilege.controller.admin_user:
-            return
-
-        template = os_template_env.get_template("sync_controller_users.yaml")
-        roles = [controller_slice_privilege.slice_privilege.role.role]
-        # setup user home slice roles at controller
-        if not controller_slice_privilege.slice_privilege.user.site:
-            raise Exception(
-                "Sliceless user %s"
-                % controller_slice_privilege.slice_privilege.user.email
-            )
-        else:
-            user_fields = {
-                "endpoint": controller_slice_privilege.controller.auth_url,
-                "endpoint_v3": controller_slice_privilege.controller.auth_url_v3,
-                "domain": controller_slice_privilege.controller.domain,
-                "name": controller_slice_privilege.slice_privilege.user.email,
-                "email": controller_slice_privilege.slice_privilege.user.email,
-                "password": controller_slice_privilege.slice_privilege.user.remote_password,
-                "admin_user": controller_slice_privilege.controller.admin_user,
-                "admin_password": controller_slice_privilege.controller.admin_password,
-                "ansible_tag": "%s@%s@%s"
-                % (
-                    controller_slice_privilege.slice_privilege.user.email.replace(
-                        "@", "-at-"
-                    ),
-                    controller_slice_privilege.slice_privilege.slice.name,
-                    controller_slice_privilege.controller.name,
-                ),
-                "admin_tenant": controller_slice_privilege.controller.admin_tenant,
-                "roles": roles,
-                "tenant": controller_slice_privilege.slice_privilege.slice.name,
-            }
-            return user_fields
-
-    def map_sync_outputs(self, controller_slice_privilege, res):
-        controller_slice_privilege.role_id = res[0]["id"]
-        controller_slice_privilege.save()
-
-    def delete_record(self, controller_slice_privilege):
-        controller_register = json.loads(
-            controller_slice_privilege.controller.backend_register
-        )
-        if controller_register.get("disabled", False):
-            raise InnocuousException(
-                "Controller %s is disabled" % controller_slice_privilege.controller.name
-            )
-
-        if controller_slice_privilege.role_id:
-            driver = self.driver.admin_driver(
-                controller=controller_slice_privilege.controller
-            )
-            user = ControllerUser.objects.filter(
-                controller_id=controller_slice_privilege.controller.id,
-                user_id=controller_slice_privilege.slice_privilege.user.id,
-            )
-            user = user[0]
-            slice = ControllerSlice.objects.filter(
-                controller_id=controller_slice_privilege.controller.id,
-                user_id=controller_slice_privilege.slice_privilege.user.id,
-            )
-            slice = slice[0]
-            driver.delete_user_role(
-                user.kuser_id,
-                slice.tenant_id,
-                controller_slice_privilege.slice_prvilege.role.role,
-            )
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py
deleted file mode 100644
index 31c62f1..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-from xossynchronizer.steps.syncstep import DeferredException
-from xossynchronizer.steps.ansiblesyncstep import AnsibleSyncStep
-
-class SyncControllerSlices(AnsibleSyncStep):
-    requested_interval = 0
-    observes = "ControllerSlice"
-    playbook = "sync_controller_slices.yaml"
-
-    def map_sync_inputs(self, controller_slice):
-        if getattr(controller_slice, "force_fail", None):
-            raise Exception("Forced failure")
-        elif getattr(controller_slice, "force_defer", None):
-            raise DeferredException("Forced defer")
-
-        tenant_fields = {"endpoint": "endpoint", "name": "Flagrant Haircut"}
-
-        return tenant_fields
-
-    def map_sync_outputs(self, controller_slice, res):
-        controller_slice.save()
-
-    def map_delete_inputs(self, controller_slice):
-        tenant_fields = {
-            "endpoint": "endpoint",
-            "name": "Conscientious Plastic",
-            "delete": True,
-        }
-        return tenant_fields
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py
deleted file mode 100644
index a039257..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-from xossynchronizer.steps.syncstep import SyncStep
-
-
-class SyncControllerUsers(SyncStep):
-    requested_interval = 0
-    observes = "ControllerUser"
-    playbook = "sync_controller_users.yaml"
-
-    def map_sync_inputs(self, controller_user):
-        if not controller_user.controller.admin_user:
-            return
-
-        # All users will have at least the 'user' role at their home site/tenant.
-        # We must also check if the user should have the admin role
-
-        roles = ["user"]
-        if controller_user.user.is_admin:
-            driver = self.driver.admin_driver(controller=controller_user.controller)
-            roles.append(driver.get_admin_role().name)
-
-        # setup user home site roles at controller
-        if not controller_user.user.site:
-            raise Exception("Siteless user %s" % controller_user.user.email)
-        else:
-            user_fields = {
-                "endpoint": controller_user.controller.auth_url,
-                "endpoint_v3": controller_user.controller.auth_url_v3,
-                "domain": controller_user.controller.domain,
-                "name": controller_user.user.email,
-                "email": controller_user.user.email,
-                "password": controller_user.user.remote_password,
-                "admin_user": controller_user.controller.admin_user,
-                "admin_password": controller_user.controller.admin_password,
-                "ansible_tag": "%s@%s"
-                % (
-                    controller_user.user.email.replace("@", "-at-"),
-                    controller_user.controller.name,
-                ),
-                "admin_project": controller_user.controller.admin_tenant,
-                "roles": roles,
-                "project": controller_user.user.site.login_base,
-            }
-            return user_fields
-
-    def map_sync_outputs(self, controller_user, res):
-        controller_user.kuser_id = res[0]["user"]["id"]
-        controller_user.backend_status = "1 - OK"
-        controller_user.save()
-
-    def delete_record(self, controller_user):
-        if controller_user.kuser_id:
-            driver = self.driver.admin_driver(controller=controller_user.controller)
-            driver.delete_user(controller_user.kuser_id)
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py
deleted file mode 100644
index 1a70884..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-from xossynchronizer.steps import ansiblesyncstep
-
-
-def escape(s):
-    s = s.replace("\n", r"\n").replace('"', r"\"")
-    return s
-
-
-class SyncInstances(ansiblesyncstep.AnsibleSyncStep):
-    requested_interval = 0
-    # This observes is intentionally a list of one string, to test steps where observes is a list of strings.
-    observes = ["Instance"]
-    playbook = "sync_instances.yaml"
-
-    def fetch_pending(self, deletion=False):
-        objs = super(SyncInstances, self).fetch_pending(deletion)
-        objs = [x for x in objs if x.isolation == "vm"]
-        return objs
-
-    def map_sync_inputs(self, instance):
-        inputs = {}
-        metadata_update = {}
-
-        fields = {"name": instance.name, "delete": False}
-        return fields
-
-    def map_sync_outputs(self, instance, res):
-        instance.save()
-
-    def map_delete_inputs(self, instance):
-        input = {
-            "endpoint": "endpoint",
-            "admin_user": "admin_user",
-            "admin_password": "admin_password",
-            "project_name": "project_name",
-            "tenant": "tenant",
-            "tenant_description": "tenant_description",
-            "name": instance.name,
-            "ansible_tag": "ansible_tag",
-            "delete": True,
-        }
-
-        return input
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py
deleted file mode 100644
index 1bd2d0f..0000000
--- a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import base64
-from xossynchronizer.steps.syncstep import SyncStep
-from mock_modelaccessor import *
-
-
-class SyncRoles(SyncStep):
-    requested_interval = 0
-
-    # This observes is intentionally a list of three classes, to test steps where observes is a list of classes.
-    observes = [SiteRole, SliceRole, ControllerRole]
-
-    def sync_record(self, role):
-        if not role.enacted:
-            controllers = Controller.objects.all()
-            for controller in controllers:
-                driver = self.driver.admin_driver(controller=controller)
-                driver.create_role(role.role)
-            role.save()
diff --git a/lib/xos-synchronizer/xossynchronizer/model_policies/model_policy_tenantwithcontainer.py b/lib/xos-synchronizer/xossynchronizer/model_policies/model_policy_tenantwithcontainer.py
index 66ac348..3db1395 100644
--- a/lib/xos-synchronizer/xossynchronizer/model_policies/model_policy_tenantwithcontainer.py
+++ b/lib/xos-synchronizer/xossynchronizer/model_policies/model_policy_tenantwithcontainer.py
@@ -292,7 +292,6 @@
                 assert node is not None
                 assert desired_image is not None
                 assert tenant.creator is not None
-                assert node.site_deployment.deployment is not None
                 assert flavor is not None
 
                 try:
@@ -301,7 +300,6 @@
                         node=node,
                         image=desired_image,
                         creator=tenant.creator,
-                        deployment=node.site_deployment.deployment,
                         flavor=flavor,
                         isolation=slice.default_isolation,
                         parent=parent,
diff --git a/lib/xos-synchronizer/xossynchronizer/synchronizer.py b/lib/xos-synchronizer/xossynchronizer/synchronizer.py
index 30d3ca0..f204785 100644
--- a/lib/xos-synchronizer/xossynchronizer/synchronizer.py
+++ b/lib/xos-synchronizer/xossynchronizer/synchronizer.py
@@ -33,8 +33,7 @@
         wait = False
         while not models_active:
             try:
-                _i = self.model_accessor.Instance.objects.first()
-                _n = self.model_accessor.NetworkTemplate.objects.first()
+                _i = self.model_accessor.Site.objects.first()
                 models_active = True
             except Exception as e:
                 self.log.info("Exception", e=e)
diff --git a/xos/core/migrations/0004_auto_20190304_2050.py b/xos/core/migrations/0004_auto_20190304_2050.py
new file mode 100644
index 0000000..235a688
--- /dev/null
+++ b/xos/core/migrations/0004_auto_20190304_2050.py
@@ -0,0 +1,345 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.11 on 2019-03-05 01:50
+from __future__ import unicode_literals
+
+import core.models.xosbase_header
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0003_auto_20190304_1358'),
+    ]
+
+    operations = [
+        migrations.RemoveField(
+            model_name='controller_decl',
+            name='deployment',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllerimages_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllerimages_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllerimages_decl',
+            name='image',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllernetwork_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllernetwork_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllernetwork_decl',
+            name='network',
+        ),
+        migrations.DeleteModel(
+            name='ControllerRole_decl',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllersite_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllersite_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllersite_decl',
+            name='site',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllersiteprivilege_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllersiteprivilege_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllersiteprivilege_decl',
+            name='site_privilege',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllerslice_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllerslice_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllerslice_decl',
+            name='slice',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controllersliceprivilege_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controllersliceprivilege_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controllersliceprivilege_decl',
+            name='slice_privilege',
+        ),
+        migrations.AlterUniqueTogether(
+            name='controlleruser_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='controlleruser_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='controlleruser_decl',
+            name='user',
+        ),
+        migrations.AlterUniqueTogether(
+            name='imagedeployments_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='imagedeployments_decl',
+            name='deployment',
+        ),
+        migrations.RemoveField(
+            model_name='imagedeployments_decl',
+            name='image',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='creator',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='deployment',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='flavor',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='image',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='node',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='parent',
+        ),
+        migrations.RemoveField(
+            model_name='instance_decl',
+            name='slice',
+        ),
+        migrations.AlterUniqueTogether(
+            name='sitedeployment_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='sitedeployment_decl',
+            name='controller',
+        ),
+        migrations.RemoveField(
+            model_name='sitedeployment_decl',
+            name='deployment',
+        ),
+        migrations.RemoveField(
+            model_name='sitedeployment_decl',
+            name='site',
+        ),
+        migrations.RemoveField(
+            model_name='siteprivilege_decl',
+            name='role',
+        ),
+        migrations.RemoveField(
+            model_name='siteprivilege_decl',
+            name='site',
+        ),
+        migrations.RemoveField(
+            model_name='siteprivilege_decl',
+            name='user',
+        ),
+        migrations.AlterUniqueTogether(
+            name='sliceprivilege_decl',
+            unique_together=set([]),
+        ),
+        migrations.RemoveField(
+            model_name='sliceprivilege_decl',
+            name='role',
+        ),
+        migrations.RemoveField(
+            model_name='sliceprivilege_decl',
+            name='slice',
+        ),
+        migrations.RemoveField(
+            model_name='sliceprivilege_decl',
+            name='user',
+        ),
+        migrations.RemoveField(
+            model_name='tenantwithcontainer_decl',
+            name='creator',
+        ),
+        migrations.RemoveField(
+            model_name='tenantwithcontainer_decl',
+            name='instance',
+        ),
+        migrations.RemoveField(
+            model_name='tenantwithcontainer_decl',
+            name='serviceinstance_decl_ptr',
+        ),
+        migrations.DeleteModel(
+            name='Controller',
+        ),
+        migrations.DeleteModel(
+            name='ControllerImages',
+        ),
+        migrations.DeleteModel(
+            name='ControllerNetwork',
+        ),
+        migrations.DeleteModel(
+            name='ControllerRole',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSite',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSitePrivilege',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSlice',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSlicePrivilege',
+        ),
+        migrations.DeleteModel(
+            name='ControllerUser',
+        ),
+        migrations.DeleteModel(
+            name='Deployment',
+        ),
+        migrations.DeleteModel(
+            name='ImageDeployments',
+        ),
+#        migrations.DeleteModel(
+#            name='Instance',
+#        ),
+#        migrations.DeleteModel(
+#            name='SiteDeployment',
+#        ),
+        migrations.DeleteModel(
+            name='SitePrivilege',
+        ),
+        migrations.DeleteModel(
+            name='SiteRole',
+        ),
+        migrations.DeleteModel(
+            name='SlicePrivilege',
+        ),
+        migrations.DeleteModel(
+            name='SliceRole',
+        ),
+        migrations.DeleteModel(
+            name='TenantWithContainer',
+        ),
+        migrations.RemoveField(
+            model_name='node_decl',
+            name='site_deployment',
+        ),
+        migrations.RemoveField(
+            model_name='privilege_decl',
+            name='controller_id',
+        ),
+        migrations.AddField(
+            model_name='node_decl',
+            name='site',
+            field=models.ForeignKey(default=core.models.xosbase_header.get_first_site, on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='core.Site'),
+        ),
+        # NOTE: Had to manually move AlterUniqueTogether before RemoveField
+        migrations.AlterUniqueTogether(
+            name='port_decl',
+            unique_together=set([('service_instance', 'network')]),
+        ),
+        migrations.RemoveField(
+            model_name='port_decl',
+            name='instance',
+        ),
+        migrations.DeleteModel(
+            name='Controller_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerImages_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerNetwork_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSite_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSitePrivilege_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSlice_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerSlicePrivilege_decl',
+        ),
+        migrations.DeleteModel(
+            name='ControllerUser_decl',
+        ),
+        migrations.DeleteModel(
+            name='Deployment_decl',
+        ),
+        migrations.DeleteModel(
+            name='ImageDeployments_decl',
+        ),
+#        migrations.DeleteModel(
+#            name='SiteDeployment_decl',
+#        ),
+        migrations.DeleteModel(
+            name='SitePrivilege_decl',
+        ),
+        migrations.DeleteModel(
+            name='SiteRole_decl',
+        ),
+        migrations.DeleteModel(
+            name='SlicePrivilege_decl',
+        ),
+        migrations.DeleteModel(
+            name='SliceRole_decl',
+        ),
+        migrations.DeleteModel(
+            name='TenantWithContainer_decl',
+        ),
+#        migrations.DeleteModel(
+#            name='Instance_decl',
+#        ),
+    ]
diff --git a/xos/core/migrations/0005_delete_models.py b/xos/core/migrations/0005_delete_models.py
new file mode 100644
index 0000000..cf80c9d
--- /dev/null
+++ b/xos/core/migrations/0005_delete_models.py
@@ -0,0 +1,43 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.11 on 2019-03-05 01:50
+from __future__ import unicode_literals
+
+import core.models.xosbase_header
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0004_auto_20190304_2050'),
+    ]
+
+    operations = [
+        migrations.DeleteModel(
+            name='Instance',
+        ),
+        migrations.DeleteModel(
+            name='SiteDeployment',
+        ),
+        migrations.DeleteModel(
+            name='SiteDeployment_decl',
+        ),
+        migrations.DeleteModel(
+            name='Instance_decl',
+        ),
+    ]
diff --git a/xos/core/models/core.xproto b/xos/core/models/core.xproto
index fbb945e..3c0d63b 100644
--- a/xos/core/models/core.xproto
+++ b/xos/core/models/core.xproto
@@ -102,7 +102,6 @@
 message Privilege::grant_policy (XOSBase) {
      required int32 accessor_id = 1 [null = False, blank=False];
      required string accessor_type = 2 [null = False, max_length=1024, blank = False];
-     optional int32 controller_id = 3 [null = True, blank = True];
      required int32 object_id = 4 [null = False, blank=False];
      required string object_type = 5 [null = False, max_length=1024, blank = False];
      required string permission = 6 [null = False, default = "all", max_length=1024, tosca_key=True];
@@ -125,36 +124,6 @@
      required manytoone image->Image:computeserviceinstances = 2:1001 [db_index = True, null = False, blank = False, help_text = "Image used to instantiate this ServiceInstance"];
 }
 
-// Admins at a deployment have access to controllers at those deployments
-policy controller_policy
-       < ctx.user.is_admin
-           | exists Privilege:
-                  Privilege.accessor_id = ctx.user.id
-                  & Privilege.object_type = "Deployment"
-                  & Privilege.permission = "role:admin"
-                  & Privilege.object_id = obj.id >
-
-message Controller::controller_policy (XOSBase) {
-     required string name = 1 [max_length = 200, content_type = "stripped", blank = False, help_text = "Name of the Controller", null = False, db_index = False, unique = True];
-     required string backend_type = 2 [max_length = 200, content_type = "stripped", blank = False, help_text = "Type of compute controller, e.g. EC2, OpenStack, or OpenStack version", null = False, db_index = False];
-     required string version = 3 [max_length = 200, content_type = "stripped", blank = False, help_text = "Controller version", null = False, db_index = False];
-     optional string auth_url = 4 [max_length = 200, content_type = "stripped", blank = True, help_text = "Auth url for the compute controller", null = True, db_index = False];
-     optional string admin_user = 5 [max_length = 200, content_type = "stripped", blank = True, help_text = "Username of an admin user at this controller", null = True, db_index = False];
-     optional string admin_password = 6 [max_length = 200, content_type = "stripped", blank = True, help_text = "Password of theadmin user at this controller", null = True, db_index = False];
-     optional string admin_tenant = 7 [max_length = 200, content_type = "stripped", blank = True, help_text = "Name of the tenant the admin user belongs to", null = True, db_index = False];
-     optional string domain = 8 [max_length = 200, content_type = "stripped", blank = True, help_text = "Name of the domain this controller belongs to", null = True, db_index = False];
-     optional string rabbit_host = 9 [max_length = 200, content_type = "stripped", blank = True, help_text = "IP address of rabbitmq server at this controller", null = True, db_index = False];
-     optional string rabbit_user = 10 [max_length = 200, content_type = "stripped", blank = True, help_text = "Username of rabbitmq server at this controller", null = True, db_index = False];
-     optional string rabbit_password = 11 [max_length = 200, content_type = "stripped", blank = True, help_text = "Password of rabbitmq server at this controller", null = True, db_index = False];
-     required manytoone deployment->Deployment:controllerdeployments = 12:1001 [db_index = True, null = False, blank = False];
-}
-
-message ControllerImages (XOSBase) {
-     required manytoone image->Image:controllerimages = 1:1002 [db_index = True, null = False, blank = False, unique_with = "controller"];
-     required manytoone controller->Controller:controllerimages = 2:1001 [db_index = True, null = False, blank = False];
-     optional string glance_image_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Glance image id", null = True, db_index = False];
-}
-
 // Everyone has read access
 // For write access, you have to be a site_admin
 
@@ -190,77 +159,6 @@
                  & Privilege.permission = "role:admin")
                 ) >
 
-policy controller_network_policy <
-         ctx.user.is_admin
-         | *slice_policy(network.owner) >
-
-
-message ControllerNetwork::controller_network_policy (XOSBase) {
-     required manytoone network->Network:controllernetworks = 1:1001 [db_index = True, null = False, blank = False, unique_with = "controller"];
-     required manytoone controller->Controller:controllernetworks = 2:1002 [db_index = True, null = False, blank = False];
-     required string subnet = 3 [db_index = False, max_length = 32, null = False, blank = True];
-     required string start_ip = 4 [db_index = False, max_length = 32, null = False, blank = True];
-     required string stop_ip = 5 [db_index = False, max_length = 32, null = False, blank = True];
-     optional string net_id = 6 [help_text = "Neutron network", max_length = 256, null = True, db_index = False, blank = True];
-     optional string router_id = 7 [help_text = "Neutron router id", max_length = 256, null = True, db_index = False, blank = True];
-     optional string subnet_id = 8 [help_text = "Neutron subnet id", max_length = 256, null = True, db_index = False, blank = True];
-     optional string gateway = 9 [db_index = False, max_length = 32, null = True, blank = True];
-     optional string segmentation_id = 10 [db_index = False, max_length = 32, null = True, blank = True];
-}
-
-message ControllerRole (XOSBase) {
-     required string role = 1 [choices = "(('admin', 'Admin'),)", max_length = 30, content_type = "stripped", blank = False, null = False, db_index = False];
-}
-
-message ControllerSite (XOSBase) {
-     required manytoone site->Site:controllersite = 1:1002 [db_index = True, null = False, blank = False, unique_with="controller", tosca_key = True];
-     optional manytoone controller->Controller:controllersite = 2:1003 [db_index = True, null = True, blank = True, tosca_key = True];
-     optional string tenant_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Keystone tenant id", null = True, db_index = True];
-}
-
-message ControllerSitePrivilege (XOSBase) {
-     required manytoone controller->Controller:controllersiteprivileges = 1:1004 [db_index = True, null = False, blank = False, unique_with = "site_privilege"];
-     required manytoone site_privilege->SitePrivilege:controllersiteprivileges = 2:1001 [db_index = True, null = False, blank = False, unique_with = "role_id"];
-     optional string role_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Keystone id", null = True, db_index = True];
-}
-
-policy controller_slice_policy <
-         ctx.user.is_admin
-         | *slice_policy(slice) >
-
-message ControllerSlice::controller_slice_policy (XOSBase) {
-     required manytoone controller->Controller:controllerslices = 1:1005 [db_index = True, null = False, blank = False, unique_with = "slice"];
-     required manytoone slice->Slice:controllerslices = 2:1002 [db_index = True, null = False, blank = False];
-     optional string tenant_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Keystone tenant id", null = True, db_index = False];
-}
-
-message ControllerSlicePrivilege (XOSBase) {
-     required manytoone controller->Controller:controllersliceprivileges = 1:1006 [db_index = True, null = False, blank = False, unique_with = "slice_privilege"];
-     required manytoone slice_privilege->SlicePrivilege:controllersliceprivileges = 2:1001 [db_index = True, null = False, blank = False];
-     optional string role_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Keystone id", null = True, db_index = True];
-}
-
-policy controller_user_policy <
-         ctx.user.is_admin
-         | (ctx.read_access & *user_policy(user)) >
-
-message ControllerUser::controller_user_policy (XOSBase) {
-     required manytoone user->User:controllerusers = 1:1001 [db_index = True, null = False, blank = False];
-     required manytoone controller->Controller:controllersusers = 2:1007 [db_index = True, null = False, blank = False, unique_with = "user"];
-     optional string kuser_id = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Keystone user id", null = True, db_index = False];
-}
-
-// Everyone has read access
-// For write access you need admin privileges at that deployment
-policy deployment_policy <
-         ctx.user.is_admin
-         | (ctx.write_access -> exists Privilege: Privilege.object_type = "Deployment" & Privilege.object_id = obj.id & Privilege.accessor_id = ctx.user.id & Privilege.permission = "role:admin") >
-
-message Deployment::deployment_policy (XOSBase) {
-     required string name = 1 [max_length = 200, content_type = "stripped", blank = False, help_text = "Name of the Deployment", null = False, db_index = False, unique = True];
-     required string accessControl = 2 [default = "allow all", max_length = 200, blank = False, help_text = "Access control list that specifies which sites/users may use nodes in this deployment", null = False, db_index = False, varchar = True];
-}
-
 
 message Flavor (XOSBase) {
      required string name = 1 [max_length = 32, content_type = "stripped", blank = False, help_text = "name of this flavor, as displayed to users", null = False, db_index = False, unique = True];
@@ -278,45 +176,6 @@
      optional string tag = 6 [max_length = 256, content_type = "stripped", blank = True, help_text = "For Docker Images, tag of image", null = True, db_index = False];
 }
 
-policy image_deployment_policy <
-        *deployment_policy(deployment)
->
-
-message ImageDeployments (XOSBase) {
-     required manytoone image->Image:imagedeployments = 1:1003 [db_index = True, null = False, blank = False, unique_with = "deployment"];
-     required manytoone deployment->Deployment:imagedeployments = 2:1002 [db_index = True, null = False, blank = False];
-}
-
-policy instance_creator < obj.creator >
-policy instance_isolation < (obj.isolation = "container" | obj.isolation = "container_vm" ) -> (obj.image.kind = "container") >
-policy instance_isolation_container_vm_parent < (obj.isolation = "container_vm") -> obj.parent >
-policy instance_parent_isolation_container_vm < obj.parent -> ( obj.isolation = "container_vm" ) >
-policy instance_isolation_vm < (obj.isolation = "vm") -> (obj.image.kind = "vm") >
-policy instance_creator_privilege < not (obj.slice.creator = obj.creator) -> exists Privilege:Privilege.object_id = obj.slice.id & Privilege.accessor_id = obj.creator.id & Privilege.object_type = "Slice" >
-
-policy instance_policy < *slice_policy(slice) >
-
-message Instance::instance_policy (XOSBase) {
-     option validators = "instance_creator:Instance has no creator, instance_isolation: Container instance {obj.name} must use container image, instance_isolation_container_vm_parent:Container-vm instance {obj.name} must have a parent, instance_parent_isolation_container_vm:Parent field can only be set on Container-vm instances ({obj.name}), instance_isolation_vm: VM Instance {obj.name} must use VM image, instance_creator_privilege: instance creator has no privileges on slice";
-     optional string instance_id = 1 [max_length = 200, content_type = "stripped", blank = True, help_text = "Nova instance id", null = True, db_index = False, feedback_state = True];
-     optional string instance_uuid = 2 [max_length = 200, content_type = "stripped", blank = True, help_text = "Nova instance uuid", null = True, db_index = False, feedback_state = True];
-     required string name = 3 [max_length = 200, content_type = "stripped", blank = False, help_text = "Instance name", null = False, db_index = False];
-     optional string instance_name = 4 [max_length = 200, content_type = "stripped", blank = True, help_text = "OpenStack generated name", null = True, db_index = False, feedback_state = True];
-     optional string ip = 5 [max_length = 39, content_type = "ip", blank = True, help_text = "Instance ip address", null = True, db_index = False, gui_hidden = True];
-     required manytoone image->Image:instances = 6:1004 [db_index = True, null = False, blank = False];
-     optional manytoone creator->User:instances = 7:1002 [db_index = True, null = True, blank = True];
-     required manytoone slice->Slice:instances = 8:1003 [db_index = True, null = False, blank = False];
-     required manytoone deployment->Deployment:instance_deployment = 9:1003 [db_index = True, null = False, blank = False];
-     required manytoone node->Node:instances = 10:1001 [db_index = True, null = False, blank = False];
-     required int32 numberCores = 11 [help_text = "Number of cores for instance", default = 0, null = False, db_index = False, blank = False];
-     required manytoone flavor->Flavor:instance = 12:1001 [help_text = "Flavor of this instance", null = False, db_index = True, blank = False];
-     optional string userData = 13 [help_text = "user_data passed to instance during creation", null = True, db_index = False, blank = True, varchar = True];
-     required string isolation = 14 [default = "vm", choices = "(('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))", max_length = 30, blank = False, null = False, db_index = False];
-     optional string volumes = 15 [help_text = "Comma-separated list of directories to expose to parent context", null = True, db_index = False, blank = True];
-     optional manytoone parent->Instance:instance = 16:1001 [help_text = "Parent Instance for containers nested inside of VMs", null = True, db_index = True, blank = True];
-}
-
-
 policy network_policy < *slice_policy(owner) >
 
 message Network::network_policy (XOSBase) {
@@ -369,26 +228,23 @@
      optional string vtn_kind = 11 [default = "PRIVATE", choices = "(('PRIVATE', 'Private'), ('PUBLIC', 'Public'), ('MANAGEMENT_LOCAL', 'Management Local'), ('MANAGEMENT_HOST', 'Management Host'), ('VSG', 'VSG'), ('ACCESS_AGENT', 'Access Agent'), ('FLAT', 'Flat'))", max_length = 30, blank = True, null = True, db_index = False];
 }
 
-policy node_policy < *site_policy(site_deployment.site) >
-
-message Node::node_policy (XOSBase) {
+message Node (XOSBase) {
      required string name = 1 [max_length = 200, content_type = "stripped", blank = False, help_text = "Name of the Node", null = False, db_index = False, unique = True];
-     required manytoone site_deployment->SiteDeployment:nodes = 2:1001 [db_index = True, null = False, blank = False];
      optional string bridgeId = 3 [max_length = 200, content_type = "stripped", blank = True, help_text = "Bridge Id", null = True, db_index = False];
      optional string dataPlaneIntf = 4 [max_length = 200, content_type = "stripped", blank = True, help_text = "Dataplane Interface", null = True, db_index = False];
      optional string dataPlaneIp = 5 [max_length = 200, content_type = "stripped", blank = True, help_text = "Dataplane Ip", null = True, db_index = False];
      optional string hostManagementIface = 6 [max_length = 200, content_type = "stripped", blank = True, help_text = "Host Management Interface", null = True, db_index = False];
+     required manytoone site->Site:nodes = 7:1006 [db_index = True, null = False, blank = False, default=get_first_site];
 }
 message NodeLabel (XOSBase) {
      required string name = 1 [max_length = 200, content_type = "stripped", blank = False, help_text = "label name", null = False, db_index = False, unique = True];
      required manytomany node->Node/NodeLabel_node:nodelabels = 2:1002 [db_index = False, blank = True];
 }
 
-policy port_policy < *instance_policy(instance) & *network_policy(network) >
+policy port_policy < *network_policy(network) >
 
 message Port::port_policy (XOSBase) {
-     required manytoone network->Network:links = 1:1003 [db_index = True, null = False, blank = False, unique_with = "instance", help_text = "Network bound to this port"];
-     optional manytoone instance->Instance:ports = 2:1002 [db_index = True, null = True, blank = True, help_text = "Instance bound to this port"];
+     required manytoone network->Network:links = 1:1003 [db_index = True, null = False, blank = False, unique_with = "service_instance", help_text = "Network bound to this port"];
      optional string ip = 3 [max_length = 39, content_type = "ip", blank = True, help_text = "Instance ip address", null = True, db_index = False];
      optional string port_id = 4 [help_text = "Neutron port id", max_length = 256, null = True, db_index = False, blank = True];
      optional string mac = 5 [help_text = "MAC address associated with this port", max_length = 256, null = True, db_index = False, blank = True];
@@ -585,25 +441,6 @@
      required string abbreviated_name = 10 [db_index = False, max_length = 80, null = False, content_type = "stripped", blank = False];
 }
 
-
-message SiteDeployment (XOSBase) {
-     required manytoone site->Site:sitedeployments = 1:1003 [db_index = True, null = False, blank = False, unique_with = "deployment", tosca_key=True];
-     required manytoone deployment->Deployment:sitedeployments = 2:1004 [db_index = True, null = False, blank = False, unique_with = "controller", tosca_key=True];
-     optional manytoone controller->Controller:sitedeployments = 3:1008 [db_index = True, null = True, blank = True];
-     optional string availability_zone = 4 [max_length = 200, content_type = "stripped", blank = True, help_text = "OpenStack availability zone", null = True, db_index = False];
-}
-
-message SitePrivilege (XOSBase) {
-     required manytoone user->User:siteprivileges = 1:1003 [db_index = True, null = False, blank = False];
-     required manytoone site->Site:siteprivileges = 2:1004 [db_index = True, null = False, blank = False, tosca_key=True];
-     required manytoone role->SiteRole:siteprivileges = 3:1001 [db_index = True, null = False, blank = False, tosca_key=True];
-}
-
-
-message SiteRole (XOSBase) {
-     required string role = 1 [choices = "(('admin', 'Admin'), ('pi', 'PI'), ('tech', 'Tech'), ('billing', 'Billing'))", max_length = 30, content_type = "stripped", blank = False, null = False, db_index = False, tosca_key=True];
-}
-
 policy slice_name_no_spaces < {{ ' ' not in obj.name }} >
 policy slice_has_creator < obj.creator >
 
@@ -631,16 +468,6 @@
      optional string controller_kind = 21 [max_length = 256, content_type = "stripped", blank = True, help_text = "Type of controller, vim-dependent", null = True, db_index = False];
 }
 
-message SlicePrivilege (XOSBase) {
-     required manytoone user->User:sliceprivileges = 1:1005 [db_index = True, null = False, blank = False, unique_with = "slice"];
-     required manytoone slice->Slice:sliceprivileges = 2:1007 [db_index = True, null = False, blank = False, unique_with = "role"];
-     required manytoone role->SliceRole:sliceprivileges = 3:1002 [db_index = True, null = False, blank = False];
-}
-
-
-message SliceRole (XOSBase) {
-     required string role = 1 [choices = "(('admin', 'Admin'), ('default', 'Default'), ('access', 'Access'))", max_length = 30, content_type = "stripped", blank = False, null = False, db_index = False, tosca_key=True];
-}
 
 policy tag_policy < ctx.user.is_admin >
 
@@ -785,42 +612,6 @@
          null = False];
 }
 
-message TenantWithContainer (ServiceInstance) {
-     option description = "A ServiceInstance that uses an Instance to house its compute services";
-
-     optional manytoone instance->Instance:+ = 1:1003 [
-         help_text = "Instance used by this Tenant",
-         blank = True,
-         db_index = True,
-         null = True];
-     optional manytoone creator->User:+ = 2:1006 [
-         help_text = "Creator of this Tenant",
-         blank = True,
-         db_index = True,
-         null = True];
-     optional string external_hostname = 3 [
-         help_text = "External host name",
-         blank = True,
-         content_type = "stripped",
-         db_index = False,
-         max_length = 30,
-         null = True];
-     optional string external_container = 4 [
-         help_text = "External host name",
-         blank = True,
-         content_type = "stripped",
-         db_index = False,
-         max_length = 30,
-         null = True];
-     optional string node_label = 5 [
-         help_text = "Node constraint",
-         blank = True,
-         content_type = "stripped",
-         db_index = False,
-         max_length = 30,
-         null = True];
-}
-
 message TrustDomain (XOSBase) {
      required string name = 1 [max_length = 255, null = False, db_index = True, blank = False, help_text = "Name of this trust domain"];
      required manytoone owner->Service:owned_trust_domains = 2:1011 [null = False, db_index = True, blank = False, help_text = "Service partioned by this trust domain"];
diff --git a/xos/core/models/port.py b/xos/core/models/port.py
index 5db5db1..aba14bf 100644
--- a/xos/core/models/port.py
+++ b/xos/core/models/port.py
@@ -20,20 +20,3 @@
     class Meta:
         proxy = True
 
-    def save(self, *args, **kwargs):
-        if self.instance:
-            if (
-                (self.instance.slice not in self.network.permitted_slices.all())
-                and (self.instance.slice != self.network.owner)
-                and (not self.network.permit_all_slices)
-            ):
-                raise XOSValidationError("Slice is not allowed to connect to network")
-
-        if self.instance and self.service_instance:
-            raise XOSValidationError(
-                "Only one of (instance, service_instance) may be set,"
-                "port=%s, network=%s, instance=%s, service_instance=%s"
-                % (self, self.network, self.instance, self.service_instance)
-            )
-
-        super(Port, self).save(*args, **kwargs)
diff --git a/xos/core/models/xosbase_header.py b/xos/core/models/xosbase_header.py
index f27fc05..c15823f 100644
--- a/xos/core/models/xosbase_header.py
+++ b/xos/core/models/xosbase_header.py
@@ -44,6 +44,12 @@
 XOS_GLOBAL_DEFAULT_SECURITY_POLICY = True
 
 
+def get_first_site():
+    # Hackish solution to Node.site needing a default
+    from core.models import Site  # NOTE: bare "site" is the Python stdlib module and has no Site class
+    return Site.objects.first().id
+
+
 def json_handler(obj):
     if isinstance(obj, pytz.tzfile.DstTzInfo):
         # json can't serialize DstTzInfo
diff --git a/xos/synchronizers/new_base/model_policies/test_model_policy_tenantwithcontainer.py b/xos/synchronizers/new_base/model_policies/test_model_policy_tenantwithcontainer.py
deleted file mode 100644
index ddce4a6..0000000
--- a/xos/synchronizers/new_base/model_policies/test_model_policy_tenantwithcontainer.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import unittest
-from mock import patch
-import mock
-import pdb
-
-import os
-import sys
-from xosconfig import Config
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-class TestModelPolicyTenantWithContainer(unittest.TestCase):
-    def setUp(self):
-        global TenantWithContainerPolicy, LeastLoadedNodeScheduler, MockObjectList
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "model_policies")
-        )
-
-        config = basic_conf = os.path.abspath(
-            os.path.dirname(os.path.realpath(__file__)) + "/test_config.yaml"
-        )
-        Config.clear()  # in case left unclean by a previous test case
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        import model_policy_tenantwithcontainer
-        from model_policy_tenantwithcontainer import (
-            TenantWithContainerPolicy,
-            LeastLoadedNodeScheduler,
-        )
-
-        from mock_modelaccessor import MockObjectList
-
-        # import all class names to globals
-        for (
-            k,
-            v,
-        ) in model_policy_tenantwithcontainer.model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        # TODO: Mock_model_accessor lacks save or delete methods
-        # Instance.save = mock.Mock
-        # Instance.delete = mock.Mock
-        # TenantWithContainer.save = mock.Mock
-
-        self.policy = TenantWithContainerPolicy()
-        self.user = User(email="testadmin@test.org")
-        self.tenant = TenantWithContainer(creator=self.user)
-        self.flavor = Flavor(name="m1.small")
-
-    def tearDown(self):
-        Config.clear()
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_manage_container_no_slices(self):
-        with patch.object(TenantWithContainer, "owner") as owner:
-            owner.slices.count.return_value = 0
-            with self.assertRaises(Exception) as e:
-                self.policy.manage_container(self.tenant)
-            self.assertEqual(e.exception.message, "The service has no slices")
-
-    def test_manage_container(self):
-        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
-            TenantWithContainer, "save"
-        ) as tenant_save, patch.object(
-            Node, "site_deployment"
-        ) as site_deployment, patch.object(
-            Instance, "save"
-        ) as instance_save, patch.object(
-            Instance, "delete"
-        ) as instance_delete, patch.object(
-            TenantWithContainerPolicy, "get_image"
-        ) as get_image, patch.object(
-            LeastLoadedNodeScheduler, "pick"
-        ) as pick:
-            # setup mocks
-            node = Node(hostname="my.node.com")
-            slice = Slice(
-                name="mysite_test1", default_flavor=self.flavor, default_isolation="vm"
-            )
-            image = Image(name="trusty-server-multi-nic")
-            deployment = Deployment(name="testdeployment")
-            owner.slices.count.return_value = 1
-            owner.slices.all.return_value = [slice]
-            owner.slices.first.return_value = slice
-            get_image.return_value = image
-            pick.return_value = (node, None)
-            site_deployment.deployment = deployment
-            # done setup mocks
-
-            # call manage_container
-            self.policy.manage_container(self.tenant)
-
-            # make sure manage_container did what it is supposed to do
-            self.assertNotEqual(self.tenant.instance, None)
-            self.assertEqual(self.tenant.instance.creator.email, "testadmin@test.org")
-            self.assertEqual(self.tenant.instance.image.name, "trusty-server-multi-nic")
-            self.assertEqual(self.tenant.instance.flavor.name, "m1.small")
-            self.assertEqual(self.tenant.instance.isolation, "vm")
-            self.assertEqual(self.tenant.instance.node.hostname, "my.node.com")
-            self.assertEqual(self.tenant.instance.slice.name, "mysite_test1")
-            self.assertEqual(self.tenant.instance.parent, None)
-            instance_save.assert_called()
-            instance_delete.assert_not_called()
-            tenant_save.assert_called()
-
-    def test_manage_container_delete(self):
-        self.tenant.deleted = True
-
-        # call manage_container
-        self.policy.manage_container(self.tenant)
-
-        # make sure manage_container did what it is supposed to do
-        self.assertEqual(self.tenant.instance, None)
-
-    def test_manage_container_no_m1_small(self):
-        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
-            Node, "site_deployment"
-        ) as site_deployment, patch.object(
-            Flavor, "objects"
-        ) as flavor_objects, patch.object(
-            TenantWithContainerPolicy, "get_image"
-        ) as get_image, patch.object(
-            LeastLoadedNodeScheduler, "pick"
-        ) as pick:
-            # setup mocks
-            node = Node(hostname="my.node.com")
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            image = Image(name="trusty-server-multi-nic")
-            deployment = Deployment(name="testdeployment")
-            owner.slices.count.return_value = 1
-            owner.slices.all.return_value = [slice]
-            owner.slices.first.return_value = slice
-            get_image.return_value = image
-            pick.return_value = (node, None)
-            site_deployment.deployment = deployment
-            flavor_objects.filter.return_value = []
-            # done setup mocks
-
-            with self.assertRaises(Exception) as e:
-                self.policy.manage_container(self.tenant)
-            self.assertEqual(e.exception.message, "No m1.small flavor")
-
-    def test_least_loaded_node_scheduler(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            node = Node(hostname="my.node.com", id=4567)
-            node.instances = MockObjectList(initial=[])
-            node_objects.return_value = [node]
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node.id)
-
-    def test_least_loaded_node_scheduler_two_nodes(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the fewest instance (node1)
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node1.id)
-
-    def test_least_loaded_node_scheduler_two_nodes_multi(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            instance2 = Instance(id=2)
-            instance3 = Instance(id=3)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[instance2, instance3])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the fewest instance (node2)
-
-            sched = LeastLoadedNodeScheduler(slice)
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node2.id)
-
-    def test_least_loaded_node_scheduler_with_label(self):
-        with patch.object(Node.objects, "get_items") as node_objects:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
-            # syntax.
-            node1.nodelabels__name = None
-            node2.nodelabels__name = "foo"
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the label, even if it has a greater number of instances
-
-            sched = LeastLoadedNodeScheduler(slice, label="foo")
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node2.id)
-
-    def test_least_loaded_node_scheduler_create_label(self):
-        with patch.object(Node.objects, "get_items") as node_objects, patch.object(
-            NodeLabel, "save", autospec=True
-        ) as nodelabel_save, patch.object(NodeLabel, "node") as nodelabel_node_add:
-            slice = Slice(
-                name="mysite_test1", default_flavor=None, default_isolation="vm"
-            )
-            instance1 = Instance(id=1)
-            node1 = Node(hostname="my.node.com", id=4567)
-            node1.instances = MockObjectList(initial=[])
-            node2 = Node(hostname="my.node.com", id=8910)
-            node2.instances = MockObjectList(initial=[instance1])
-            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
-            # syntax.
-            node1.nodelabels__name = None
-            node2.nodelabels__name = None
-            node_objects.return_value = [node1, node2]
-
-            # should pick the node with the least number of instances
-
-            sched = LeastLoadedNodeScheduler(
-                slice, label="foo", constrain_by_service_instance=True
-            )
-            (picked_node, parent) = sched.pick()
-
-            self.assertNotEqual(picked_node, None)
-            self.assertEqual(picked_node.id, node1.id)
-
-            # NodeLabel should have been created and saved
-
-            self.assertEqual(nodelabel_save.call_count, 1)
-            self.assertEqual(nodelabel_save.call_args[0][0].name, "foo")
-
-            # The NodeLabel's node field should have been added to
-
-            NodeLabel.node.add.assert_called_with(node1)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_controller_dependencies.py b/xos/synchronizers/new_base/tests/test_controller_dependencies.py
deleted file mode 100644
index 47f17d2..0000000
--- a/xos/synchronizers/new_base/tests/test_controller_dependencies.py
+++ /dev/null
@@ -1,226 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-class TestControllerDependencies(unittest.TestCase):
-
-    __test__ = False
-
-    def setUp(self):
-        global mock_enumerator, event_loop
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "steps")
-        )
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references tests/model-deps
-
-        import event_loop
-
-        reload(event_loop)
-        import backend
-
-        reload(backend)
-        from mock_modelaccessor import mock_enumerator
-        from modelaccessor import model_accessor
-
-        # import all class names to globals
-        for (k, v) in model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        b = backend.Backend()
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = event_loop.XOSObserver(self.steps)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_multi_controller_path(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([csl])
-        site.controllersite = mock_enumerator([csi])
-
-        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
-        self.assertTrue(verdict)
-        self.assertEqual(edge_type, event_loop.PROXY_EDGE)
-
-    def test_controller_path_simple(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        p.slice = s
-        s.site = t
-        ct.site = t
-        t.controllersite = mock_enumerator([ct])
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
-        self.assertEqual([ct, p], cohorts[0])
-        cohorts = self.synchronizer.compute_dependent_cohorts([ct, p], False)
-        self.assertEqual([ct, p], cohorts[0])
-
-    def test_controller_deletion_path(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        ct.site = t
-        p.slice = s
-        s.site = t
-
-        t.controllersite = mock_enumerator([ct])
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
-        self.assertEqual([t, ct, s, p], cohorts[0])
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
-        self.assertEqual([p, s, ct, t], cohorts[0])
-
-    def test_multi_controller_schedule(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([csl])
-        site.controllersite = mock_enumerator([csi])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertEqual([site, csi, slice, csl, i], cohorts[0])
-
-    def test_multi_controller_path_negative(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        csl.slice = slice
-        csi.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-
-        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
-        self.assertFalse(verdict)
-        self.assertEqual(edge_type, None)
-
-    def test_controller_path_simple_negative(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        p.slice = s
-        s.site = t
-        ct.site = t
-        t.controllersite = mock_enumerator([])
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
-        self.assertIn([ct], cohorts)
-        self.assertIn([p], cohorts)
-
-    def test_controller_deletion_path_negative(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        s.site = t
-
-        t.controllersite = mock_enumerator([])
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
-        self.assertIn([t, s], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([ct], cohorts)
-        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
-        self.assertIn([s, t], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([ct], cohorts)
-
-    def test_multi_controller_deletion_schedule(self):
-        csl = ControllerSlice()
-        cn = ControllerNetwork()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertIn([site, slice, i], cohorts)
-        self.assertIn([csl], cohorts)
-        self.assertIn([csi], cohorts)
-
-    def test_multi_controller_schedule_negative(self):
-        csl = ControllerSlice()
-        csi = ControllerSite()
-        site = Site()
-        slice = Slice()
-        slice.site = site
-        slice.controllerslices = mock_enumerator([])
-        site.controllersite = mock_enumerator([])
-        i = Instance()
-        i.slice = slice
-
-        cohorts = self.synchronizer.compute_dependent_cohorts(
-            [i, slice, site, csl, csi], False
-        )
-        self.assertIn([site, slice, i], cohorts)
-        self.assertIn([csl], cohorts)
-        self.assertIn([csi], cohorts)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_diffs.py b/xos/synchronizers/new_base/tests/test_diffs.py
deleted file mode 100644
index c2e53ad..0000000
--- a/xos/synchronizers/new_base/tests/test_diffs.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch, call, Mock, PropertyMock
-import json
-
-import os
-import sys
-
-# Hack to load synchronizer framework
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "../../..")
-if not os.path.exists(os.path.join(test_path, "new_base")):
-    xos_dir = os.path.join(test_path, "../../../../../../orchestration/xos/xos")
-    services_dir = os.path.join(xos_dir, "../../xos_services")
-sys.path.append(xos_dir)
-sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-# END Hack to load synchronizer framework
-
-
-class TestDiffs(unittest.TestCase):
-
-    """ These tests are for the mock modelaccessor, to make sure it behaves like the real one """
-
-    def setUp(self):
-
-        self.sys_path_save = sys.path
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-
-        # Setting up the config module
-        from xosconfig import Config
-
-        config = os.path.join(test_path, "test_config.yaml")
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-        # END Setting up the config module
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        # FIXME this is to get jenkins to pass the tests, somehow it is running tests in a different order
-        # and apparently it is not overriding the generated model accessor
-        build_mock_modelaccessor(xos_dir, services_dir, [])
-        import synchronizers.new_base.modelaccessor
-
-        # import all class names to globals
-        for (
-            k,
-            v,
-        ) in (
-            synchronizers.new_base.modelaccessor.model_accessor.all_model_classes.items()
-        ):
-            globals()[k] = v
-
-        self.log = Mock()
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-
-    def test_new_diff(self):
-        site = Site(name="mysite")
-
-        self.assertEqual(site.is_new, True)
-        self.assertEqual(site._dict, {"name": "mysite"})
-        self.assertEqual(site.diff, {})
-        self.assertEqual(site.changed_fields, ["name"])
-        self.assertEqual(site.has_field_changed("name"), False)
-        self.assertEqual(site.has_field_changed("login_base"), False)
-
-        site.login_base = "bar"
-
-        self.assertEqual(site._dict, {"login_base": "bar", "name": "mysite"})
-        self.assertEqual(site.diff, {"login_base": (None, "bar")})
-        self.assertIn("name", site.changed_fields)
-        self.assertIn("login_base", site.changed_fields)
-        self.assertEqual(site.has_field_changed("name"), False)
-        self.assertEqual(site.has_field_changed("login_base"), True)
-        self.assertEqual(site.get_field_diff("login_base"), (None, "bar"))
-
-    def test_existing_diff(self):
-        site = Site(name="mysite", login_base="foo")
-
-        # this is what would happen after saving and re-loading
-        site.is_new = False
-        site.id = 1
-        site._initial = site._dict
-
-        self.assertEqual(site.is_new, False)
-        self.assertEqual(site._dict, {"id": 1, "name": "mysite", "login_base": "foo"})
-        self.assertEqual(site.diff, {})
-        self.assertEqual(site.changed_fields, [])
-        self.assertEqual(site.has_field_changed("name"), False)
-        self.assertEqual(site.has_field_changed("login_base"), False)
-
-        site.login_base = "bar"
-
-        self.assertEqual(site._dict, {"id": 1, "login_base": "bar", "name": "mysite"})
-        self.assertEqual(site.diff, {"login_base": ("foo", "bar")})
-        self.assertIn("login_base", site.changed_fields)
-        self.assertEqual(site.has_field_changed("name"), False)
-        self.assertEqual(site.has_field_changed("login_base"), True)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_event_engine.py b/xos/synchronizers/new_base/tests/test_event_engine.py
deleted file mode 100644
index b5b6921..0000000
--- a/xos/synchronizers/new_base/tests/test_event_engine.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import confluent_kafka
-import functools
-import unittest
-
-from mock import patch, PropertyMock, ANY
-
-import os
-import sys
-import time
-
-log = None
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-def config_get_mock(orig, overrides, key):
-    if key in overrides:
-        return overrides[key]
-    else:
-        return orig(key)
-
-
-class FakeKafkaConsumer:
-    def __init__(self, values=[]):
-        self.values = values
-
-    def subscribe(self, topics):
-        pass
-
-    def poll(self, timeout=1.0):
-        if self.values:
-            return FakeKafkaMessage(self.values.pop())
-        # block forever
-        time.sleep(1000)
-
-
-class FakeKafkaMessage:
-    """ Works like Message in confluent_kafka
-        https://docs.confluent.io/current/clients/confluent-kafka-python/#message
-    """
-
-    def __init__(
-        self,
-        timestamp=None,
-        topic="faketopic",
-        key="fakekey",
-        value="fakevalue",
-        error=False,
-    ):
-
-        if timestamp is None:
-            self.fake_ts_type = confluent_kafka.TIMESTAMP_NOT_AVAILABLE
-            self.fake_timestamp = None
-        else:
-            self.fake_ts_type = confluent_kafka.TIMESTAMP_CREATE_TIME
-            self.fake_timestamp = timestamp
-
-        self.fake_topic = topic
-        self.fake_key = key
-        self.fake_value = value
-        self.fake_error = error
-
-    def error(self):
-        return self.fake_error
-
-    def timestamp(self):
-        return (self.fake_ts_type, self.fake_timestamp)
-
-    def topic(self):
-        return self.fake_topic
-
-    def key(self):
-        return self.fake_key
-
-    def value(self):
-        return self.fake_value
-
-
-class TestEventEngine(unittest.TestCase):
-    @classmethod
-    def setUpClass(cls):
-
-        global log
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        if not log:
-            from multistructlog import create_logger
-
-            log = create_logger(Config().get("logging"))
-
-    def setUp(self):
-        global XOSKafkaThread, Config, log
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "event_steps")
-        )
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references tests/model-deps
-
-        from event_engine import XOSKafkaThread, XOSEventEngine
-
-        self.event_steps_dir = Config.get("event_steps_dir")
-        self.event_engine = XOSEventEngine(log)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_load_event_step_modules(self):
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-        self.assertEqual(len(self.event_engine.event_steps), 1)
-
-    def test_start(self):
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            FakeKafkaConsumer, "subscribe"
-        ) as fake_subscribe, patch.object(
-            self.event_engine.event_steps[0], "process_event"
-        ) as process_event:
-
-            create_kafka_consumer.return_value = FakeKafkaConsumer(
-                values=["sampleevent"]
-            )
-            self.event_engine.start()
-
-            self.assertEqual(len(self.event_engine.threads), 1)
-
-            # Since event_engine.start() launches threads, give them a hundred milliseconds to do something...
-            time.sleep(0.1)
-
-            # We should have subscribed to the fake consumer
-            fake_subscribe.assert_called_once()
-
-            # The fake consumer will have returned one event
-            process_event.assert_called_once()
-
-    def test_start_with_pattern(self):
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            FakeKafkaConsumer, "subscribe"
-        ) as fake_subscribe, patch.object(
-            self.event_engine.event_steps[0], "process_event"
-        ) as process_event, patch.object(
-            self.event_engine.event_steps[0], "pattern", new_callable=PropertyMock
-        ) as pattern, patch.object(
-            self.event_engine.event_steps[0], "topics", new_callable=PropertyMock
-        ) as topics:
-
-            pattern.return_value = "somepattern"
-            topics.return_value = []
-
-            create_kafka_consumer.return_value = FakeKafkaConsumer(
-                values=["sampleevent"]
-            )
-            self.event_engine.start()
-
-            self.assertEqual(len(self.event_engine.threads), 1)
-
-            # Since event_engine.start() launches threads, give them a hundred milliseconds to do something...
-            time.sleep(0.1)
-
-            # We should have subscribed to the fake consumer
-            fake_subscribe.assert_called_with("somepattern")
-
-            # The fake consumer will have returned one event
-            process_event.assert_called_once()
-
-    def test_start_bad_tech(self):
-        """ Set an unknown Technology in the event_step. XOSEventEngine.start() should print an error message and
-            not create any threads.
-        """
-
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            log, "error"
-        ) as log_error, patch.object(
-            self.event_engine.event_steps[0], "technology"
-        ) as technology:
-            technology.return_value = "not_kafka"
-            create_kafka_consumer.return_value = FakeKafkaConsumer()
-            self.event_engine.start()
-
-            self.assertEqual(len(self.event_engine.threads), 0)
-
-            log_error.assert_called_with(
-                "Unknown technology. Skipping step",
-                step="TestEventStep",
-                technology=ANY,
-            )
-
-    def test_start_bad_no_topics(self):
-        """ Set no topics in the event_step. XOSEventEngine.start() will launch a thread, but the thread will fail
-            with an exception before calling subscribe.
-        """
-
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            FakeKafkaConsumer, "subscribe"
-        ) as fake_subscribe, patch.object(
-            self.event_engine.event_steps[0], "topics", new_callable=PropertyMock
-        ) as topics:
-            topics.return_value = []
-            create_kafka_consumer.return_value = FakeKafkaConsumer()
-            self.event_engine.start()
-
-            # the thread does get launched, but it will fail with an exception
-            self.assertEqual(len(self.event_engine.threads), 1)
-
-            time.sleep(0.1)
-
-            fake_subscribe.assert_not_called()
-
-    def test_start_bad_topics_and_pattern(self):
-        """ Set no topics in the event_step. XOSEventEngine.start() will launch a thread, but the thread will fail
-            with an exception before calling subscribe.
-        """
-
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            FakeKafkaConsumer, "subscribe"
-        ) as fake_subscribe, patch.object(
-            self.event_engine.event_steps[0], "pattern", new_callable=PropertyMock
-        ) as pattern:
-            pattern.return_value = "foo"
-            create_kafka_consumer.return_value = FakeKafkaConsumer()
-            self.event_engine.start()
-
-            # the thread does get launched, but it will fail with an exception
-            self.assertEqual(len(self.event_engine.threads), 1)
-
-            time.sleep(0.1)
-
-            fake_subscribe.assert_not_called()
-
-    def test_start_config_no_eventbus_kind(self):
-        """ Set a blank event_bus.kind in Config. XOSEventEngine.start() should print an error message and
-            not create any threads.
-        """
-
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        config_get_orig = Config.get
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            log, "error"
-        ) as log_error, patch.object(
-            Config,
-            "get",
-            new=functools.partial(
-                config_get_mock, config_get_orig, {"event_bus.kind": None}
-            ),
-        ):
-
-            create_kafka_consumer.return_value = FakeKafkaConsumer()
-            self.event_engine.start()
-
-            self.assertEqual(len(self.event_engine.threads), 0)
-
-            log_error.assert_called_with(
-                "Eventbus kind is not configured in synchronizer config file."
-            )
-
-    def test_start_config_bad_eventbus_kind(self):
-        """ Set an unknown event_bus.kind in Config. XOSEventEngine.start() should print an error message and
-            not create any threads.
-        """
-
-        self.event_engine.load_event_step_modules(self.event_steps_dir)
-
-        config_get_orig = Config.get
-        with patch.object(
-            XOSKafkaThread, "create_kafka_consumer"
-        ) as create_kafka_consumer, patch.object(
-            log, "error"
-        ) as log_error, patch.object(
-            Config,
-            "get",
-            new=functools.partial(
-                config_get_mock, config_get_orig, {"event_bus.kind": "not_kafka"}
-            ),
-        ):
-            create_kafka_consumer.return_value = FakeKafkaConsumer()
-            self.event_engine.start()
-
-            self.assertEqual(len(self.event_engine.threads), 0)
-
-            log_error.assert_called_with(
-                "Eventbus kind is set to a technology we do not implement.",
-                eventbus_kind="not_kafka",
-            )
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_load.py b/xos/synchronizers/new_base/tests/test_load.py
deleted file mode 100644
index 06baa03..0000000
--- a/xos/synchronizers/new_base/tests/test_load.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-class TestScheduling(unittest.TestCase):
-    def setUp(self):
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "steps")
-        )
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references tests/model-deps
-
-        import event_loop
-
-        reload(event_loop)
-        import backend
-
-        reload(backend)
-
-        # self.policy = TenantWithContainerPolicy()
-        # self.user = User(email="testadmin@test.org")
-        # self.tenant = Tenant(creator=self.user)
-        # self.flavor = Flavor(name="m1.small")
-        # model_policy_tenantwithcontainer.Instance = Instance
-        # model_policy_tenantwithcontainer.Flavor = Flavor
-
-        b = backend.Backend()
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = event_loop.XOSObserver(self.steps)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_load_steps(self):
-        step_names = [s.__name__ for s in self.steps]
-        self.assertIn("SyncControllerSlices", step_names)
-
-    def test_load_deps(self):
-        self.synchronizer.load_dependency_graph()
-        graph = self.synchronizer.model_dependency_graph
-        self.assertTrue(graph[False].has_edge("Instance", "Slice"))
-        self.assertTrue(graph[True].has_edge("Slice", "Instance"))
-        self.assertTrue(graph[False].has_edge("Slice", "ControllerSlice"))
-        self.assertTrue(graph[True].has_edge("ControllerSlice", "Slice"))
-
-    def test_load_dep_accessors(self):
-        self.synchronizer.load_dependency_graph()
-        graph = self.synchronizer.model_dependency_graph
-        self.assertDictContainsSubset(
-            {"src_accessor": "controllerslices"},
-            graph[False]["Slice"]["ControllerSlice"],
-        )
-        self.assertDictContainsSubset(
-            {"src_accessor": "slice", "dst_accessor": "controllerslices"},
-            graph[True]["Slice"]["ControllerSlice"],
-        )
-
-    def test_load_sync_steps(self):
-        self.synchronizer.load_sync_steps()
-        model_to_step = self.synchronizer.model_to_step
-        step_lookup = self.synchronizer.step_lookup
-        self.assertIn(
-            ("ControllerSlice", ["SyncControllerSlices"]), model_to_step.items()
-        )
-        self.assertIn(("SiteRole", ["SyncRoles"]), model_to_step.items())
-
-        for k, v in model_to_step.items():
-            val = v[0]
-            observes = step_lookup[val].observes
-            if not isinstance(observes, list):
-                observes = [observes]
-
-            observed_names = [o.__name__ for o in observes]
-            self.assertIn(k, observed_names)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_scheduler.py b/xos/synchronizers/new_base/tests/test_scheduler.py
deleted file mode 100644
index fa2d493..0000000
--- a/xos/synchronizers/new_base/tests/test_scheduler.py
+++ /dev/null
@@ -1,284 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-class TestScheduling(unittest.TestCase):
-
-    __test__ = False
-
-    def setUp(self):
-        global mock_enumerator, event_loop
-
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "steps")
-        )
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references tests/model-deps
-
-        import event_loop
-
-        reload(event_loop)
-        import backend
-
-        reload(backend)
-        from mock_modelaccessor import mock_enumerator
-        from modelaccessor import model_accessor
-
-        # import all class names to globals
-        for (k, v) in model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        # self.policy = TenantWithContainerPolicy()
-        # self.user = User(email="testadmin@test.org")
-        # self.tenant = Tenant(creator=self.user)
-        # self.flavor = Flavor(name="m1.small")
-        # model_policy_tenantwithcontainer.Instance = Instance
-        # model_policy_tenantwithcontainer.Flavor = Flavor
-
-        b = backend.Backend()
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = event_loop.XOSObserver(self.steps)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_same_object_trivial(self):
-        s = Slice(pk=4)
-        t = Slice(pk=4)
-        same, t = self.synchronizer.same_object(s, t)
-        self.assertTrue(same)
-        self.assertEqual(t, event_loop.DIRECT_EDGE)
-
-    def test_same_object_trivial2(self):
-        s = Slice(pk=4)
-        t = Slice(pk=5)
-        same, t = self.synchronizer.same_object(s, t)
-        self.assertFalse(same)
-
-    def test_same_object_lst(self):
-        s = Slice(pk=5)
-        t = ControllerSlice(slice=s)
-        u = ControllerSlice(slice=s)
-
-        s.controllerslices = mock_enumerator([t, u])
-
-        same, et = self.synchronizer.same_object(s.controllerslices, u)
-        self.assertTrue(same)
-        self.assertEqual(et, event_loop.PROXY_EDGE)
-
-        same, et = self.synchronizer.same_object(s.controllerslices, t)
-
-        self.assertTrue(same)
-        self.assertEqual(et, event_loop.PROXY_EDGE)
-
-    def test_same_object_lst_dc(self):
-        r = Slice(pk=4)
-        s = Slice(pk=5)
-        t = ControllerSlice(slice=r)
-        u = ControllerSlice(slice=s)
-
-        s.controllerslices = mock_enumerator([u])
-
-        same, et = self.synchronizer.same_object(s.controllerslices, t)
-        self.assertFalse(same)
-
-        same, et = self.synchronizer.same_object(s.controllerslices, u)
-        self.assertTrue(same)
-
-    def test_concrete_path_no_model_path(self):
-        p = Port()
-        n = NetworkParameter()
-        verdict, _ = self.synchronizer.concrete_path_exists(p, n)
-        self.assertFalse(verdict)
-
-    def test_concrete_no_object_path_adjacent(self):
-        p = Instance()
-        s1 = Slice()
-        s2 = Slice()
-        p.slice = s2
-        verdict, _ = self.synchronizer.concrete_path_exists(p, s1)
-
-        self.assertFalse(verdict)
-
-    def test_concrete_object_path_adjacent(self):
-        p = Instance()
-        s = Slice()
-        p.slice = s
-        verdict, edge_type = self.synchronizer.concrete_path_exists(p, s)
-
-        self.assertTrue(verdict)
-        self.assertEqual(edge_type, event_loop.DIRECT_EDGE)
-
-    def test_concrete_object_controller_path_adjacent(self):
-        p = Instance()
-        q = Instance()
-        cs = ControllerSlice()
-        cs2 = ControllerSlice()
-        s1 = Slice()
-        s2 = Slice()
-        p.slice = s1
-        q.slice = s2
-        cs.slice = s1
-        s1.controllerslices = mock_enumerator([cs])
-        s2.controllerslices = mock_enumerator([])
-
-        verdict1, edge_type1 = self.synchronizer.concrete_path_exists(p, cs)
-        verdict2, _ = self.synchronizer.concrete_path_exists(q, cs)
-        verdict3, _ = self.synchronizer.concrete_path_exists(p, cs2)
-
-        self.assertTrue(verdict1)
-        self.assertFalse(verdict2)
-        self.assertFalse(verdict3)
-
-        self.assertEqual(edge_type1, event_loop.PROXY_EDGE)
-
-    def test_concrete_object_controller_path_distant(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        ct = ControllerSite()
-        ct.site = t
-        p.slice = s
-        s.site = t
-        verdict = self.synchronizer.concrete_path_exists(p, ct)
-        self.assertTrue(verdict)
-
-    def test_concrete_object_path_distant(self):
-        p = Instance()
-        s = Slice()
-        t = Site()
-        p.slice = s
-        s.site = t
-        verdict = self.synchronizer.concrete_path_exists(p, t)
-        self.assertTrue(verdict)
-
-    def test_concrete_no_object_path_distant(self):
-        p = Instance()
-        s = Slice()
-        s.controllerslice = mock_enumerator([])
-
-        t = Site()
-        t.controllersite = mock_enumerator([])
-
-        ct = ControllerSite()
-        ct.site = Site()
-        p.slice = s
-        s.site = t
-
-        verdict, _ = self.synchronizer.concrete_path_exists(p, ct)
-        self.assertFalse(verdict)
-
-    def test_cohorting_independent(self):
-        i = Image()
-
-        p = Slice()
-        c = Instance()
-        c.slice = None
-        c.image = None
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c], False)
-        self.assertEqual(len(cohorts), 3)
-
-    def test_cohorting_related(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        s = ControllerSlice()
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], False)
-        self.assertIn([i, c], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([s], cohorts)
-
-    def test_cohorting_related_multi(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        cs = ControllerSlice()
-        s = Slice()
-        cs.slice = s
-        s.controllerslices = mock_enumerator([cs])
-        c.slice = s
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], False)
-
-        big_cohort = max(cohorts, key=len)
-        self.assertGreater(big_cohort.index(c), big_cohort.index(i))
-        self.assertGreater(big_cohort.index(cs), big_cohort.index(s))
-        self.assertIn([p], cohorts)
-
-    def test_cohorting_related_multi_delete(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        cs = ControllerSlice()
-        s = Slice()
-        cs.slice = s
-        c.slice = s
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], True)
-
-        big_cohort = max(cohorts, key=len)
-        self.assertGreater(big_cohort.index(i), big_cohort.index(c))
-        self.assertGreater(big_cohort.index(s), big_cohort.index(cs))
-        self.assertIn([p], cohorts)
-
-    def test_cohorting_related_delete(self):
-        i = Image()
-        p = Port()
-        c = Instance()
-        c.image = i
-        s = ControllerSlice()
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], True)
-        self.assertIn([c, i], cohorts)
-        self.assertIn([p], cohorts)
-        self.assertIn([s], cohorts)
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/xos/synchronizers/new_base/tests/test_services.py b/xos/synchronizers/new_base/tests/test_services.py
deleted file mode 100644
index e5e2d49..0000000
--- a/xos/synchronizers/new_base/tests/test_services.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-from mock import patch
-import mock
-import pdb
-import networkx as nx
-
-import os
-import sys
-
-test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
-xos_dir = os.path.join(test_path, "..", "..", "..")
-
-
-class TestServices(unittest.TestCase):
-    def setUp(self):
-        self.sys_path_save = sys.path
-        self.cwd_save = os.getcwd()
-        sys.path.append(xos_dir)
-        sys.path.append(os.path.join(xos_dir, "synchronizers", "new_base"))
-        sys.path.append(
-            os.path.join(xos_dir, "synchronizers", "new_base", "tests", "steps")
-        )
-
-        config = os.path.join(test_path, "test_config.yaml")
-        from xosconfig import Config
-
-        Config.clear()
-        Config.init(config, "synchronizer-config-schema.yaml")
-
-        from synchronizers.new_base.mock_modelaccessor_build import (
-            build_mock_modelaccessor,
-        )
-
-        build_mock_modelaccessor(xos_dir, services_dir=None, service_xprotos=[])
-
-        os.chdir(os.path.join(test_path, ".."))  # config references tests/model-deps
-
-        import event_loop
-
-        reload(event_loop)
-        import backend
-
-        reload(backend)
-        from modelaccessor import model_accessor
-
-        # import all class names to globals
-        for (k, v) in model_accessor.all_model_classes.items():
-            globals()[k] = v
-
-        b = backend.Backend()
-        steps_dir = Config.get("steps_dir")
-        self.steps = b.load_sync_step_modules(steps_dir)
-        self.synchronizer = event_loop.XOSObserver(self.steps)
-
-    def tearDown(self):
-        sys.path = self.sys_path_save
-        os.chdir(self.cwd_save)
-
-    def test_service_models(self):
-        s = Service()
-        a = ServiceInstance(owner=s)
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], False)
-        self.assertIn([s, a], cohorts)
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], False)
-        self.assertIn([s, a], cohorts)
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], True)
-        self.assertIn([a, s], cohorts)
-
-        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], True)
-        self.assertIn([a, s], cohorts)
-
-
-if __name__ == "__main__":
-    unittest.main()