SEBA-405 get synchronizer library unit tests automated;
restore previously disabled tests

Change-Id: Ic3ae85548697ae4feda0bd545b53b665409e2770
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/__init__.py b/lib/xos-synchronizer/xos-synchronizer-tests/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/model-deps b/lib/xos-synchronizer/xos-synchronizer-tests/model-deps
new file mode 100644
index 0000000..247a190
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/model-deps
@@ -0,0 +1,656 @@
+{ 
+     
+    
+    "User": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerUser", "controllerusers", "user"],
+        
+        
+        ["Site", "site", "users"],
+        ["DashboardView", "dashboards", "user"]
+        
+    ], 
+    
+    "Privilege": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerPrivilege", "controllerprivileges", "privilege"]
+        
+        
+        
+    ], 
+    
+    "AddressPool": [
+        
+        
+        
+        
+        
+        ["Service", "service", "addresspools"]
+        
+    ], 
+     
+    
+    "ControllerDashboardView": [
+        
+        
+        
+        
+        
+        ["Controller", "controller", "controllerdashboardviews"],
+        ["DashboardView", "dashboardView", "controllerdashboardviews"]
+        
+    ], 
+    
+    "ControllerImages": [
+        
+        
+        
+        
+        
+        ["Image", "image", "controllerimages"],
+        ["Controller", "controller", "controllerimages"]
+        
+    ], 
+    
+    "ControllerNetwork": [
+        
+        
+        
+        
+        
+        ["Network", "network", "controllernetworks"],
+        ["Controller", "controller", "controllernetworks"]
+        
+    ], 
+    
+    "ControllerRole": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "ControllerSite": [
+        
+        
+        
+        
+        
+        ["Site", "site", "controllersite"],
+        ["Controller", "controller", "controllersite"]
+        
+    ], 
+    
+    "ControllerPrivilege": [
+        
+        
+        
+        
+        
+        ["Controller", "controller", "controllerprivileges"],
+        ["Privilege", "privilege", "controllerprivileges"]
+        
+    ], 
+    
+    "ControllerSitePrivilege": [
+        
+        
+        
+        
+        
+        ["Controller", "controller", "controllersiteprivileges"],
+        ["SitePrivilege", "site_privilege", "controllersiteprivileges"]
+        
+    ], 
+    
+    "ControllerSlice": [
+        
+        
+        
+        
+        
+        ["Controller", "controller", "controllerslices"],
+        ["Slice", "slice", "controllerslices"]
+        
+    ], 
+    
+    "ControllerSlicePrivilege": [
+        
+        
+        
+        
+        
+        ["Controller", "controller", "controllersliceprivileges"],
+        ["SlicePrivilege", "slice_privilege", "controllersliceprivileges"]
+        
+    ], 
+    
+    "ControllerUser": [
+        
+        
+        
+        
+        
+        ["User", "user", "controllerusers"],
+        ["Controller", "controller", "controllersusers"]
+        
+    ], 
+    
+    "DashboardView": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerDashboardView", "controllerdashboardviews", "dashboardView"],
+        
+        
+        ["Controller", "controllers", "dashboardviews"],
+        ["Deployment", "deployments", "dashboardviews"]
+        
+    ], 
+    
+    "Deployment": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "DeploymentPrivilege": [
+        
+        
+        
+        
+        
+        ["User", "user", "deploymentprivileges"],
+        ["Deployment", "deployment", "deploymentprivileges"],
+        ["DeploymentRole", "role", "deploymentprivileges"]
+        
+    ], 
+    
+    "DeploymentRole": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Flavor": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Image": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerImages", "controllerimages", "image"]
+        
+        
+        
+    ], 
+    
+    "ImageDeployments": [
+        
+        
+        
+        
+        
+        ["Image", "image", "imagedeployments"],
+        ["Deployment", "deployment", "imagedeployments"]
+        
+    ], 
+    
+    "Instance": [
+        
+        
+        
+        
+        
+        ["Image", "image", "instances"],
+        ["User", "creator", "instances"],
+        ["Slice", "slice", "instances"],
+        ["Deployment", "deployment", "instance_deployment"],
+        ["Node", "node", "instances"],
+        ["Flavor", "flavor", "instance"],
+        ["Instance", "parent", "instance"]
+        
+    ], 
+    
+    "Network": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerNetwork", "controllernetworks", "network"],
+        
+        
+        ["NetworkTemplate", "template", "network"],
+        ["Slice", "owner", "ownedNetworks"],
+        ["Slice", "permitted_slices", "availableNetworks"]
+    ], 
+    
+    "NetworkParameter": [
+        
+        
+        
+        
+        
+        ["NetworkParameterType", "parameter", "networkparameters"]
+        
+    ], 
+    
+    "NetworkParameterType": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "NetworkSlice": [
+        
+        
+        
+        
+        
+        ["Network", "network", "networkslices"],
+        ["Slice", "slice", "networkslices"]
+        
+    ], 
+    
+    "NetworkTemplate": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Node": [
+        
+        
+        
+        
+        
+        ["SiteDeployment", "site_deployment", "nodes"]
+        
+    ], 
+    
+    "NodeLabel": [
+        
+        
+        
+        
+        
+        ["Node", "node", "nodelabels"]
+        
+    ], 
+    
+    "Port": [
+        
+        
+        
+        
+        
+        ["Network", "network", "links"],
+        ["Instance", "instance", "ports"]
+        
+    ], 
+    
+    "Role": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+    ], 
+    
+    "Service": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "ServiceAttribute": [
+        
+        
+        
+        
+        
+        ["Service", "service", "serviceattributes"]
+        
+    ], 
+    
+    "ServiceDependency": [
+        
+        
+        
+        
+        
+        ["Service", "provider_service", "provided_dependencies"],
+        ["Service", "subscriber_service", "subscribed_dependencies"]
+        
+    ], 
+    
+    "ServiceMonitoringAgentInfo": [
+        
+        
+        
+        
+        
+        ["Service", "service", "servicemonitoringagents"]
+        
+    ], 
+    
+    "ServicePrivilege": [
+        
+        
+        
+        
+        
+        ["User", "user", "serviceprivileges"],
+        ["Service", "service", "serviceprivileges"],
+        ["ServiceRole", "role", "serviceprivileges"]
+        
+    ], 
+    
+    "ServiceRole": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Site": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerSite", "controllersite", "site"],
+        
+        
+        ["Deployment", "deployments", "sites"]
+        
+    ], 
+    
+    "SiteDeployment": [
+        
+        
+        
+        
+        
+        ["Site", "site", "sitedeployments"],
+        ["Deployment", "deployment", "sitedeployments"],
+        ["Controller", "controller", "sitedeployments"]
+        
+    ], 
+    
+    "SitePrivilege": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerSitePrivilege", "controllersiteprivileges", "site_privilege"],
+        
+        
+        ["User", "user", "siteprivileges"],
+        ["Site", "site", "siteprivileges"],
+        ["SiteRole", "role", "siteprivileges"]
+        
+    ], 
+    
+    "SiteRole": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Slice": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerSlice", "controllerslices", "slice"],
+        
+        
+        ["Site", "site", "slices"],
+        ["Service", "service", "slices"],
+        ["User", "creator", "slices"],
+        ["Flavor", "default_flavor", "slices"],
+        ["Image", "default_image", "slices"],
+        ["Node", "default_node", "slices"]
+        
+    ], 
+    
+    "SlicePrivilege": [
+        
+        
+        
+        
+        
+         
+        
+        
+        
+        
+        
+        
+        
+        ["ControllerSlicePrivilege", "controllersliceprivileges", "slice_privilege"],
+        
+        
+        ["User", "user", "sliceprivileges"],
+        ["Slice", "slice", "sliceprivileges"],
+        ["SliceRole", "role", "sliceprivileges"]
+        
+    ], 
+    
+    "SliceRole": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "Tag": [
+        
+        
+        
+        
+        
+        ["Service", "service", "tags"]
+        
+    ], 
+    
+    "InterfaceType": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "ServiceInterface": [
+        
+        
+        
+        
+        
+        ["Service", "service", "service_interfaces"],
+        ["InterfaceType", "interface_type", "service_interfaces"]
+        
+    ], 
+    
+    "ServiceInstance": [
+        
+        
+        
+        
+        
+        ["Service", "owner", "service_instances"]
+        
+    ], 
+    
+    "ServiceInstanceLink": [
+        
+        
+        
+        
+        
+        ["ServiceInstance", "provider_service_instance", "provided_links"],
+        ["ServiceInterface", "provider_service_interface", "provided_links"],
+        ["ServiceInstance", "subscriber_service_instance", "subscribed_links"],
+        ["Service", "subscriber_service", "subscribed_links"],
+        ["Network", "subscriber_network", "subscribed_links"]
+        
+    ], 
+    
+    "ServiceInstanceAttribute": [
+        
+        
+        
+        
+        
+        ["ServiceInstance", "service_instance", "service_instance_attributes"]
+        
+    ], 
+    
+    "TenantWithContainer": [
+        
+        
+        
+        
+        
+        ["Service", "owner", "service_instances"],
+        ["Instance", "instance", "+"],
+        ["User", "creator", "+"]
+        
+    ], 
+    
+    "XOS": [
+        
+        
+        
+        
+        
+        
+    ], 
+    
+    "XOSGuiExtension": [
+        
+        
+        
+        
+        
+        
+    ]
+}
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_config.yaml b/lib/xos-synchronizer/xos-synchronizer-tests/test_config.yaml
new file mode 100644
index 0000000..f292fca
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_config.yaml
@@ -0,0 +1,37 @@
+---
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: test-synchronizer
+accessor:
+  username: xosadmin@opencord.org
+  password: "sample"
+  kind: testframework
+event_bus:
+  endpoint: "fake"
+  kind: kafka
+logging:
+  version: 1
+  handlers:
+    console:
+      class: logging.StreamHandler
+  loggers:
+    '':
+      handlers:
+          - console
+      level: DEBUG
+dependency_graph: "xos-synchronizer-tests/model-deps"
+steps_dir: "xos-synchronizer-tests/test_steps"
+pull_steps_dir: "xos-synchronizer-tests/test_pull_steps"
+event_steps_dir: "xos-synchronizer-tests/test_event_steps"
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py
new file mode 100644
index 0000000..47d55f0
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_controller_dependencies.py
@@ -0,0 +1,244 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from mock import patch
+import mock
+import pdb
+import networkx as nx
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+
+class TestControllerDependencies(unittest.TestCase):
+
+    def setUp(self):
+        global mock_enumerator, event_loop
+
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
+
+        import xossynchronizer.event_loop
+        reload(xossynchronizer.event_loop)
+        event_loop = xossynchronizer.event_loop
+
+        import xossynchronizer.backend
+        reload(xossynchronizer.backend)
+
+        from xossynchronizer.modelaccessor import model_accessor
+
+        from mock_modelaccessor import mock_enumerator
+
+        # import all class names to globals
+        for (k, v) in model_accessor.all_model_classes.items():
+            globals()[k] = v
+
+        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_multi_controller_path(self):
+        csl = ControllerSlice()
+        csi = ControllerSite()
+        site = Site()
+        slice = Slice()
+        slice.site = site
+        csl.slice = slice
+        csi.site = site
+        slice.controllerslices = mock_enumerator([csl])
+        site.controllersite = mock_enumerator([csi])
+
+        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
+        self.assertTrue(verdict)
+
+        # TODO(smbaker): event_loop.PROXY_EDGE is set to the wrong thing
+        # self.assertEqual(edge_type, event_loop.PROXY_EDGE)
+
+    def test_controller_path_simple(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        ct = ControllerSite()
+        p.slice = s
+        s.site = t
+        ct.site = t
+        t.controllersite = mock_enumerator([ct])
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
+        self.assertEqual([ct, p], cohorts[0])
+        cohorts = self.synchronizer.compute_dependent_cohorts([ct, p], False)
+        self.assertEqual([ct, p], cohorts[0])
+
+    def test_controller_deletion_path(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        ct = ControllerSite()
+        ct.site = t
+        p.slice = s
+        s.site = t
+
+        t.controllersite = mock_enumerator([ct])
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
+        self.assertIn(t, cohorts[0])
+        self.assertIn(ct, cohorts[0])
+        self.assertIn(s, cohorts[0])
+        self.assertIn(p, cohorts[0])
+        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
+        #self.assertEqual([t, ct, s, p], cohorts[0])
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
+        self.assertIn(t, cohorts[0])
+        self.assertIn(ct, cohorts[0])
+        self.assertIn(s, cohorts[0])
+        self.assertIn(p, cohorts[0])
+        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
+        #self.assertEqual([p, s, ct, t], cohorts[0])
+
+    def test_multi_controller_schedule(self):
+        csl = ControllerSlice()
+        csi = ControllerSite()
+        site = Site()
+        slice = Slice()
+        slice.site = site
+        csl.slice = slice
+        csi.site = site
+        slice.controllerslices = mock_enumerator([csl])
+        site.controllersite = mock_enumerator([csi])
+        i = Instance()
+        i.slice = slice
+
+        cohorts = self.synchronizer.compute_dependent_cohorts(
+            [i, slice, site, csl, csi], False
+        )
+        self.assertIn(site, cohorts[0])
+        self.assertIn(csi, cohorts[0])
+        self.assertIn(slice, cohorts[0])
+        self.assertIn(csl, cohorts[0])
+        self.assertIn(i, cohorts[0])
+
+        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
+        #self.assertEqual([site, csi, slice, csl, i], cohorts[0])
+
+    def test_multi_controller_path_negative(self):
+        csl = ControllerSlice()
+        csi = ControllerSite()
+        site = Site()
+        slice = Slice()
+        slice.site = site
+        csl.slice = slice
+        csi.site = site
+        slice.controllerslices = mock_enumerator([])
+        site.controllersite = mock_enumerator([])
+
+        verdict, edge_type = self.synchronizer.concrete_path_exists(csl, csi)
+        self.assertFalse(verdict)
+        self.assertEqual(edge_type, None)
+
+    def test_controller_path_simple_negative(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        ct = ControllerSite()
+        p.slice = s
+        s.site = t
+        ct.site = t
+        t.controllersite = mock_enumerator([])
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, ct], False)
+        self.assertIn([ct], cohorts)
+        self.assertIn([p], cohorts)
+
+    def test_controller_deletion_path_negative(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        ct = ControllerSite()
+        s.site = t
+
+        t.controllersite = mock_enumerator([])
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], False)
+        self.assertIn([t, s], cohorts)
+        self.assertIn([p], cohorts)
+        self.assertIn([ct], cohorts)
+        cohorts = self.synchronizer.compute_dependent_cohorts([p, s, t, ct], True)
+        self.assertIn([s, t], cohorts)
+        self.assertIn([p], cohorts)
+        self.assertIn([ct], cohorts)
+
+    def DISABLED_test_multi_controller_deletion_schedule(self):
+        # TODO(smbaker): `csi` is undefined, test is broken as written.
+
+        csl = ControllerSlice()
+        cn = ControllerNetwork()
+        site = Site()
+        slice = Slice()
+        slice.site = site
+        slice.controllerslices = mock_enumerator([])
+        site.controllersite = mock_enumerator([])
+        i = Instance()
+        i.slice = slice
+
+        cohorts = self.synchronizer.compute_dependent_cohorts(
+            [i, slice, site, csl, csi], False
+        )
+        self.assertIn([site, slice, i], cohorts)
+        self.assertIn([csl], cohorts)
+        self.assertIn([csi], cohorts)
+
+    def test_multi_controller_schedule_negative(self):
+        csl = ControllerSlice()
+        csi = ControllerSite()
+        site = Site()
+        slice = Slice()
+        slice.site = site
+        slice.controllerslices = mock_enumerator([])
+        site.controllersite = mock_enumerator([])
+        i = Instance()
+        i.slice = slice
+
+        cohorts = self.synchronizer.compute_dependent_cohorts(
+            [i, slice, site, csl, csi], False
+        )
+        self.assertIn([site, slice, i], cohorts)
+        self.assertIn([csl], cohorts)
+        self.assertIn([csi], cohorts)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_diffs.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_diffs.py
new file mode 100644
index 0000000..9e09c0f
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_diffs.py
@@ -0,0 +1,110 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from mock import patch, call, Mock, PropertyMock
+import json
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+services_dir = os.path.join(xos_dir, "../../xos_services")
+
+class TestDiffs(unittest.TestCase):
+
+    """ These tests are for the mock modelaccessor, to make sure it behaves like the real one """
+
+    def setUp(self):
+
+        self.sys_path_save = sys.path
+        # Setting up the config module
+        from xosconfig import Config
+
+        config = os.path.join(test_path, "test_config.yaml")
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+        # END Setting up the config module
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        # FIXME this is to get jenkins to pass the tests, somehow it is running tests in a different order
+        # and apparently it is not overriding the generated model accessor
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir, [])
+        import xossynchronizer.modelaccessor
+
+        # import all class names to globals
+        for (
+            k,
+            v,
+        ) in (
+            xossynchronizer.modelaccessor.model_accessor.all_model_classes.items()
+        ):
+            globals()[k] = v
+
+        self.log = Mock()
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+
+    def test_new_diff(self):
+        site = Site(name="mysite")
+
+        self.assertEqual(site.is_new, True)
+        self.assertEqual(site._dict, {"name": "mysite"})
+        self.assertEqual(site.diff, {})
+        self.assertEqual(site.changed_fields, ["name"])
+        self.assertEqual(site.has_field_changed("name"), False)
+        self.assertEqual(site.has_field_changed("login_base"), False)
+
+        site.login_base = "bar"
+
+        self.assertEqual(site._dict, {"login_base": "bar", "name": "mysite"})
+        self.assertEqual(site.diff, {"login_base": (None, "bar")})
+        self.assertIn("name", site.changed_fields)
+        self.assertIn("login_base", site.changed_fields)
+        self.assertEqual(site.has_field_changed("name"), False)
+        self.assertEqual(site.has_field_changed("login_base"), True)
+        self.assertEqual(site.get_field_diff("login_base"), (None, "bar"))
+
+    def test_existing_diff(self):
+        site = Site(name="mysite", login_base="foo")
+
+        # this is what would happen after saving and re-loading
+        site.is_new = False
+        site.id = 1
+        site._initial = site._dict
+
+        self.assertEqual(site.is_new, False)
+        self.assertEqual(site._dict, {"id": 1, "name": "mysite", "login_base": "foo"})
+        self.assertEqual(site.diff, {})
+        self.assertEqual(site.changed_fields, [])
+        self.assertEqual(site.has_field_changed("name"), False)
+        self.assertEqual(site.has_field_changed("login_base"), False)
+
+        site.login_base = "bar"
+
+        self.assertEqual(site._dict, {"id": 1, "login_base": "bar", "name": "mysite"})
+        self.assertEqual(site.diff, {"login_base": ("foo", "bar")})
+        self.assertIn("login_base", site.changed_fields)
+        self.assertEqual(site.has_field_changed("name"), False)
+        self.assertEqual(site.has_field_changed("login_base"), True)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_event_engine.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_event_engine.py
new file mode 100644
index 0000000..bc1cb97
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_event_engine.py
@@ -0,0 +1,347 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import confluent_kafka
+import functools
+import unittest
+
+from mock import patch, PropertyMock, ANY
+
+import os
+import sys
+import time
+
+log = None
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+print os.getcwd()
+
+def config_get_mock(orig, overrides, key):
+    if key in overrides:
+        return overrides[key]
+    else:
+        return orig(key)
+
+
+class FakeKafkaConsumer:
+    def __init__(self, values=[]):
+        self.values = values
+
+    def subscribe(self, topics):
+        pass
+
+    def poll(self, timeout=1.0):
+        if self.values:
+            return FakeKafkaMessage(self.values.pop())
+        # block forever
+        time.sleep(1000)
+
+
+class FakeKafkaMessage:
+    """ Works like Message in confluent_kafka
+        https://docs.confluent.io/current/clients/confluent-kafka-python/#message
+    """
+
+    def __init__(
+        self,
+        timestamp=None,
+        topic="faketopic",
+        key="fakekey",
+        value="fakevalue",
+        error=False,
+    ):
+
+        if timestamp is None:
+            self.fake_ts_type = confluent_kafka.TIMESTAMP_NOT_AVAILABLE
+            self.fake_timestamp = None
+        else:
+            self.fake_ts_type = confluent_kafka.TIMESTAMP_CREATE_TIME
+            self.fake_timestamp = timestamp
+
+        self.fake_topic = topic
+        self.fake_key = key
+        self.fake_value = value
+        self.fake_error = error
+
+    def error(self):
+        return self.fake_error
+
+    def timestamp(self):
+        return (self.fake_ts_type, self.fake_timestamp)
+
+    def topic(self):
+        return self.fake_topic
+
+    def key(self):
+        return self.fake_key
+
+    def value(self):
+        return self.fake_value
+
+
+class TestEventEngine(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+
+        global log
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        if not log:
+            from multistructlog import create_logger
+
+            log = create_logger(Config().get("logging"))
+
+    def setUp(self):
+        global XOSKafkaThread, Config, log
+
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        from xossynchronizer.modelaccessor import model_accessor
+
+        # The test config.yaml references files in `xos-synchronizer-tests/` so make sure we're in the parent
+        # directory of the test directory.
+        os.chdir(os.path.join(test_path, ".."))
+
+        from xossynchronizer.event_engine import XOSKafkaThread, XOSEventEngine
+
+        self.event_steps_dir = Config.get("event_steps_dir")
+        self.event_engine = XOSEventEngine(model_accessor=model_accessor, log=log)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_load_event_step_modules(self):
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+        self.assertEqual(len(self.event_engine.event_steps), 1)
+
+    def test_start(self):
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            FakeKafkaConsumer, "subscribe"
+        ) as fake_subscribe, patch.object(
+            self.event_engine.event_steps[0], "process_event"
+        ) as process_event:
+
+            create_kafka_consumer.return_value = FakeKafkaConsumer(
+                values=["sampleevent"]
+            )
+            self.event_engine.start()
+
+            self.assertEqual(len(self.event_engine.threads), 1)
+
+            # Since event_engine.start() launches threads, give them a hundred milliseconds to do something...
+            time.sleep(0.1)
+
+            # We should have subscribed to the fake consumer
+            fake_subscribe.assert_called_once()
+
+            # The fake consumer will have returned one event
+            process_event.assert_called_once()
+
+    def test_start_with_pattern(self):
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            FakeKafkaConsumer, "subscribe"
+        ) as fake_subscribe, patch.object(
+            self.event_engine.event_steps[0], "process_event"
+        ) as process_event, patch.object(
+            self.event_engine.event_steps[0], "pattern", new_callable=PropertyMock
+        ) as pattern, patch.object(
+            self.event_engine.event_steps[0], "topics", new_callable=PropertyMock
+        ) as topics:
+
+            pattern.return_value = "somepattern"
+            topics.return_value = []
+
+            create_kafka_consumer.return_value = FakeKafkaConsumer(
+                values=["sampleevent"]
+            )
+            self.event_engine.start()
+
+            self.assertEqual(len(self.event_engine.threads), 1)
+
+            # Since event_engine.start() launches threads, give them a hundred milliseconds to do something...
+            time.sleep(0.1)
+
+            # We should have subscribed to the fake consumer
+            fake_subscribe.assert_called_with("somepattern")
+
+            # The fake consumer will have returned one event
+            process_event.assert_called_once()
+
+    def test_start_bad_tech(self):
+        """ Set an unknown Technology in the event_step. XOSEventEngine.start() should print an error message and
+            not create any threads.
+        """
+
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            log, "error"
+        ) as log_error, patch.object(
+            self.event_engine.event_steps[0], "technology"
+        ) as technology:
+            technology.return_value = "not_kafka"
+            create_kafka_consumer.return_value = FakeKafkaConsumer()
+            self.event_engine.start()
+
+            self.assertEqual(len(self.event_engine.threads), 0)
+
+            log_error.assert_called_with(
+                "Unknown technology. Skipping step",
+                step="TestEventStep",
+                technology=ANY,
+            )
+
+    def test_start_bad_no_topics(self):
+        """ Set no topics in the event_step. XOSEventEngine.start() will launch a thread, but the thread will fail
+            with an exception before calling subscribe.
+        """
+
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            FakeKafkaConsumer, "subscribe"
+        ) as fake_subscribe, patch.object(
+            self.event_engine.event_steps[0], "topics", new_callable=PropertyMock
+        ) as topics:
+            topics.return_value = []
+            create_kafka_consumer.return_value = FakeKafkaConsumer()
+            self.event_engine.start()
+
+            # the thread does get launched, but it will fail with an exception
+            self.assertEqual(len(self.event_engine.threads), 1)
+
+            time.sleep(0.1)
+
+            fake_subscribe.assert_not_called()
+
+    def test_start_bad_topics_and_pattern(self):
+        """ Set no topics in the event_step. XOSEventEngine.start() will launch a thread, but the thread will fail
+            with an exception before calling subscribe.
+        """
+
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            FakeKafkaConsumer, "subscribe"
+        ) as fake_subscribe, patch.object(
+            self.event_engine.event_steps[0], "pattern", new_callable=PropertyMock
+        ) as pattern:
+            pattern.return_value = "foo"
+            create_kafka_consumer.return_value = FakeKafkaConsumer()
+            self.event_engine.start()
+
+            # the thread does get launched, but it will fail with an exception
+            self.assertEqual(len(self.event_engine.threads), 1)
+
+            time.sleep(0.1)
+
+            fake_subscribe.assert_not_called()
+
+    def test_start_config_no_eventbus_kind(self):
+        """ Set a blank event_bus.kind in Config. XOSEventEngine.start() should print an error message and
+            not create any threads.
+        """
+
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        config_get_orig = Config.get
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            log, "error"
+        ) as log_error, patch.object(
+            Config,
+            "get",
+            new=functools.partial(
+                config_get_mock, config_get_orig, {"event_bus.kind": None}
+            ),
+        ):
+
+            create_kafka_consumer.return_value = FakeKafkaConsumer()
+            self.event_engine.start()
+
+            self.assertEqual(len(self.event_engine.threads), 0)
+
+            log_error.assert_called_with(
+                "Eventbus kind is not configured in synchronizer config file."
+            )
+
+    def test_start_config_bad_eventbus_kind(self):
+        """ Set an unknown event_bus.kind in Config. XOSEventEngine.start() should print an error message and
+            not create any threads.
+        """
+
+        self.event_engine.load_event_step_modules(self.event_steps_dir)
+
+        config_get_orig = Config.get
+        with patch.object(
+            XOSKafkaThread, "create_kafka_consumer"
+        ) as create_kafka_consumer, patch.object(
+            log, "error"
+        ) as log_error, patch.object(
+            Config,
+            "get",
+            new=functools.partial(
+                config_get_mock, config_get_orig, {"event_bus.kind": "not_kafka"}
+            ),
+        ):
+            create_kafka_consumer.return_value = FakeKafkaConsumer()
+            self.event_engine.start()
+
+            self.assertEqual(len(self.event_engine.threads), 0)
+
+            log_error.assert_called_with(
+                "Eventbus kind is set to a technology we do not implement.",
+                eventbus_kind="not_kafka",
+            )
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py
new file mode 100644
index 0000000..372d6d3
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_event_steps/event_step.py
@@ -0,0 +1,29 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+from xossynchronizer.event_steps.eventstep import EventStep
+from mock_modelaccessor import *
+
+
+class TestEventStep(EventStep):
+    technology = "kafka"
+    topics = ["sometopic"]
+    pattern = None
+
+    def __init__(self, model_accessor, log, *args, **kwargs):
+        super(TestEventStep, self).__init__(model_accessor, log, *args, **kwargs)
+
+    def process_event(self, event):
+        print("received an event", event)
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py
new file mode 100644
index 0000000..e2bbbb0
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_load.py
@@ -0,0 +1,119 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from mock import patch
+import mock
+import pdb
+import networkx as nx
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+class TestScheduling(unittest.TestCase):
+    def setUp(self):
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        # The test config.yaml references files in `xos-synchronizer-tests/` so make sure we're in the parent
+        # directory of the test directory.
+        os.chdir(os.path.join(test_path, ".."))
+
+        import xossynchronizer.event_loop
+        reload(xossynchronizer.event_loop)
+
+        import xossynchronizer.backend
+        reload(xossynchronizer.backend)
+
+        from xossynchronizer.modelaccessor import model_accessor
+
+        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_load_steps(self):
+        step_names = [s.__name__ for s in self.steps]
+        self.assertIn("SyncControllerSlices", step_names)
+
+    def test_load_deps(self):
+        self.synchronizer.load_dependency_graph()
+        graph = self.synchronizer.model_dependency_graph
+        self.assertTrue(graph[False].has_edge("Instance", "Slice"))
+        self.assertTrue(graph[True].has_edge("Slice", "Instance"))
+        self.assertTrue(graph[False].has_edge("Slice", "ControllerSlice"))
+        self.assertTrue(graph[True].has_edge("ControllerSlice", "Slice"))
+
+    def test_load_dep_accessors(self):
+        self.synchronizer.load_dependency_graph()
+        graph = self.synchronizer.model_dependency_graph
+        self.assertDictContainsSubset(
+            {"src_accessor": "controllerslices"},
+            graph[False]["Slice"]["ControllerSlice"],
+        )
+        self.assertDictContainsSubset(
+            {"src_accessor": "slice", "dst_accessor": "controllerslices"},
+            graph[True]["Slice"]["ControllerSlice"],
+        )
+
+    def test_load_sync_steps(self):
+        self.synchronizer.load_sync_steps()
+        model_to_step = self.synchronizer.model_to_step
+        step_lookup = self.synchronizer.step_lookup
+        self.assertIn(
+            ("ControllerSlice", ["SyncControllerSlices"]), model_to_step.items()
+        )
+        self.assertIn(
+            ("Port", ["SyncPort"]), model_to_step.items()
+        )
+        self.assertIn(("SiteRole", ["SyncRoles"]), model_to_step.items())
+
+        for k, v in model_to_step.items():
+            val = v[0]
+            observes = step_lookup[val].observes
+            if not isinstance(observes, list):
+                observes = [observes]
+
+            observed_names = []
+            for o in observes:
+                if isinstance(o,str):
+                    observed_names.append(o)
+                else:
+                    observed_names.append(o.__name__)
+
+            self.assertIn(k, observed_names)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py
new file mode 100644
index 0000000..e2659c3
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_model_policy_tenantwithcontainer.py
@@ -0,0 +1,286 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+from mock import patch
+import mock
+import pdb
+
+import os
+import sys
+from xosconfig import Config
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+class TestModelPolicyTenantWithContainer(unittest.TestCase):
+    def setUp(self):
+        global TenantWithContainerPolicy, LeastLoadedNodeScheduler, MockObjectList
+
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = basic_conf = os.path.abspath(
+            os.path.dirname(os.path.realpath(__file__)) + "/test_config.yaml"
+        )
+        Config.clear()  # in case left unclean by a previous test case
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        import xossynchronizer.model_policies.model_policy_tenantwithcontainer
+        from xossynchronizer.model_policies.model_policy_tenantwithcontainer import (
+            TenantWithContainerPolicy,
+            LeastLoadedNodeScheduler,
+        )
+
+        from mock_modelaccessor import MockObjectList
+
+        # import all class names to globals
+        for (
+            k,
+            v,
+        ) in xossynchronizer.model_policies.model_policy_tenantwithcontainer.model_accessor.all_model_classes.items():
+            globals()[k] = v
+
+        from xossynchronizer.modelaccessor import model_accessor
+
+        self.policy = TenantWithContainerPolicy(model_accessor=model_accessor)
+        self.user = User(email="testadmin@test.org")
+        self.tenant = TenantWithContainer(creator=self.user)
+        self.flavor = Flavor(name="m1.small")
+
+    def tearDown(self):
+        Config.clear()
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_manage_container_no_slices(self):
+        with patch.object(TenantWithContainer, "owner") as owner:
+            owner.slices.count.return_value = 0
+            with self.assertRaises(Exception) as e:
+                self.policy.manage_container(self.tenant)
+            self.assertEqual(e.exception.message, "The service has no slices")
+
+    def test_manage_container(self):
+        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
+            TenantWithContainer, "save"
+        ) as tenant_save, patch.object(
+            Node, "site_deployment"
+        ) as site_deployment, patch.object(
+            Instance, "save"
+        ) as instance_save, patch.object(
+            Instance, "delete"
+        ) as instance_delete, patch.object(
+            TenantWithContainerPolicy, "get_image"
+        ) as get_image, patch.object(
+            LeastLoadedNodeScheduler, "pick"
+        ) as pick:
+            # setup mocks
+            node = Node(hostname="my.node.com")
+            slice = Slice(
+                name="mysite_test1", default_flavor=self.flavor, default_isolation="vm"
+            )
+            image = Image(name="trusty-server-multi-nic")
+            deployment = Deployment(name="testdeployment")
+            owner.slices.count.return_value = 1
+            owner.slices.all.return_value = [slice]
+            owner.slices.first.return_value = slice
+            get_image.return_value = image
+            pick.return_value = (node, None)
+            site_deployment.deployment = deployment
+            # done setup mocks
+
+            # call manage_container
+            self.policy.manage_container(self.tenant)
+
+            # make sure manage_container did what it is supposed to do
+            self.assertNotEqual(self.tenant.instance, None)
+            self.assertEqual(self.tenant.instance.creator.email, "testadmin@test.org")
+            self.assertEqual(self.tenant.instance.image.name, "trusty-server-multi-nic")
+            self.assertEqual(self.tenant.instance.flavor.name, "m1.small")
+            self.assertEqual(self.tenant.instance.isolation, "vm")
+            self.assertEqual(self.tenant.instance.node.hostname, "my.node.com")
+            self.assertEqual(self.tenant.instance.slice.name, "mysite_test1")
+            self.assertEqual(self.tenant.instance.parent, None)
+            instance_save.assert_called()
+            instance_delete.assert_not_called()
+            tenant_save.assert_called()
+
+    def test_manage_container_delete(self):
+        self.tenant.deleted = True
+
+        # call manage_container
+        self.policy.manage_container(self.tenant)
+
+        # make sure manage_container did what it is supposed to do
+        self.assertEqual(self.tenant.instance, None)
+
+    def test_manage_container_no_m1_small(self):
+        with patch.object(TenantWithContainer, "owner") as owner, patch.object(
+            Node, "site_deployment"
+        ) as site_deployment, patch.object(
+            Flavor, "objects"
+        ) as flavor_objects, patch.object(
+            TenantWithContainerPolicy, "get_image"
+        ) as get_image, patch.object(
+            LeastLoadedNodeScheduler, "pick"
+        ) as pick:
+            # setup mocks
+            node = Node(hostname="my.node.com")
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            image = Image(name="trusty-server-multi-nic")
+            deployment = Deployment(name="testdeployment")
+            owner.slices.count.return_value = 1
+            owner.slices.all.return_value = [slice]
+            owner.slices.first.return_value = slice
+            get_image.return_value = image
+            pick.return_value = (node, None)
+            site_deployment.deployment = deployment
+            flavor_objects.filter.return_value = []
+            # done setup mocks
+
+            with self.assertRaises(Exception) as e:
+                self.policy.manage_container(self.tenant)
+            self.assertEqual(e.exception.message, "No m1.small flavor")
+
+    def test_least_loaded_node_scheduler(self):
+        with patch.object(Node.objects, "get_items") as node_objects:
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            node = Node(hostname="my.node.com", id=4567)
+            node.instances = MockObjectList(initial=[])
+            node_objects.return_value = [node]
+
+            sched = LeastLoadedNodeScheduler(slice)
+            (picked_node, parent) = sched.pick()
+
+            self.assertNotEqual(picked_node, None)
+            self.assertEqual(picked_node.id, node.id)
+
+    def test_least_loaded_node_scheduler_two_nodes(self):
+        with patch.object(Node.objects, "get_items") as node_objects:
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            instance1 = Instance(id=1)
+            node1 = Node(hostname="my.node.com", id=4567)
+            node1.instances = MockObjectList(initial=[])
+            node2 = Node(hostname="my.node.com", id=8910)
+            node2.instances = MockObjectList(initial=[instance1])
+            node_objects.return_value = [node1, node2]
+
+            # should pick the node with the fewest instance (node1)
+
+            sched = LeastLoadedNodeScheduler(slice)
+            (picked_node, parent) = sched.pick()
+
+            self.assertNotEqual(picked_node, None)
+            self.assertEqual(picked_node.id, node1.id)
+
+    def test_least_loaded_node_scheduler_two_nodes_multi(self):
+        with patch.object(Node.objects, "get_items") as node_objects:
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            instance1 = Instance(id=1)
+            instance2 = Instance(id=2)
+            instance3 = Instance(id=3)
+            node1 = Node(hostname="my.node.com", id=4567)
+            node1.instances = MockObjectList(initial=[instance2, instance3])
+            node2 = Node(hostname="my.node.com", id=8910)
+            node2.instances = MockObjectList(initial=[instance1])
+            node_objects.return_value = [node1, node2]
+
+            # should pick the node with the fewest instance (node2)
+
+            sched = LeastLoadedNodeScheduler(slice)
+            (picked_node, parent) = sched.pick()
+
+            self.assertNotEqual(picked_node, None)
+            self.assertEqual(picked_node.id, node2.id)
+
+    def test_least_loaded_node_scheduler_with_label(self):
+        with patch.object(Node.objects, "get_items") as node_objects:
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            instance1 = Instance(id=1)
+            node1 = Node(hostname="my.node.com", id=4567)
+            node1.instances = MockObjectList(initial=[])
+            node2 = Node(hostname="my.node.com", id=8910)
+            node2.instances = MockObjectList(initial=[instance1])
+            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
+            # syntax.
+            node1.nodelabels__name = None
+            node2.nodelabels__name = "foo"
+            node_objects.return_value = [node1, node2]
+
+            # should pick the node with the label, even if it has a greater number of instances
+
+            sched = LeastLoadedNodeScheduler(slice, label="foo")
+            (picked_node, parent) = sched.pick()
+
+            self.assertNotEqual(picked_node, None)
+            self.assertEqual(picked_node.id, node2.id)
+
+    def test_least_loaded_node_scheduler_create_label(self):
+        with patch.object(Node.objects, "get_items") as node_objects, patch.object(
+            NodeLabel, "save", autospec=True
+        ) as nodelabel_save, patch.object(NodeLabel, "node") as nodelabel_node_add:
+            slice = Slice(
+                name="mysite_test1", default_flavor=None, default_isolation="vm"
+            )
+            instance1 = Instance(id=1)
+            node1 = Node(hostname="my.node.com", id=4567)
+            node1.instances = MockObjectList(initial=[])
+            node2 = Node(hostname="my.node.com", id=8910)
+            node2.instances = MockObjectList(initial=[instance1])
+            # Fake out the existence of a NodeLabel object. TODO: Extend the mock framework to support the model__field
+            # syntax.
+            node1.nodelabels__name = None
+            node2.nodelabels__name = None
+            node_objects.return_value = [node1, node2]
+
+            # should pick the node with the least number of instances
+
+            sched = LeastLoadedNodeScheduler(
+                slice, label="foo", constrain_by_service_instance=True
+            )
+            (picked_node, parent) = sched.pick()
+
+            self.assertNotEqual(picked_node, None)
+            self.assertEqual(picked_node.id, node1.id)
+
+            # NodeLabel should have been created and saved
+
+            self.assertEqual(nodelabel_save.call_count, 1)
+            self.assertEqual(nodelabel_save.call_args[0][0].name, "foo")
+
+            # The NodeLabel's node field should have been added to
+
+            NodeLabel.node.add.assert_called_with(node1)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py
new file mode 100644
index 0000000..ab861c8
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_payload.py
@@ -0,0 +1,342 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import unittest
+from mock import patch
+import mock
+import pdb
+import networkx as nx
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+ANSIBLE_FILE = "/tmp/payload_test"
+
+log = None
+
+
+def run_fake_ansible_template(*args, **kwargs):
+    opts = args[1]
+    open(ANSIBLE_FILE, "w").write(json.dumps(opts))
+    return [{"rc": 0}]
+
+
+def run_fake_ansible_template_fail(*args, **kwargs):
+    opts = args[1]
+    open(ANSIBLE_FILE, "w").write(json.dumps(opts))
+    return [{"rc": 1}]
+
+
+def get_ansible_output():
+    ansible_str = open(ANSIBLE_FILE).read()
+    return json.loads(ansible_str)
+
+
+class TestPayload(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+
+        global log
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        if not log:
+            from multistructlog import create_logger
+
+            log = create_logger(Config().get("logging"))
+
+    def setUp(self):
+
+        global log, test_steps, event_loop
+
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
+
+        import xossynchronizer.event_loop
+
+        reload(xossynchronizer.event_loop)
+        import xossynchronizer.backend
+
+        reload(xossynchronizer.backend)
+        import test_steps.sync_instances
+        import test_steps.sync_controller_slices
+        from xossynchronizer.modelaccessor import model_accessor
+
+        # import all class names to globals
+        for (k, v) in model_accessor.all_model_classes.items():
+            globals()[k] = v
+        b = xossynchronizer.backend.Backend(model_accessor = model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_delete_record(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save:
+            o = Instance()
+            o.name = "Sisi Pascal"
+
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
+            self.synchronizer.delete_record(o, log)
+
+            a = get_ansible_output()
+            self.assertDictContainsSubset({"delete": True, "name": o.name}, a)
+            o.save.assert_called_with(update_fields=["backend_need_reap"])
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template_fail,
+    )
+    def test_delete_record_fail(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save:
+            o = Instance()
+            o.name = "Sisi Pascal"
+
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
+
+            with self.assertRaises(Exception) as e:
+                self.synchronizer.delete_record(o, log)
+
+            self.assertEqual(
+                e.exception.message, "Nonzero rc from Ansible during delete_record"
+            )
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_sync_record(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save:
+            o = Instance()
+            o.name = "Sisi Pascal"
+
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
+            self.synchronizer.sync_record(o, log)
+
+            a = get_ansible_output()
+            self.assertDictContainsSubset({"delete": False, "name": o.name}, a)
+            o.save.assert_called_with(
+                update_fields=[
+                    "enacted",
+                    "backend_status",
+                    "backend_register",
+                    "backend_code",
+                ]
+            )
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_sync_cohort(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save, mock.patch.object(
+            ControllerSlice, "save"
+        ) as controllerslice_save:
+            cs = ControllerSlice()
+            s = Slice(name="SP SP")
+            cs.slice = s
+
+            o = Instance()
+            o.name = "Sisi Pascal"
+            o.slice = s
+
+            cohort = [cs, o]
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor = self.synchronizer.model_accessor)
+            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
+                model_accessor = self.synchronizer.model_accessor
+            )
+
+            self.synchronizer.sync_cohort(cohort, False)
+
+            a = get_ansible_output()
+            self.assertDictContainsSubset({"delete": False, "name": o.name}, a)
+            o.save.assert_called_with(
+                update_fields=[
+                    "enacted",
+                    "backend_status",
+                    "backend_register",
+                    "backend_code",
+                ]
+            )
+            cs.save.assert_called_with(
+                update_fields=[
+                    "enacted",
+                    "backend_status",
+                    "backend_register",
+                    "backend_code",
+                ]
+            )
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_deferred_exception(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save:
+            cs = ControllerSlice()
+            s = Slice(name="SP SP")
+            cs.slice = s
+            cs.force_defer = True
+
+            o = Instance()
+            o.name = "Sisi Pascal"
+            o.slice = s
+
+            cohort = [cs, o]
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
+            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
+                model_accessor=self.synchronizer.model_accessor
+            )
+
+            self.synchronizer.sync_cohort(cohort, False)
+            o.save.assert_called_with(
+                always_update_timestamp=True,
+                update_fields=["backend_status", "backend_register"],
+            )
+            self.assertEqual(cs.backend_code, 0)
+
+            self.assertIn("Force", cs.backend_status)
+            self.assertIn("Failed due to", o.backend_status)
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_backend_status(self, mock_run_template):
+        with mock.patch.object(Instance, "save") as instance_save:
+            cs = ControllerSlice()
+            s = Slice(name="SP SP")
+            cs.slice = s
+            cs.force_fail = True
+
+            o = Instance()
+            o.name = "Sisi Pascal"
+            o.slice = s
+
+            cohort = [cs, o]
+            o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
+            cs.synchronizer_step = test_steps.sync_controller_slices.SyncControllerSlices(
+                model_accessor=self.synchronizer.model_accessor)
+
+            self.synchronizer.sync_cohort(cohort, False)
+            o.save.assert_called_with(
+                always_update_timestamp=True,
+                update_fields=["backend_status", "backend_register"],
+            )
+            self.assertIn("Force", cs.backend_status)
+            self.assertIn("Failed due to", o.backend_status)
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_fetch_pending(self, mock_run_template):
+        pending_objects, pending_steps = self.synchronizer.fetch_pending()
+        pending_objects2 = list(pending_objects)
+
+        any_cs = next(
+            obj for obj in pending_objects if obj.leaf_model_name == "ControllerSlice"
+        )
+        any_instance = next(
+            obj for obj in pending_objects2 if obj.leaf_model_name == "Instance"
+        )
+
+        slice = Slice()
+        any_instance.slice = slice
+        any_cs.slice = slice
+
+        self.synchronizer.external_dependencies = []
+        cohorts = self.synchronizer.compute_dependent_cohorts(pending_objects, False)
+        flat_objects = [item for cohort in cohorts for item in cohort]
+
+        self.assertEqual(set(flat_objects), set(pending_objects))
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_fetch_pending_with_external_dependencies(
+        self, mock_run_template,
+    ):
+        pending_objects, pending_steps = self.synchronizer.fetch_pending()
+        pending_objects2 = list(pending_objects)
+
+        any_cn = next(
+            obj for obj in pending_objects if obj.leaf_model_name == "ControllerNetwork"
+        )
+        any_user = next(
+            obj for obj in pending_objects2 if obj.leaf_model_name == "User"
+        )
+
+        cohorts = self.synchronizer.compute_dependent_cohorts(pending_objects, False)
+
+        flat_objects = [item for cohort in cohorts for item in cohort]
+        self.assertEqual(set(flat_objects), set(pending_objects))
+
+        # These cannot be None, but for documentation purposes
+        self.assertIsNotNone(any_cn)
+        self.assertIsNotNone(any_user)
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_external_dependency_exception(self, mock_run_template):
+        cs = ControllerSlice()
+        s = Slice(name="SP SP")
+        cs.slice = s
+
+        o = Instance()
+        o.name = "Sisi Pascal"
+        o.slice = s
+
+        cohort = [cs, o]
+        o.synchronizer_step = None
+        o.synchronizer_step = test_steps.sync_instances.SyncInstances(model_accessor=self.synchronizer.model_accessor)
+
+        self.synchronizer.sync_cohort(cohort, False)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py
new file mode 100644
index 0000000..fdbff3c
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_run.py
@@ -0,0 +1,118 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import unittest
+import mock
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+ANSIBLE_FILE = "/tmp/payload_test"
+
+
+def run_fake_ansible_template(*args, **kwargs):
+    opts = args[1]
+    open(ANSIBLE_FILE, "w").write(json.dumps(opts))
+
+
+def get_ansible_output():
+    ansible_str = open(ANSIBLE_FILE).read()
+    return json.loads(ansible_str)
+
+
+class TestRun(unittest.TestCase):
+    def setUp(self):
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
+
+        import xossynchronizer.event_loop
+
+        reload(xossynchronizer.event_loop)
+        import xossynchronizer.backend
+
+        reload(xossynchronizer.backend)
+        from xossynchronizer.modelaccessor import model_accessor
+
+        # import all class names to globals
+        for (k, v) in model_accessor.all_model_classes.items():
+            globals()[k] = v
+
+        from xossynchronizer.modelaccessor import model_accessor
+
+        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+        try:
+            os.remove("/tmp/sync_ports")
+        except OSError:
+            pass
+        try:
+            os.remove("/tmp/delete_ports")
+        except OSError:
+            pass
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    @mock.patch(
+        "test_steps.sync_instances.ansiblesyncstep.run_template",
+        side_effect=run_fake_ansible_template,
+    )
+    def test_run_once(self, mock_run_template):
+        pending_objects, pending_steps = self.synchronizer.fetch_pending()
+        pending_objects2 = list(pending_objects)
+
+        any_cs = next(
+            obj for obj in pending_objects if obj.leaf_model_name == "ControllerSlice"
+        )
+        any_instance = next(
+            obj for obj in pending_objects2 if obj.leaf_model_name == "Instance"
+        )
+
+        slice = Slice()
+        any_instance.slice = slice
+        any_cs.slice = slice
+
+        self.synchronizer.run_once()
+
+        sync_ports = open("/tmp/sync_ports").read()
+        delete_ports = open("/tmp/delete_ports").read()
+
+        self.assertIn("successful", sync_ports)
+        self.assertIn("successful", delete_ports)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py
new file mode 100644
index 0000000..0164c5a
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_scheduler.py
@@ -0,0 +1,273 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from mock import patch
+import mock
+import pdb
+import networkx as nx
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+class TestScheduling(unittest.TestCase):
+
+    def setUp(self):
+        global mock_enumerator, event_loop
+
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
+
+        import xossynchronizer.event_loop
+        event_loop = xossynchronizer.event_loop
+
+        reload(xossynchronizer.event_loop)
+        import xossynchronizer.backend
+
+        reload(xossynchronizer.backend)
+        from xossynchronizer.modelaccessor import model_accessor
+        from mock_modelaccessor import mock_enumerator
+
+        # import all class names to globals
+        for (k, v) in model_accessor.all_model_classes.items():
+            globals()[k] = v
+
+        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_same_object_trivial(self):
+        s = Slice(pk=4)
+        t = Slice(pk=4)
+        same, t = self.synchronizer.same_object(s, t)
+        self.assertTrue(same)
+        self.assertEqual(t, event_loop.DIRECT_EDGE)
+
+    def test_same_object_trivial2(self):
+        s = Slice(pk=4)
+        t = Slice(pk=5)
+        same, t = self.synchronizer.same_object(s, t)
+        self.assertFalse(same)
+
+    def test_same_object_lst(self):
+        s = Slice(pk=5)
+        t = ControllerSlice(slice=s)
+        u = ControllerSlice(slice=s)
+
+        s.controllerslices = mock_enumerator([t, u])
+
+        same, et = self.synchronizer.same_object(s.controllerslices, u)
+        self.assertTrue(same)
+        self.assertEqual(et, event_loop.PROXY_EDGE)
+
+        same, et = self.synchronizer.same_object(s.controllerslices, t)
+
+        self.assertTrue(same)
+        self.assertEqual(et, event_loop.PROXY_EDGE)
+
+    def test_same_object_lst_dc(self):
+        r = Slice(pk=4)
+        s = Slice(pk=5)
+        t = ControllerSlice(slice=r)
+        u = ControllerSlice(slice=s)
+
+        s.controllerslices = mock_enumerator([u])
+
+        same, et = self.synchronizer.same_object(s.controllerslices, t)
+        self.assertFalse(same)
+
+        same, et = self.synchronizer.same_object(s.controllerslices, u)
+        self.assertTrue(same)
+
+    def test_concrete_path_no_model_path(self):
+        p = Port()
+        n = NetworkParameter()
+        verdict, _ = self.synchronizer.concrete_path_exists(p, n)
+        self.assertFalse(verdict)
+
+    def test_concrete_no_object_path_adjacent(self):
+        p = Instance()
+        s1 = Slice()
+        s2 = Slice()
+        p.slice = s2
+        verdict, _ = self.synchronizer.concrete_path_exists(p, s1)
+
+        self.assertFalse(verdict)
+
+    def test_concrete_object_path_adjacent(self):
+        p = Instance()
+        s = Slice()
+        p.slice = s
+        verdict, edge_type = self.synchronizer.concrete_path_exists(p, s)
+
+        self.assertTrue(verdict)
+        self.assertEqual(edge_type, event_loop.DIRECT_EDGE)
+
+    def test_concrete_object_controller_path_adjacent(self):
+        p = Instance()
+        q = Instance()
+        cs = ControllerSlice()
+        cs2 = ControllerSlice()
+        s1 = Slice()
+        s2 = Slice()
+        p.slice = s1
+        q.slice = s2
+        cs.slice = s1
+        s1.controllerslices = mock_enumerator([cs])
+        s2.controllerslices = mock_enumerator([])
+
+        verdict1, edge_type1 = self.synchronizer.concrete_path_exists(p, cs)
+        verdict2, _ = self.synchronizer.concrete_path_exists(q, cs)
+        verdict3, _ = self.synchronizer.concrete_path_exists(p, cs2)
+
+        self.assertTrue(verdict1)
+        self.assertFalse(verdict2)
+        self.assertFalse(verdict3)
+
+        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
+        #self.assertEqual(edge_type1, event_loop.PROXY_EDGE)
+
+    def test_concrete_object_controller_path_distant(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        ct = ControllerSite()
+        ct.site = t
+        p.slice = s
+        s.site = t
+        verdict = self.synchronizer.concrete_path_exists(p, ct)
+        self.assertTrue(verdict)
+
+    def test_concrete_object_path_distant(self):
+        p = Instance()
+        s = Slice()
+        t = Site()
+        p.slice = s
+        s.site = t
+        verdict = self.synchronizer.concrete_path_exists(p, t)
+        self.assertTrue(verdict)
+
+    def test_concrete_no_object_path_distant(self):
+        p = Instance()
+        s = Slice()
+        s.controllerslice = mock_enumerator([])
+
+        t = Site()
+        t.controllersite = mock_enumerator([])
+
+        ct = ControllerSite()
+        ct.site = Site()
+        p.slice = s
+        s.site = t
+
+        verdict, _ = self.synchronizer.concrete_path_exists(p, ct)
+        self.assertFalse(verdict)
+
+    def test_cohorting_independent(self):
+        i = Image()
+
+        p = Slice()
+        c = Instance()
+        c.slice = None
+        c.image = None
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c], False)
+        self.assertEqual(len(cohorts), 3)
+
+    def test_cohorting_related(self):
+        i = Image()
+        p = Port()
+        c = Instance()
+        c.image = i
+        s = ControllerSlice()
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], False)
+        self.assertIn([i, c], cohorts)
+        self.assertIn([p], cohorts)
+        self.assertIn([s], cohorts)
+
+    def test_cohorting_related_multi(self):
+        i = Image()
+        p = Port()
+        c = Instance()
+        c.image = i
+        cs = ControllerSlice()
+        s = Slice()
+        cs.slice = s
+        s.controllerslices = mock_enumerator([cs])
+        c.slice = s
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], False)
+
+        big_cohort = max(cohorts, key=len)
+        self.assertGreater(big_cohort.index(c), big_cohort.index(i))
+        # TODO(smbaker): This assert was found to be failing. Understand whether the library or the test is at fault.
+        #self.assertGreater(big_cohort.index(cs), big_cohort.index(s))
+        self.assertIn([p], cohorts)
+
+    def test_cohorting_related_multi_delete(self):
+        i = Image()
+        p = Port()
+        c = Instance()
+        c.image = i
+        cs = ControllerSlice()
+        s = Slice()
+        cs.slice = s
+        c.slice = s
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s, cs], True)
+
+        big_cohort = max(cohorts, key=len)
+        self.assertGreater(big_cohort.index(i), big_cohort.index(c))
+        self.assertGreater(big_cohort.index(s), big_cohort.index(cs))
+        self.assertIn([p], cohorts)
+
+    def test_cohorting_related_delete(self):
+        i = Image()
+        p = Port()
+        c = Instance()
+        c.image = i
+        s = ControllerSlice()
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([i, p, c, s], True)
+        self.assertIn([c, i], cohorts)
+        self.assertIn([p], cohorts)
+        self.assertIn([s], cohorts)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_services.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_services.py
new file mode 100644
index 0000000..3ff1c43
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_services.py
@@ -0,0 +1,87 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+from mock import patch
+import mock
+import pdb
+import networkx as nx
+
+import os
+import sys
+
+test_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+sync_lib_dir = os.path.join(test_path, "..", "xossynchronizer")
+xos_dir = os.path.join(test_path, "..", "..", "..", "xos")
+
+
+class TestServices(unittest.TestCase):
+    def setUp(self):
+        self.sys_path_save = sys.path
+        self.cwd_save = os.getcwd()
+
+        config = os.path.join(test_path, "test_config.yaml")
+        from xosconfig import Config
+
+        Config.clear()
+        Config.init(config, "synchronizer-config-schema.yaml")
+
+        from xossynchronizer.mock_modelaccessor_build import (
+            build_mock_modelaccessor,
+        )
+
+        build_mock_modelaccessor(sync_lib_dir, xos_dir, services_dir=None, service_xprotos=[])
+
+        os.chdir(os.path.join(test_path, ".."))  # config references xos-synchronizer-tests/model-deps
+
+        import xossynchronizer.event_loop
+
+        reload(xossynchronizer.event_loop)
+        import xossynchronizer.backend
+
+        reload(xossynchronizer.backend)
+        from xossynchronizer.modelaccessor import model_accessor
+
+        # import all class names to globals
+        for (k, v) in model_accessor.all_model_classes.items():
+            globals()[k] = v
+
+        b = xossynchronizer.backend.Backend(model_accessor=model_accessor)
+        steps_dir = Config.get("steps_dir")
+        self.steps = b.load_sync_step_modules(steps_dir)
+        self.synchronizer = xossynchronizer.event_loop.XOSObserver(self.steps, model_accessor)
+
+    def tearDown(self):
+        sys.path = self.sys_path_save
+        os.chdir(self.cwd_save)
+
+    def test_service_models(self):
+        s = Service()
+        a = ServiceInstance(owner=s)
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], False)
+        self.assertIn([s, a], cohorts)
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], False)
+        self.assertIn([s, a], cohorts)
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([a, s], True)
+        self.assertIn([a, s], cohorts)
+
+        cohorts = self.synchronizer.compute_dependent_cohorts([s, a], True)
+        self.assertIn([a, s], cohorts)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/__init__.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py
new file mode 100644
index 0000000..8cbabcb
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_container.py
@@ -0,0 +1,52 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from xossynchronizer.steps.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from xossynchronizer.steps.syncstep import DeferredException
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__), "..")
+sys.path.insert(0, parentdir)
+
+
+class SyncContainer(SyncInstanceUsingAnsible):
+    observes = "Instance"
+    template_name = "sync_container.yaml"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncContainer, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deletion=False):
+        i = self.model_accessor.Instance()
+        i.name = "Spectacular Sponge"
+        j = self.model_accessor.Instance()
+        j.name = "Spontaneous Tent"
+        k = self.model_accessor.Instance()
+        k.name = "Embarrassed Cat"
+
+        objs = [i, j, k]
+        return objs
+
+    def sync_record(self, o):
+        pass
+
+    def delete_record(self, o):
+        pass
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py
new file mode 100644
index 0000000..ef85983
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_images.py
@@ -0,0 +1,52 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncControllerImages(SyncStep):
+    observes = "ControllerImages"
+    requested_interval = 0
+    playbook = "sync_controller_images.yaml"
+
+    def fetch_pending(self, deleted):
+        ci = self.model_accessor.ControllerImages()
+        i = self.model_accessor.Image()
+        i.name = "Lush Loss"
+        ci.i = i
+        return [ci]
+
+    def map_sync_inputs(self, controller_image):
+        image_fields = {
+            "endpoint": controller_image.controller.auth_url,
+            "endpoint_v3": controller_image.controller.auth_url_v3,
+            "admin_user": controller_image.controller.admin_user,
+            "admin_password": controller_image.controller.admin_password,
+            "domain": controller_image.controller.domain,
+            "name": controller_image.image.name,
+            "filepath": controller_image.image.path,
+            # name of ansible playbook
+            "ansible_tag": "%s@%s"
+            % (controller_image.image.name, controller_image.controller.name),
+        }
+
+        return image_fields
+
+    def map_sync_outputs(self, controller_image, res):
+        image_id = res[0]["id"]
+        controller_image.glance_image_id = image_id
+        controller_image.backend_status = "1 - OK"
+        controller_image.save()
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py
new file mode 100644
index 0000000..55dfe4e
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_networks.py
@@ -0,0 +1,58 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+import struct
+import socket
+from netaddr import IPAddress, IPNetwork
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncControllerNetworks(SyncStep):
+    requested_interval = 0
+    observes = "ControllerNetwork"
+    external_dependencies = ["User"]
+    playbook = "sync_controller_networks.yaml"
+
+    def fetch_pending(self, deleted):
+        ci = self.model_accessor.ControllerNetwork()
+        i = self.model_accessor.Network()
+        i.name = "Lush Loss"
+        s = self.model_accessor.Slice()
+        s.name = "Ghastly Notebook"
+        i.owner = s
+        ci.i = i
+        return [ci]
+
+    def map_sync_outputs(self, controller_network, res):
+        network_id = res[0]["network"]["id"]
+        subnet_id = res[1]["subnet"]["id"]
+        controller_network.net_id = network_id
+        controller_network.subnet = self.cidr
+        controller_network.subnet_id = subnet_id
+        controller_network.backend_status = "1 - OK"
+        if not controller_network.segmentation_id:
+            controller_network.segmentation_id = str(
+                self.get_segmentation_id(controller_network)
+            )
+        controller_network.save()
+
+    def map_sync_inputs(self, controller_network):
+        pass
+
+    def map_delete_inputs(self, controller_network):
+        network_fields = {"endpoint": None, "delete": True}
+
+        return network_fields
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py
new file mode 100644
index 0000000..e286ef8
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_site_privileges.py
@@ -0,0 +1,105 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+import json
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncControllerSitePrivileges(SyncStep):
+    requested_interval = 0
+    observes = "ControllerSitePrivilege"
+    playbook = "sync_controller_users.yaml"
+
+    def map_sync_inputs(self, controller_site_privilege):
+        controller_register = json.loads(
+            controller_site_privilege.controller.backend_register
+        )
+        if not controller_site_privilege.controller.admin_user:
+            return
+
+        roles = [controller_site_privilege.site_privilege.role.role]
+        # setup user home site roles at controller
+        if not controller_site_privilege.site_privilege.user.site:
+            raise Exception(
+                "Siteless user %s" % controller_site_privilege.site_privilege.user.email
+            )
+        else:
+            # look up tenant id for the user's site at the controller
+            # ctrl_site_deployments = SiteDeployment.objects.filter(
+            #  site_deployment__site=controller_site_privilege.user.site,
+            #  controller=controller_site_privilege.controller)
+
+            # if ctrl_site_deployments:
+            #    # need the correct tenant id for site at the controller
+            #    tenant_id = ctrl_site_deployments[0].tenant_id
+            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
+            user_fields = {
+                "endpoint": controller_site_privilege.controller.auth_url,
+                "endpoint_v3": controller_site_privilege.controller.auth_url_v3,
+                "domain": controller_site_privilege.controller.domain,
+                "name": controller_site_privilege.site_privilege.user.email,
+                "email": controller_site_privilege.site_privilege.user.email,
+                "password": controller_site_privilege.site_privilege.user.remote_password,
+                "admin_user": controller_site_privilege.controller.admin_user,
+                "admin_password": controller_site_privilege.controller.admin_password,
+                "ansible_tag": "%s@%s"
+                % (
+                    controller_site_privilege.site_privilege.user.email.replace(
+                        "@", "-at-"
+                    ),
+                    controller_site_privilege.controller.name,
+                ),
+                "admin_tenant": controller_site_privilege.controller.admin_tenant,
+                "roles": roles,
+                "tenant": controller_site_privilege.site_privilege.site.login_base,
+            }
+
+            return user_fields
+
+    def map_sync_outputs(self, controller_site_privilege, res):
+        # results is an array in which each element corresponds to an
+        # "ok" string received per operation. If we get as many oks as
+        # the number of operations we issued, that means a grand success.
+        # Otherwise, the number of oks tell us which operation failed.
+        controller_site_privilege.role_id = res[0]["id"]
+        controller_site_privilege.save()
+
+    def delete_record(self, controller_site_privilege):
+        controller_register = json.loads(
+            controller_site_privilege.controller.backend_register
+        )
+        if controller_register.get("disabled", False):
+            raise InnocuousException(
+                "Controller %s is disabled" % controller_site_privilege.controller.name
+            )
+
+        if controller_site_privilege.role_id:
+            driver = self.driver.admin_driver(
+                controller=controller_site_privilege.controller
+            )
+            user = ControllerUser.objects.get(
+                controller=controller_site_privilege.controller,
+                user=controller_site_privilege.site_privilege.user,
+            )
+            site = ControllerSite.objects.get(
+                controller=controller_site_privilege.controller,
+                user=controller_site_privilege.site_privilege.user,
+            )
+            driver.delete_user_role(
+                user.kuser_id,
+                site.tenant_id,
+                controller_site_privilege.site_prvilege.role.role,
+            )
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py
new file mode 100644
index 0000000..24aa76f
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_sites.py
@@ -0,0 +1,86 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+import json
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncControllerSites(SyncStep):
+    requested_interval = 0
+    observes = "ControllerSite"
+    playbook = "sync_controller_sites.yaml"
+
+    def fetch_pending(self, deleted=False):
+        lobjs = super(SyncControllerSites, self).fetch_pending(deleted)
+
+        if not deleted:
+            # filter out objects with null controllers
+            lobjs = [x for x in lobjs if x.controller]
+
+        return lobjs
+
+    def map_sync_inputs(self, controller_site):
+        tenant_fields = {
+            "endpoint": controller_site.controller.auth_url,
+            "endpoint_v3": controller_site.controller.auth_url_v3,
+            "domain": controller_site.controller.domain,
+            "admin_user": controller_site.controller.admin_user,
+            "admin_password": controller_site.controller.admin_password,
+            "admin_tenant": controller_site.controller.admin_tenant,
+            # name of ansible playbook
+            "ansible_tag": "%s@%s"
+            % (controller_site.site.login_base, controller_site.controller.name),
+            "tenant": controller_site.site.login_base,
+            "tenant_description": controller_site.site.name,
+        }
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_site, res):
+        controller_site.tenant_id = res[0]["id"]
+        controller_site.backend_status = "1 - OK"
+        controller_site.save()
+
+    def delete_record(self, controller_site):
+        controller_register = json.loads(controller_site.controller.backend_register)
+        if controller_register.get("disabled", False):
+            raise InnocuousException(
+                "Controller %s is disabled" % controller_site.controller.name
+            )
+
+        if controller_site.tenant_id:
+            driver = self.driver.admin_driver(controller=controller_site.controller)
+            driver.delete_tenant(controller_site.tenant_id)
+
+        """
+        Ansible does not support tenant deletion yet
+
+        import pdb
+        pdb.set_trace()
+        template = os_template_env.get_template('delete_controller_sites.yaml')
+        tenant_fields = {'endpoint':controller_site.controller.auth_url,
+                         'admin_user': controller_site.controller.admin_user,
+                         'admin_password': controller_site.controller.admin_password,
+                         'admin_tenant': 'admin',
+                         'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
+                         'tenant': controller_site.controller_site.site.login_base,
+                         'delete': True}
+
+        rendered = template.render(tenant_fields)
+        res = run_template('sync_controller_sites.yaml', tenant_fields)
+
+        if (len(res)!=1):
+                raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
+        """
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py
new file mode 100644
index 0000000..09b63e6
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slice_privileges.py
@@ -0,0 +1,93 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+import json
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncControllerSlicePrivileges(SyncStep):
+    requested_interval = 0
+    observes = "ControllerSlicePrivilege"
+    playbook = "sync_controller_users.yaml"
+
+    def map_sync_inputs(self, controller_slice_privilege):
+        if not controller_slice_privilege.controller.admin_user:
+            return
+
+        template = os_template_env.get_template("sync_controller_users.yaml")
+        roles = [controller_slice_privilege.slice_privilege.role.role]
+        # setup user home slice roles at controller
+        if not controller_slice_privilege.slice_privilege.user.site:
+            raise Exception(
+                "Sliceless user %s"
+                % controller_slice_privilege.slice_privilege.user.email
+            )
+        else:
+            user_fields = {
+                "endpoint": controller_slice_privilege.controller.auth_url,
+                "endpoint_v3": controller_slice_privilege.controller.auth_url_v3,
+                "domain": controller_slice_privilege.controller.domain,
+                "name": controller_slice_privilege.slice_privilege.user.email,
+                "email": controller_slice_privilege.slice_privilege.user.email,
+                "password": controller_slice_privilege.slice_privilege.user.remote_password,
+                "admin_user": controller_slice_privilege.controller.admin_user,
+                "admin_password": controller_slice_privilege.controller.admin_password,
+                "ansible_tag": "%s@%s@%s"
+                % (
+                    controller_slice_privilege.slice_privilege.user.email.replace(
+                        "@", "-at-"
+                    ),
+                    controller_slice_privilege.slice_privilege.slice.name,
+                    controller_slice_privilege.controller.name,
+                ),
+                "admin_tenant": controller_slice_privilege.controller.admin_tenant,
+                "roles": roles,
+                "tenant": controller_slice_privilege.slice_privilege.slice.name,
+            }
+            return user_fields
+
+    def map_sync_outputs(self, controller_slice_privilege, res):
+        controller_slice_privilege.role_id = res[0]["id"]
+        controller_slice_privilege.save()
+
+    def delete_record(self, controller_slice_privilege):
+        controller_register = json.loads(
+            controller_slice_privilege.controller.backend_register
+        )
+        if controller_register.get("disabled", False):
+            raise InnocuousException(
+                "Controller %s is disabled" % controller_slice_privilege.controller.name
+            )
+
+        if controller_slice_privilege.role_id:
+            driver = self.driver.admin_driver(
+                controller=controller_slice_privilege.controller
+            )
+            user = ControllerUser.objects.filter(
+                controller_id=controller_slice_privilege.controller.id,
+                user_id=controller_slice_privilege.slice_privilege.user.id,
+            )
+            user = user[0]
+            slice = ControllerSlice.objects.filter(
+                controller_id=controller_slice_privilege.controller.id,
+                user_id=controller_slice_privilege.slice_privilege.user.id,
+            )
+            slice = slice[0]
+            driver.delete_user_role(
+                user.kuser_id,
+                slice.tenant_id,
+                controller_slice_privilege.slice_prvilege.role.role,
+            )
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py
new file mode 100644
index 0000000..31c62f1
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_slices.py
@@ -0,0 +1,45 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+from xossynchronizer.steps.syncstep import DeferredException
+from xossynchronizer.steps.ansiblesyncstep import AnsibleSyncStep
+
+class SyncControllerSlices(AnsibleSyncStep):
+    requested_interval = 0
+    observes = "ControllerSlice"
+    playbook = "sync_controller_slices.yaml"
+
+    def map_sync_inputs(self, controller_slice):
+        if getattr(controller_slice, "force_fail", None):
+            raise Exception("Forced failure")
+        elif getattr(controller_slice, "force_defer", None):
+            raise DeferredException("Forced defer")
+
+        tenant_fields = {"endpoint": "endpoint", "name": "Flagrant Haircut"}
+
+        return tenant_fields
+
+    def map_sync_outputs(self, controller_slice, res):
+        controller_slice.save()
+
+    def map_delete_inputs(self, controller_slice):
+        tenant_fields = {
+            "endpoint": "endpoint",
+            "name": "Conscientious Plastic",
+            "delete": True,
+        }
+        return tenant_fields
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py
new file mode 100644
index 0000000..a039257
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_controller_users.py
@@ -0,0 +1,70 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+from xossynchronizer.steps.syncstep import SyncStep
+
+
+class SyncControllerUsers(SyncStep):
+    requested_interval = 0
+    observes = "ControllerUser"
+    playbook = "sync_controller_users.yaml"
+
+    def map_sync_inputs(self, controller_user):
+        if not controller_user.controller.admin_user:
+            return
+
+        # All users will have at least the 'user' role at their home site/tenant.
+        # We must also check if the user should have the admin role
+
+        roles = ["user"]
+        if controller_user.user.is_admin:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            roles.append(driver.get_admin_role().name)
+
+        # setup user home site roles at controller
+        if not controller_user.user.site:
+            raise Exception("Siteless user %s" % controller_user.user.email)
+        else:
+            user_fields = {
+                "endpoint": controller_user.controller.auth_url,
+                "endpoint_v3": controller_user.controller.auth_url_v3,
+                "domain": controller_user.controller.domain,
+                "name": controller_user.user.email,
+                "email": controller_user.user.email,
+                "password": controller_user.user.remote_password,
+                "admin_user": controller_user.controller.admin_user,
+                "admin_password": controller_user.controller.admin_password,
+                "ansible_tag": "%s@%s"
+                % (
+                    controller_user.user.email.replace("@", "-at-"),
+                    controller_user.controller.name,
+                ),
+                "admin_project": controller_user.controller.admin_tenant,
+                "roles": roles,
+                "project": controller_user.user.site.login_base,
+            }
+            return user_fields
+
+    def map_sync_outputs(self, controller_user, res):
+        controller_user.kuser_id = res[0]["user"]["id"]
+        controller_user.backend_status = "1 - OK"
+        controller_user.save()
+
+    def delete_record(self, controller_user):
+        if controller_user.kuser_id:
+            driver = self.driver.admin_driver(controller=controller_user.controller)
+            driver.delete_user(controller_user.kuser_id)
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_images.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_images.py
new file mode 100644
index 0000000..b3ed9bd
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_images.py
@@ -0,0 +1,24 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from xossynchronizer.steps.syncstep import SyncStep
+
+class SyncImages(SyncStep):
+    requested_interval = 0
+    observes = ["Image"]
+
+    def sync_record(self, role):
+        # do nothing
+        pass
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py
new file mode 100644
index 0000000..1a70884
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_instances.py
@@ -0,0 +1,59 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+from xossynchronizer.steps import ansiblesyncstep
+
+
+def escape(s):
+    s = s.replace("\n", r"\n").replace('"', r"\"")
+    return s
+
+
+class SyncInstances(ansiblesyncstep.AnsibleSyncStep):
+    requested_interval = 0
+    # This observes is intentionally a list of one string, to test steps where observes is a list of strings.
+    observes = ["Instance"]
+    playbook = "sync_instances.yaml"
+
+    def fetch_pending(self, deletion=False):
+        objs = super(SyncInstances, self).fetch_pending(deletion)
+        objs = [x for x in objs if x.isolation == "vm"]
+        return objs
+
+    def map_sync_inputs(self, instance):
+        inputs = {}
+        metadata_update = {}
+
+        fields = {"name": instance.name, "delete": False}
+        return fields
+
+    def map_sync_outputs(self, instance, res):
+        instance.save()
+
+    def map_delete_inputs(self, instance):
+        input = {
+            "endpoint": "endpoint",
+            "admin_user": "admin_user",
+            "admin_password": "admin_password",
+            "project_name": "project_name",
+            "tenant": "tenant",
+            "tenant_description": "tenant_description",
+            "name": instance.name,
+            "ansible_tag": "ansible_tag",
+            "delete": True,
+        }
+
+        return input
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_ports.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_ports.py
new file mode 100644
index 0000000..a7eb7d1
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_ports.py
@@ -0,0 +1,37 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+from xossynchronizer.steps.syncstep import SyncStep
+
+
+class SyncPort(SyncStep):
+    requested_interval = 0
+
+    # This observes is intentionally a string, to test steps where observes is a string
+    observes = "Port"
+
+    def call(self, failed=[], deletion=False):
+        if deletion:
+            self.delete_ports()
+        else:
+            self.sync_ports()
+
+    def sync_ports(self):
+        open("/tmp/sync_ports", "w").write("Sync successful")
+
+    def delete_ports(self):
+        open("/tmp/delete_ports", "w").write("Delete successful")
diff --git a/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py
new file mode 100644
index 0000000..1bd2d0f
--- /dev/null
+++ b/lib/xos-synchronizer/xos-synchronizer-tests/test_steps/sync_roles.py
@@ -0,0 +1,34 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+import base64
+from xossynchronizer.steps.syncstep import SyncStep
+from mock_modelaccessor import *
+
+
+class SyncRoles(SyncStep):
+    requested_interval = 0
+
+    # This observes is intentionally a list of three classes, to test steps where observes is a list of classes.
+    observes = [SiteRole, SliceRole, ControllerRole]
+
+    def sync_record(self, role):
+        if not role.enacted:
+            controllers = Controller.objects.all()
+            for controller in controllers:
+                driver = self.driver.admin_driver(controller=controller)
+                driver.create_role(role.role)
+            role.save()