CORD-1243 move vsg model policies to model policy framework

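Moves the vSG model-policy logic (manage_container, manage_vrouter,
cleanup_orphans) out of VSGTenant's save() and delete() hooks and into
a VSGTenantPolicy class in the synchronizer's model policy framework.
The VSG sync step now defers syncing and deleting a VSGTenant until the
model policy has run on it.
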
Change-Id: I0874b6ec7504e9e3964580cd1fac8aedaf26f7e9
diff --git a/xos/attic/vsgtenant_bottom.py b/xos/attic/vsgtenant_bottom.py
deleted file mode 100644
index 767a903..0000000
--- a/xos/attic/vsgtenant_bottom.py
+++ /dev/null
@@ -1,11 +0,0 @@
-def model_policy_vcpe(pk):
-    # TODO: this should be made in to a real model_policy
-    with transaction.atomic():
-        vcpe = VSGTenant.objects.select_for_update().filter(pk=pk)
-        if not vcpe:
-            return
-        vcpe = vcpe[0]
-        vcpe.manage_container()
-        vcpe.manage_vrouter()
-        vcpe.cleanup_orphans()
-
diff --git a/xos/attic/vsgtenant_model.py b/xos/attic/vsgtenant_model.py
index 3b58df4..d9e8a5d 100644
--- a/xos/attic/vsgtenant_model.py
+++ b/xos/attic/vsgtenant_model.py
@@ -97,196 +97,6 @@
 def is_synced(self, value):
     pass
 
-def get_vrouter_service(self):
-    vrouterServices = VRouterService.objects.all()
-    if not vrouterServices:
-        raise XOSConfigurationError("No VROUTER Services available")
-    return vrouterServices[0]
-
-def manage_vrouter(self):
-    # Each vCPE object owns exactly one vRouterTenant object
-
-    if self.deleted:
-        return
-
-    if self.vrouter is None:
-        vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_vsg", subscriber_tenant = self)
-        vrouter.caller = self.creator
-        vrouter.save()
-
-def cleanup_vrouter(self):
-    if self.vrouter:
-        # print "XXX cleanup vrouter", self.vrouter
-        self.vrouter.delete()
-
-def cleanup_orphans(self):
-    # ensure vCPE only has one vRouter
-    cur_vrouter = self.vrouter
-    for vrouter in list(self.get_subscribed_tenants(VRouterTenant)):
-        if (not cur_vrouter) or (vrouter.id != cur_vrouter.id):
-            # print "XXX clean up orphaned vrouter", vrouter
-            vrouter.delete()
-
-    if self.orig_instance_id and (self.orig_instance_id != self.get_attribute("instance_id")):
-        instances=Instance.objects.filter(id=self.orig_instance_id)
-        if instances:
-            # print "XXX clean up orphaned instance", instances[0]
-            instances[0].delete()
-
-def get_slice(self):
-    if not self.provider_service.slices.count():
-        print self, "dio porco"
-        raise XOSConfigurationError("The service has no slices")
-    slice = self.provider_service.slices.all()[0]
-    return slice
-
-def get_vsg_service(self):
-    return VSGService.objects.get(id=self.provider_service.id)
-
-def find_instance_for_s_tag(self, s_tag):
-    tags = Tag.objects.filter(name="s_tag", value=s_tag)
-    if tags:
-        return Tag.get_content_object(tags[0].content_type, tags[0].object_id)
-
-    return None
-
-def find_or_make_instance_for_s_tag(self, s_tag):
-    instance = self.find_instance_for_s_tag(self.volt.s_tag)
-    if instance:
-        return instance
-
-    flavors = Flavor.objects.filter(name="m1.small")
-    if not flavors:
-        raise XOSConfigurationError("No m1.small flavor")
-
-    slice = self.provider_service.slices.all()[0]
-
-    if slice.default_isolation == "container_vm":
-        (node, parent) = ContainerVmScheduler(slice).pick()
-    else:
-        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service().node_label).pick()
-
-    instance = Instance(slice = slice,
-                    node = node,
-                    image = self.image,
-                    creator = self.creator,
-                    deployment = node.site_deployment.deployment,
-                    flavor = flavors[0],
-                    isolation = slice.default_isolation,
-                    parent = parent)
-
-    self.save_instance(instance)
-
-    return instance
-
-def manage_container(self):
-    from core.models import Instance, Flavor
-
-    if self.deleted:
-        return
-
-    # For container or container_vm isolation, use what TenantWithCotnainer
-    # provides us
-    slice = self.get_slice()
-    if slice.default_isolation in ["container_vm", "container"]:
-        super(VSGTenant,self).manage_container()
-        return
-
-    if not self.volt:
-        raise XOSConfigurationError("This vCPE container has no volt")
-
-    if self.instance:
-        # We're good.
-        return
-
-    instance = self.find_or_make_instance_for_s_tag(self.volt.s_tag)
-    self.instance = instance
-    super(TenantWithContainer, self).save()
-
-def cleanup_container(self):
-    if self.get_slice().default_isolation in ["container_vm", "container"]:
-        super(VSGTenant,self).cleanup_container()
-
-    # To-do: cleanup unused instances
-    pass
-
-def find_or_make_port(self, instance, network, **kwargs):
-    port = Port.objects.filter(instance=instance, network=network)
-    if port:
-        port = port[0]
-    else:
-        port = Port(instance=instance, network=network, **kwargs)
-        port.save()
-    return port
-
-def get_lan_network(self, instance):
-    slice = self.provider_service.slices.all()[0]
-    # there should only be one network private network, and its template should not be the management template
-    lan_networks = [x for x in slice.networks.all() if x.template.visibility=="private" and (not "management" in x.template.name)]
-    if len(lan_networks)>1:
-        raise XOSProgrammingError("The vSG slice should only have one non-management private network")
-    if not lan_networks:
-        raise XOSProgrammingError("No lan_network")
-    return lan_networks[0]
-
-def port_set_parameter(self, port, name, value):
-    instance_type = port.get_content_type_key()
-    existing_params = NetworkParameter.objects.filter(parameter__name=name, content_type=instance_type, object_id=port.id)
-    if existing_params:
-        p=existing_params[0]
-        p.value = value
-        p.save()
-    else:
-        pt = NetworkParameterType.objects.get(name=name)
-        p = NetworkParameter(parameter=pt, content_type=instance_type, object_id=port.id, value=value)
-        p.save()
-
-def save_instance(self, instance):
-    with transaction.atomic():
-        instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
-        super(VSGTenant, self).save_instance(instance)
-
-        if instance.isolation in ["container", "container_vm"]:
-            lan_network = self.get_lan_network(instance)
-            port = self.find_or_make_port(instance, lan_network, ip="192.168.0.1", port_id="unmanaged")
-            self.port_set_parameter(port, "c_tag", self.volt.c_tag)
-            self.port_set_parameter(port, "s_tag", self.volt.s_tag)
-            self.port_set_parameter(port, "device", "eth1")
-            self.port_set_parameter(port, "bridge", "br-lan")
-
-            wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
-            if not wan_networks:
-                raise XOSProgrammingError("No wan_network")
-            port = self.find_or_make_port(instance, wan_networks[0])
-            self.port_set_parameter(port, "next_hop", value="10.0.1.253")   # FIX ME
-            self.port_set_parameter(port, "device", "eth0")
-
-        if instance.isolation in ["vm"]:
-            lan_network = self.get_lan_network(instance)
-            port = self.find_or_make_port(instance, lan_network)
-            self.port_set_parameter(port, "c_tag", self.volt.c_tag)
-            self.port_set_parameter(port, "s_tag", self.volt.s_tag)
-            self.port_set_parameter(port, "neutron_port_name", "stag-%s" % self.volt.s_tag)
-            port.save()
-
-        # tag the instance with the s-tag, so we can easily find the
-        # instance later
-        if self.volt and self.volt.s_tag:
-            tags = Tag.objects.filter(name="s_tag", value=self.volt.s_tag)
-            if not tags:
-                tag = Tag(service=self.provider_service, content_type=instance.get_content_type_key(), object_id=instance.id, name="s_tag", value=self.volt.s_tag)
-                tag.save()
-
-        # VTN-CORD needs a WAN address for the VM, so that the VM can
-        # be configured.
-        tags = Tag.objects.filter(content_type=instance.get_content_type_key(), object_id=instance.id, name="vm_vrouter_tenant")
-        if not tags:
-            vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_vsg", subscriber_service = self.provider_service)
-            vrouter.set_attribute("tenant_for_instance_id", instance.id)
-            vrouter.save()
-            tag = Tag(service=self.provider_service, content_type=instance.get_content_type_key(), object_id=instance.id, name="vm_vrouter_tenant", value="%d" % vrouter.id)
-            tag.save()
-
 def save(self, *args, **kwargs):
     if not self.creator:
         if not getattr(self, "caller", None):
@@ -297,10 +107,7 @@
             raise XOSProgrammingError("VSGTenant's self.creator was not set")
 
     super(VSGTenant, self).save(*args, **kwargs)
-    model_policy_vcpe(self.pk)
 
 def delete(self, *args, **kwargs):
-    self.cleanup_vrouter()
-    self.cleanup_container()
     super(VSGTenant, self).delete(*args, **kwargs)
 
diff --git a/xos/synchronizer/model_policies/model_policy_vsgtenant.py b/xos/synchronizer/model_policies/model_policy_vsgtenant.py
new file mode 100644
index 0000000..cf7be71
--- /dev/null
+++ b/xos/synchronizer/model_policies/model_policy_vsgtenant.py
@@ -0,0 +1,174 @@
+from synchronizers.new_base.modelaccessor import *
+from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import TenantWithContainerPolicy, LeastLoadedNodeScheduler
+from synchronizers.new_base.exceptions import *
+
+class VSGTenantPolicy(TenantWithContainerPolicy):
+    model_name = "VSGTenant"
+
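+    # The model-policy engine calls handle_create/handle_update/handle_delete
+    # when a VSGTenant is created, updated, or deleted; the work that used to
+    # run inline in VSGTenant.save() and delete() now happens here.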
+    def handle_create(self, tenant):
+        return self.handle_update(tenant)
+
+    def handle_update(self, tenant):
+        self.manage_container(tenant)
+        self.manage_vrouter(tenant)
+        self.cleanup_orphans(tenant)
+
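+    # mirrors the old cleanup_vrouter(): the tenant's vRouterTenant is torn
+    # down along with the tenant itself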
+    def handle_delete(self, tenant):
+        if tenant.vrouter:
+            tenant.vrouter.delete()
+
+    def manage_vrouter(self, tenant):
+        if tenant.deleted:
+            return
+
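+        # Each vSG tenant owns exactly one vRouterTenant object.
+        # allocate_public_service_instance() (provided by the policy framework)
+        # takes the place of the old get_vrouter_service().get_tenant() call,
+        # handing out an address from the "addresses_vsg" pool.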
+        if tenant.vrouter is None:
+            vrouter = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=tenant)
+            vrouter.save()
+
+    def cleanup_orphans(self, tenant):
+        # ensure vSG only has one vRouter
+        cur_vrouter = tenant.vrouter
+        for vrouter in list(VRouterTenant.objects.filter(subscriber_tenant_id=tenant.id)):  # TODO: Hardcoded dependency
+            if (not cur_vrouter) or (vrouter.id != cur_vrouter.id):
+                # print "XXX clean up orphaned vrouter", vrouter
+                vrouter.delete()
+
+    def get_vsg_service(self, tenant):
+        return VSGService.objects.get(id=tenant.provider_service.id)
+
+    def find_instance_for_s_tag(self, s_tag):
+        tags = Tag.objects.filter(name="s_tag", value=s_tag)
+        if tags:
+            return tags[0].content_object
+
+        return None
+
+    def find_or_make_instance_for_s_tag(self, tenant, s_tag):
+        instance = self.find_instance_for_s_tag(tenant.volt.s_tag)
+        if instance:
+            if instance.no_sync:
+                # if no_sync is still set, then perhaps we failed while saving it and need to retry.
+                self.save_instance(tenant, instance)
+            return instance
+
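+        # get_image() is inherited from the TenantWithContainerPolicy base class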
+        desired_image = self.get_image(tenant)
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise SynchronizerConfigurationError("No m1.small flavor")
+
+        slice = tenant.provider_service.slices.first()
+
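+        # place the instance on the least-loaded node that carries the vSG
+        # service's node_label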
+        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(tenant).node_label).pick()
+
+        assert (slice is not None)
+        assert (node is not None)
+        assert (desired_image is not None)
+        assert (tenant.creator is not None)
+        assert (node.site_deployment.deployment is not None)
+
+        instance = Instance(slice=slice,
+                            node=node,
+                            image=desired_image,
+                            creator=tenant.creator,
+                            deployment=node.site_deployment.deployment,
+                            flavor=flavors[0],
+                            isolation=slice.default_isolation,
+                            parent=parent)
+
+        self.save_instance(tenant, instance)
+
+        return instance
+
+    def manage_container(self, tenant):
+        if tenant.deleted:
+            return
+
+        if not tenant.volt:
+            raise SynchronizerConfigurationError("This VSG container has no volt")
+
+        if tenant.instance:
+            # We're good.
+            return
+
+        instance = self.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
+        tenant.instance = instance
+        # TODO: is a partial failure possible here (instance saved but tenant.save() fails)?
+        tenant.save()
+
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance_id=instance.id, network_id=network.id)
+        if port:
+            port = port[0]
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            port.save()
+        return port
+
+    def get_lan_network(self, tenant, instance):
+        slice = tenant.provider_service.slices.all()[0]
+        # there should be only one private network, and its template should not be the management template
+        lan_networks = [x for x in slice.networks.all() if
+                        x.template.visibility == "private" and (not "management" in x.template.name)]
+        if len(lan_networks) > 1:
+            raise SynchronizerProgrammingError("The vSG slice should only have one non-management private network")
+        if not lan_networks:
+            raise SynchronizerProgrammingError("No lan_network")
+        return lan_networks[0]
+
+    def port_set_parameter(self, port, name, value):
+        pt = NetworkParameterType.objects.get(name=name)
+        existing_params = NetworkParameter.objects.filter(parameter_id=pt.id, content_type=port.self_content_type_id, object_id=port.id)
+
+        if existing_params:
+            p = existing_params[0]
+            p.value = str(value)
+            p.save()
+        else:
+            p = NetworkParameter(parameter=pt, content_type=port.self_content_type_id, object_id=port.id, value=str(value))
+            p.save()
+
+    def save_instance(self, tenant, instance):
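+        # The instance is saved twice: first with no_sync set, so ports and
+        # tags can be attached before the synchronizer acts on it, then again
+        # at the bottom with no_sync cleared to release it.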
+        instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
+        instance.no_sync = True   # prevent instance from being synced until we're done with it
+        super(VSGTenantPolicy, self).save_instance(instance)
+        try:
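+            # the container and container_vm isolation paths from the old
+            # in-model code were not carried over to this model policy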
+            if instance.isolation in ["container", "container_vm"]:
+                raise Exception("Not supported")
+
+            if instance.isolation in ["vm"]:
+                lan_network = self.get_lan_network(tenant, instance)
+                port = self.find_or_make_port(instance, lan_network)
+                self.port_set_parameter(port, "c_tag", tenant.volt.c_tag)
+                self.port_set_parameter(port, "s_tag", tenant.volt.s_tag)
+                self.port_set_parameter(port, "neutron_port_name", "stag-%s" % tenant.volt.s_tag)
+                port.save()
+
+            # tag the instance with the s-tag, so we can easily find the
+            # instance later
+            if tenant.volt and tenant.volt.s_tag:
+                tags = Tag.objects.filter(name="s_tag", value=tenant.volt.s_tag)
+                if not tags:
+                    tag = Tag(service=tenant.provider_service, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(tenant.volt.s_tag))
+                    tag.save()
+
+            # VTN-CORD needs a WAN address for the VM, so that the VM can
+            # be configured.
+            tags = Tag.objects.filter(content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant")
+            if not tags:
+                vrouter = self.allocate_public_service_instance(address_pool_name="addresses_vsg",
+                                                                subscriber_service=tenant.provider_service)
+                vrouter.set_attribute("tenant_for_instance_id", instance.id)
+                vrouter.save()
+                # TODO: potential partial failure
+                tag = Tag(service=tenant.provider_service, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % vrouter.id)
+                tag.save()
+
+            instance.no_sync = False   # allow the synchronizer to run now
+            super(VSGTenantPolicy, self).save_instance(instance)
+        except:
+            # TODO: clean up any partial state left behind by a failure here
+            raise
diff --git a/xos/synchronizer/model_policies/test_config.yaml b/xos/synchronizer/model_policies/test_config.yaml
new file mode 100644
index 0000000..c05965e
--- /dev/null
+++ b/xos/synchronizer/model_policies/test_config.yaml
@@ -0,0 +1,4 @@
+name: test-model-policies
+accessor:
+  username: xosadmin@opencord.org
+  password: "sample"
diff --git a/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py b/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
new file mode 100644
index 0000000..a2fc5d0
--- /dev/null
+++ b/xos/synchronizer/model_policies/test_model_policy_vsgtenant.py
@@ -0,0 +1,34 @@
+import unittest
+from mock import patch
+import mock
+
+import os, sys
+sys.path.append("../../..")
+sys.path.append("../../new_base/model_policies")
+config = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/test_config.yaml")
+from xosconfig import Config
+Config.init(config, 'synchronizer-config-schema.yaml')
+
+import synchronizers.new_base.modelaccessor
+
+from model_policy_vsgtenant import VSGTenantPolicy
+
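+# MockVSGTenant stubs out just the attributes that manage_container() reads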
+class MockVSGTenant:
+    provider_service = None
+    deleted = False
+    instance = None
+    volt = None
+
+class TestModelPolicyVsgTenant(unittest.TestCase):
+    def setUp(self):
+        self.policy = VSGTenantPolicy()
+        self.tenant = MockVSGTenant()
+
+    def test_manage_container_no_volt(self):
+        with self.assertRaises(Exception) as e:
+            self.policy.manage_container(self.tenant)
+        self.assertEqual(e.exception.message, "This VSG container has no volt")
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/xos/synchronizer/steps/sync_vcpetenant.py b/xos/synchronizer/steps/sync_vcpetenant.py
index 5c45cf3..d3b0f11 100644
--- a/xos/synchronizer/steps/sync_vcpetenant.py
+++ b/xos/synchronizer/steps/sync_vcpetenant.py
@@ -126,8 +126,15 @@
 
         o.last_ansible_hash = ansible_hash
 
-    def delete_record(self, m):
-        pass
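+    # Both sync and delete wait until the model policy has run on the object
+    # (o.policed set and at least as new as o.updated). defer_sync(), from the
+    # SyncStep base class, raises an exception that causes the synchronizer to
+    # retry the object later.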
+    def sync_record(self, o):
+        if (not o.policed) or (o.policed < o.updated):
+            self.defer_sync(o, "waiting on model policy")
+        super(SyncVSGTenant, self).sync_record(o)
+
+    def delete_record(self, o):
+        if (not o.policed) or (o.policed < o.updated):
+            self.defer_sync(o, "waiting on model policy")
+        # do not call super, as we don't want to re-run the playbook
 
     def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
         if not monitoring_agent_info.service:
diff --git a/xos/synchronizer/vsg_config.yaml b/xos/synchronizer/vsg_config.yaml
index 3c97239..109867e 100644
--- a/xos/synchronizer/vsg_config.yaml
+++ b/xos/synchronizer/vsg_config.yaml
@@ -4,4 +4,5 @@
   password: "@/opt/xos/services/vsg/credentials/xosadmin@opencord.org"
 dependency_graph: "/opt/xos/synchronizers/vsg/model-deps"
 steps_dir: "/opt/xos/synchronizers/vsg/steps"
-sys_dir: "/opt/xos/synchronizers/vsg/sys"
\ No newline at end of file
+sys_dir: "/opt/xos/synchronizers/vsg/sys"
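+# directory the synchronizer loads model policies from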
+model_policies_dir: "/opt/xos/synchronizers/vsg/model_policies"