""" VSGTenant model policy: ensures each VSGTenant has a container instance
    and a vRouter, and removes orphaned vRouters.
"""

from synchronizers.new_base.modelaccessor import *
from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import TenantWithContainerPolicy, LeastLoadedNodeScheduler
from synchronizers.new_base.exceptions import *

class VSGTenantPolicy(TenantWithContainerPolicy):
    model_name = "VSGTenant"

    def handle_create(self, tenant):
        # create and update perform the same reconciliation steps
        return self.handle_update(tenant)

    def handle_update(self, tenant):
        self.manage_container(tenant)
        self.manage_vrouter(tenant)
        self.cleanup_orphans(tenant)

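    # Sketch of how the model-policy engine is expected to drive the handlers
    # above (an assumption based on the handler names, not code from the
    # engine itself):
    #
    #     policy = VSGTenantPolicy()
    #     policy.handle_create(tenant)   # delegates to handle_update()
    #     policy.handle_update(tenant)   # reconcile container, vRouter, orphans
    #     policy.handle_delete(tenant)   # release the tenant's vRouter
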
    def handle_delete(self, tenant):
        if tenant.vrouter:
            tenant.vrouter.delete()

    def manage_vrouter(self, tenant):
        if tenant.deleted:
            return

        if tenant.vrouter is None:
            # allocate a public address for this subscriber from the vSG pool
            vrouter = self.allocate_public_service_instance(address_pool_name="addresses_vsg", subscriber_tenant=tenant)
            vrouter.save()

    def cleanup_orphans(self, tenant):
        # ensure the vSG has at most one vRouter: delete any VRouterTenant for
        # this subscriber other than the one currently attached
        cur_vrouter = tenant.vrouter
        for vrouter in list(VRouterTenant.objects.filter(subscriber_tenant_id=tenant.id)):  # TODO: Hardcoded dependency
            if (not cur_vrouter) or (vrouter.id != cur_vrouter.id):
                vrouter.delete()

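    # Illustrative invariant (hypothetical check, not part of this policy):
    # after cleanup_orphans() runs, any VRouterTenant left for this subscriber
    # is the one currently attached to the tenant:
    #
    #     policy.cleanup_orphans(tenant)
    #     for v in VRouterTenant.objects.filter(subscriber_tenant_id=tenant.id):
    #         assert v.id == tenant.vrouter.id
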
    def get_vsg_service(self, tenant):
        return VSGService.objects.get(id=tenant.provider_service.id)

    def find_instance_for_s_tag(self, s_tag):
        # instances that serve an s-tag are tagged with it in save_instance()
        tags = Tag.objects.filter(name="s_tag", value=s_tag)
        if tags:
            return tags[0].content_object

        return None

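    # Illustrative lookup (hypothetical s-tag value): Tag stores a generic
    # foreign key, so content_object resolves back to the tagged Instance:
    #
    #     instance = policy.find_instance_for_s_tag("222")
    #     if instance is None:
    #         pass  # no vSG instance is serving that s-tag yet
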
    def find_or_make_instance_for_s_tag(self, tenant, s_tag):
        instance = self.find_instance_for_s_tag(s_tag)
        if instance:
            if instance.no_sync:
                # if no_sync is still set, then perhaps we failed while saving it and need to retry
                self.save_instance(tenant, instance)
            return instance

        desired_image = self.get_image(tenant)

        flavors = Flavor.objects.filter(name="m1.small")
        if not flavors:
            raise SynchronizerConfigurationError("No m1.small flavor")

        slice = tenant.provider_service.slices.first()
        assert (slice is not None)

        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_vsg_service(tenant).node_label).pick()

        assert (node is not None)
        assert (desired_image is not None)
        assert (tenant.creator is not None)
        assert (node.site_deployment.deployment is not None)

        instance = Instance(slice=slice,
                            node=node,
                            image=desired_image,
                            creator=tenant.creator,
                            deployment=node.site_deployment.deployment,
                            flavor=flavors[0],
                            isolation=slice.default_isolation,
                            parent=parent)

        self.save_instance(tenant, instance)

        return instance

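    # Hypothetical walk-through of the find-or-make path above: for a new
    # s-tag there is no matching Tag yet, so a fresh Instance is built on the
    # least-loaded node and passed through save_instance(), which clears
    # no_sync once the instance is fully wired up:
    #
    #     instance = policy.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
    #     assert not instance.no_sync
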
    def manage_container(self, tenant):
        if tenant.deleted:
            return

        if not tenant.volt:
            raise SynchronizerConfigurationError("This VSG container has no volt")

        if tenant.instance:
            # we're good -- the tenant already has an instance
            return

        instance = self.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
        tenant.instance = instance
        # TODO: possible for partial failure here?
        tenant.save()

    def find_or_make_port(self, instance, network, **kwargs):
        port = Port.objects.filter(instance_id=instance.id, network_id=network.id)
        if port:
            port = port[0]
        else:
            port = Port(instance=instance, network=network, **kwargs)
            port.save()
        return port

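    # Sketch of the idempotency contract above (hypothetical objects): calling
    # find_or_make_port() twice for the same instance/network pair returns the
    # same Port instead of creating a duplicate:
    #
    #     p1 = policy.find_or_make_port(instance, lan_network)
    #     p2 = policy.find_or_make_port(instance, lan_network)
    #     assert p1.id == p2.id
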
    def get_lan_network(self, tenant, instance):
        slice = tenant.provider_service.slices.all()[0]
        # there should be only one private network, and its template should not be the management template
        lan_networks = [x for x in slice.networks.all() if
                        x.template.visibility == "private" and (not "management" in x.template.name)]
        if len(lan_networks) > 1:
            raise SynchronizerProgrammingError("The vSG slice should only have one non-management private network")
        if not lan_networks:
            raise SynchronizerProgrammingError("No lan_network")
        return lan_networks[0]

    def port_set_parameter(self, port, name, value):
        # upsert: update the existing parameter if present, otherwise create it
        pt = NetworkParameterType.objects.get(name=name)
        existing_params = NetworkParameter.objects.filter(parameter_id=pt.id, content_type=port.self_content_type_id, object_id=port.id)

        if existing_params:
            p = existing_params[0]
            p.value = str(value)
            p.save()
        else:
            p = NetworkParameter(parameter=pt, content_type=port.self_content_type_id, object_id=port.id, value=str(value))
            p.save()

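    # Example of the upsert above (values are illustrative): NetworkParameter
    # rows hang off the Port via a generic foreign key (content_type +
    # object_id), keyed by NetworkParameterType name:
    #
    #     policy.port_set_parameter(port, "s_tag", 222)
    #     policy.port_set_parameter(port, "s_tag", 333)  # updates in place, no duplicate row
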
    def save_instance(self, tenant, instance):
        instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
        instance.no_sync = True  # prevent instance from being synced until we're done with it
        super(VSGTenantPolicy, self).save_instance(instance)
        try:
            if instance.isolation in ["container", "container_vm"]:
                raise Exception("Not supported")

            if instance.isolation in ["vm"]:
                lan_network = self.get_lan_network(tenant, instance)
                port = self.find_or_make_port(instance, lan_network)
                self.port_set_parameter(port, "c_tag", tenant.volt.c_tag)
                self.port_set_parameter(port, "s_tag", tenant.volt.s_tag)
                self.port_set_parameter(port, "neutron_port_name", "stag-%s" % tenant.volt.s_tag)
                port.save()

            # tag the instance with the s-tag, so we can easily find the
            # instance later
            if tenant.volt and tenant.volt.s_tag:
                tags = Tag.objects.filter(name="s_tag", value=tenant.volt.s_tag)
                if not tags:
                    tag = Tag(service=tenant.provider_service,
                              content_type=instance.self_content_type_id,
                              object_id=instance.id,
                              name="s_tag",
                              value=str(tenant.volt.s_tag))
                    tag.save()

            # VTN-CORD needs a WAN address for the VM, so that the VM can
            # be configured.
            tags = Tag.objects.filter(content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant")
            if not tags:
                vrouter = self.allocate_public_service_instance(address_pool_name="addresses_vsg",
                                                                subscriber_service=tenant.provider_service)
                vrouter.set_attribute("tenant_for_instance_id", instance.id)
                vrouter.save()
                # TODO: potential partial failure
                tag = Tag(service=tenant.provider_service,
                          content_type=instance.self_content_type_id,
                          object_id=instance.id,
                          name="vm_vrouter_tenant",
                          value="%d" % vrouter.id)
                tag.save()

            instance.no_sync = False  # allow the synchronizer to run now
            super(VSGTenantPolicy, self).save_instance(instance)
        except:
            # TODO: need to clean up any partially-created state here before re-raising
            raise
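# Sketch of the two-phase save performed by save_instance() above (an
# assumption about the synchronizer contract, not code from this module):
# no_sync acts as a guard so the backend does not act on the instance until
# its ports, parameters, and tags are all in place.
#
#     instance.no_sync = True    # phase 1: persist, but hold the synchronizer off
#     # ...attach LAN port, s_tag Tag, vm_vrouter_tenant Tag...
#     instance.no_sync = False   # phase 2: release; the synchronizer may now act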