# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from synchronizers.new_base.modelaccessor import *
from synchronizers.new_base.model_policies.model_policy_tenantwithcontainer import TenantWithContainerPolicy, LeastLoadedNodeScheduler
from synchronizers.new_base.exceptions import *
import wrappers.vegtenant

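# Model policy for VEGTenant: invoked whenever a VEGTenant is created, updated,
# or deleted, and responsible for keeping the tenant's Instance and public
# address allocation in step with the data model.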
class VEGTenantPolicy(TenantWithContainerPolicy):
    model_name = "VEGTenant"

    def handle_create(self, tenant):
        return self.handle_update(tenant)

    def handle_update(self, tenant):
        self.manage_container(tenant)
        self.manage_address_service_instance(tenant)
        self.cleanup_orphans(tenant)

    def handle_delete(self, tenant):
        if tenant.address_service_instance:
            tenant.address_service_instance.delete()

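    # Allocate a public address for the tenant from the "addresses_veg"
    # address pool if it does not already have one.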
    def manage_address_service_instance(self, tenant):
        if tenant.deleted:
            return

        if tenant.vrouter is None:
            vrouter = self.allocate_public_service_instance(address_pool_name="addresses_veg", subscriber_tenant=tenant)
            vrouter.save()

    def cleanup_orphans(self, tenant):
        # ensure vEG only has one AddressManagerServiceInstance
        cur_asi = tenant.address_service_instance
        for link in tenant.subscribed_links.all():
            # TODO: hardcoded dependency
            # cast from ServiceInstance to AddressManagerServiceInstance
            asis = AddressManagerServiceInstance.objects.filter(id=link.provider_service_instance.id)
            for asi in asis:
                if (not cur_asi) or (asi.id != cur_asi.id):
                    asi.delete()

    def get_veg_service(self, tenant):
        return VEGService.objects.get(id=tenant.owner.id)

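    # Return the instance already tagged with the given s-tag, if one exists.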
    def find_instance_for_s_tag(self, s_tag):
        tags = Tag.objects.filter(name="s_tag", value=s_tag)
        if tags:
            return tags[0].content_object

        return None

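    # Reuse the instance already tagged with this s-tag; otherwise build a new
    # one on the least-loaded node carrying the service's node_label, using
    # the m1.small flavor and the image chosen by get_image().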
    def find_or_make_instance_for_s_tag(self, tenant, s_tag):
        instance = self.find_instance_for_s_tag(s_tag)
        if instance:
            if instance.no_sync:
                # if no_sync is still set, then perhaps we failed while saving it and need to retry.
                self.save_instance(tenant, instance)
            return instance

        desired_image = self.get_image(tenant)

        flavors = Flavor.objects.filter(name="m1.small")
        if not flavors:
            raise SynchronizerConfigurationError("No m1.small flavor")

        slice = tenant.owner.slices.first()

        (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_veg_service(tenant).node_label).pick()

        assert (slice is not None)
        assert (node is not None)
        assert (desired_image is not None)
        assert (tenant.creator is not None)
        assert (node.site_deployment.deployment is not None)

        instance = Instance(slice=slice,
                            node=node,
                            image=desired_image,
                            creator=tenant.creator,
                            deployment=node.site_deployment.deployment,
                            flavor=flavors[0],
                            isolation=slice.default_isolation,
                            parent=parent)

        self.save_instance(tenant, instance)

        return instance

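    # Ensure the tenant is backed by an instance, creating one keyed on the
    # volt s-tag if necessary.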
    def manage_container(self, tenant):
        if tenant.deleted:
            return

        if not tenant.volt:
            raise SynchronizerConfigurationError("This VEG container has no volt")

        if tenant.instance:
            # We're good.
            return

        instance = self.find_or_make_instance_for_s_tag(tenant, tenant.volt.s_tag)
        tenant.instance = instance
        # TODO: possible for partial failure here?
        tenant.save()

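    # Return the existing Port attaching this instance to the network, or
    # create and save a new one.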
    def find_or_make_port(self, instance, network, **kwargs):
        port = Port.objects.filter(instance_id=instance.id, network_id=network.id)
        if port:
            port = port[0]
        else:
            port = Port(instance=instance, network=network, **kwargs)
            port.save()
        return port

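    # Pick out the slice's single private, non-management network, which is
    # presumably the subscriber-facing LAN.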
    def get_lan_network(self, tenant, instance):
        slice = tenant.owner.slices.all()[0]
        # there should be only one private network, and its template should not be the management template
        lan_networks = [x for x in slice.networks.all() if
                        x.template.visibility == "private" and (not "management" in x.template.name)]
        if len(lan_networks) > 1:
            raise SynchronizerProgrammingError("The vEG slice should only have one non-management private network")
        if not lan_networks:
            raise SynchronizerProgrammingError("No lan_network")
        return lan_networks[0]

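    # Create or update the named NetworkParameter on the given port.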
    def port_set_parameter(self, port, name, value):
        pt = NetworkParameterType.objects.get(name=name)
        existing_params = NetworkParameter.objects.filter(parameter_id=pt.id, content_type=port.self_content_type_id, object_id=port.id)

        if existing_params:
            p = existing_params[0]
            p.value = str(value)
            p.save()
        else:
            p = NetworkParameter(parameter=pt, content_type=port.self_content_type_id, object_id=port.id, value=str(value))
            p.save()

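    # Save the instance with no_sync set, attach its LAN port and s-tag, and
    # allocate a WAN address for VTN, then clear no_sync so the synchronizer
    # can act on it.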
    def save_instance(self, tenant, instance):
        instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
        instance.no_sync = True  # prevent instance from being synced until we're done with it
        super(VEGTenantPolicy, self).save_instance(instance)
        try:
            if instance.isolation in ["container", "container_vm"]:
                raise Exception("Not supported")

            if instance.isolation in ["vm"]:
                lan_network = self.get_lan_network(tenant, instance)
                port = self.find_or_make_port(instance, lan_network)
                self.port_set_parameter(port, "c_tag", tenant.volt.c_tag)
                self.port_set_parameter(port, "s_tag", tenant.volt.s_tag)
                self.port_set_parameter(port, "neutron_port_name", "stag-%s" % tenant.volt.s_tag)
                port.save()

            # tag the instance with the s-tag, so we can easily find the
            # instance later
            if tenant.volt and tenant.volt.s_tag:
                tags = Tag.objects.filter(name="s_tag", value=tenant.volt.s_tag)
                if not tags:
                    tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="s_tag", value=str(tenant.volt.s_tag))
                    tag.save()

            # VTN-CORD needs a WAN address for the VM, so that the VM can
            # be configured.
            tags = Tag.objects.filter(content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant")
            if not tags:
                address_service_instance = self.allocate_public_service_instance(address_pool_name="addresses_veg",
                                                                                 subscriber_service=tenant.owner)
                address_service_instance.set_attribute("tenant_for_instance_id", instance.id)
                address_service_instance.save()
                # TODO: potential partial failure
                tag = Tag(service=tenant.owner, content_type=instance.self_content_type_id, object_id=instance.id, name="vm_vrouter_tenant", value="%d" % address_service_instance.id)
                tag.save()

            instance.no_sync = False  # allow the synchronizer to run now
            super(VEGTenantPolicy, self).save_instance(instance)
        except:
            # need to clean up any failures here
            raise