# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import json

from xossynchronizer.modelaccessor import *
from xossynchronizer.model_policies.policy import Policy
from xossynchronizer.exceptions import *


class Scheduler(object):
    # XOS Scheduler Abstract Base Class
    # Used to implement schedulers that pick which node to put instances on

    def __init__(self, slice, label=None, constrain_by_service_instance=False):
        self.slice = slice
        self.label = label  # Only pick nodes with this label
        # Apply service-instance-based constraints
        self.constrain_by_service_instance = constrain_by_service_instance

    def pick(self):
        # This method should return a tuple (node, parent).
        # node is the node to instantiate on.
        # parent is for container_vm instances only, and is the VM that will
        # hold the container.

        raise Exception("Abstract Base")


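# Illustrative sketch (editor's example, not part of the original module): a
# custom scheduler subclasses Scheduler and implements pick() so that it
# returns a (node, parent) pair; parent is only meaningful for container_vm
# isolation. "RandomNodeScheduler" is a hypothetical name used only to show
# the contract.
#
# class RandomNodeScheduler(Scheduler):
#     def pick(self):
#         import random
#
#         if self.label:
#             nodes = list(Node.objects.filter(nodelabels__name=self.label))
#         else:
#             nodes = list(Node.objects.all())
#         if not nodes:
#             raise Exception("RandomNodeScheduler: no suitable nodes")
#         return (random.choice(nodes), None)

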
class LeastLoadedNodeScheduler(Scheduler):
    # This scheduler always returns the node with the fewest instances.

    def pick(self):
        set_label = False

        nodes = []
        if self.label:
            nodes = Node.objects.filter(nodelabels__name=self.label)
            if not nodes:
                set_label = self.constrain_by_service_instance

        if not nodes:
            if self.slice.default_node:
                # if slice.default_node is set, then filter by default_node
                nodes = Node.objects.filter(name=self.slice.default_node)
            else:
                nodes = Node.objects.all()

        # convert to list
        nodes = list(nodes)

        # sort so that we pick the least-loaded node
        nodes = sorted(nodes, key=lambda node: node.instances.count())

        if not nodes:
            raise Exception("LeastLoadedNodeScheduler: No suitable nodes to pick from")

        picked_node = nodes[0]

        if set_label:
            nl = NodeLabel(name=self.label)
            nl.node.add(picked_node)
            nl.save()

        # TODO: logic to filter nodes by which nodes are up, and which
        # nodes the slice can instantiate on.
        return [picked_node, None]


class TenantWithContainerPolicy(Policy):
    # This policy is abstract. Inherit this class into your own policy and
    # override model_name.
    model_name = None

    def handle_create(self, tenant):
        return self.handle_update(tenant)

    def handle_update(self, service_instance):
        if (service_instance.link_deleted_count > 0) and (
            not service_instance.provided_links.exists()
        ):
            model = globals()[self.model_name]
            self.log.info(
                "The last provided link has been deleted -- self-destructing."
            )
            self.handle_delete(service_instance)
            if model.objects.filter(id=service_instance.id).exists():
                service_instance.delete()
            else:
                self.log.info("Tenant %s is already deleted" % service_instance)
            return
        self.manage_container(service_instance)

    # def handle_delete(self, tenant):
    #     if tenant.vcpe:
    #         tenant.vcpe.delete()

    def save_instance(self, instance):
        # Override this function to do custom pre-save or post-save processing,
        # such as creating ports for containers.
        instance.save()
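    # A minimal override sketch (editor's example, not part of the original
    # module); "create_container_ports" is a hypothetical helper used only to
    # illustrate post-save processing:
    #
    #     def save_instance(self, instance):
    #         instance.save()
    #         create_container_ports(instance)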

    def ip_to_mac(self, ip):
        (a, b, c, d) = ip.split(".")
        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))

    def allocate_public_service_instance(self, **kwargs):
        """ Get a ServiceInstance that provides public connectivity. Currently this means using AddressPool and
            the AddressManager service.

            Expect this to be refactored when we break hard-coded service dependencies.
        """
        address_pool_name = kwargs.pop("address_pool_name")

        am_service = AddressManagerService.objects.all()  # TODO: Hardcoded dependency
        if not am_service:
            raise Exception("no addressing services")
        am_service = am_service[0]

        ap = AddressPool.objects.filter(
            name=address_pool_name, service_id=am_service.id
        )
        if not ap:
            raise Exception(
                "Addressing service unable to find addresspool %s" % address_pool_name
            )
        ap = ap[0]

        ip = ap.get_address()
        if not ip:
            raise Exception("AddressPool '%s' has run out of addresses." % ap.name)

        ap.save()  # save the AddressPool to account for the address being removed from it

        subscriber_service = None
        if "subscriber_service" in kwargs:
            subscriber_service = kwargs.pop("subscriber_service")

        subscriber_service_instance = None
        if "subscriber_tenant" in kwargs:
            subscriber_service_instance = kwargs.pop("subscriber_tenant")
        elif "subscriber_service_instance" in kwargs:
            subscriber_service_instance = kwargs.pop("subscriber_service_instance")

        # TODO: potential partial failure -- the AddressPool address is allocated and saved before the addressing tenant

        t = None
        try:
            t = AddressManagerServiceInstance(
                owner=am_service, **kwargs
            )  # TODO: Hardcoded dependency
            t.public_ip = ip
            t.public_mac = self.ip_to_mac(ip)
            t.address_pool_id = ap.id
            t.save()

            if subscriber_service:
                link = ServiceInstanceLink(
                    subscriber_service=subscriber_service, provider_service_instance=t
                )
                link.save()

            if subscriber_service_instance:
                link = ServiceInstanceLink(
                    subscriber_service_instance=subscriber_service_instance,
                    provider_service_instance=t,
                )
                link.save()
        except BaseException:
            # clean up if anything went wrong
            ap.put_address(ip)
            ap.save()  # save the AddressPool to account for the address being added back to it
            if t and t.id:
                t.delete()
            raise

        return t
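    # Illustrative call (editor's example; "public_addresses" and
    # some_service_instance are hypothetical). Any keyword arguments that are
    # not consumed above are passed straight through to
    # AddressManagerServiceInstance:
    #
    #     asi = self.allocate_public_service_instance(
    #         address_pool_name="public_addresses",
    #         subscriber_service_instance=some_service_instance,
    #     )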

    def get_image(self, tenant):
        slice = tenant.owner.slices.all()
        if not slice:
            raise SynchronizerProgrammingError("provider service has no slice")
        slice = slice[0]

        # If the slice has default_image set, then use it
        if slice.default_image:
            return slice.default_image

        raise SynchronizerProgrammingError(
            "Please set a default image for %s" % slice.name
        )

201 """ get_legacy_tenant_attribute
202 pick_least_loaded_instance_in_slice
203 count_of_tenants_of_an_instance
204
205 These three methods seem to be used by A-CORD. Look for ways to consolidate with existing methods and eliminate
206 these legacy ones
207 """
208
    def get_legacy_tenant_attribute(self, tenant, name, default=None):
        if tenant.service_specific_attribute:
            attributes = json.loads(tenant.service_specific_attribute)
        else:
            attributes = {}
        return attributes.get(name, default)
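    # For example, if tenant.service_specific_attribute holds the JSON blob
    # '{"instance_id": 7, "use_same_instance_for_multiple_tenants": true}'
    # (hypothetical values), then get_legacy_tenant_attribute(tenant,
    # "instance_id") returns 7. Both keys are the ones used further below.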

    def pick_least_loaded_instance_in_slice(self, tenant, slices, image):
        for slice in slices:
            if slice.instances.all().count() > 0:
                for instance in slice.instances.all():
                    if instance.image != image:
                        continue
                    # Pick the first instance that has fewer than 5 tenants
                    if self.count_of_tenants_of_an_instance(tenant, instance) < 5:
                        return instance
        return None

    # TODO: Ideally the tenant count for an instance would be maintained as a
    # many-to-one relationship attribute, but since this model is a proxy it
    # does not permit new attributes to be defined. Investigate whether there
    # is a better solution.
    def count_of_tenants_of_an_instance(self, tenant, instance):
        # Count the tenants of the concrete model (self.model_name) whose
        # legacy "instance_id" attribute points at this instance.
        tenant_count = 0
        for t in globals()[self.model_name].objects.all():
            if self.get_legacy_tenant_attribute(t, "instance_id", None) == instance.id:
                tenant_count += 1
        return tenant_count

    def manage_container(self, tenant):
        if tenant.deleted:
            return

        desired_image = self.get_image(tenant)

        if (tenant.instance is not None) and (
            tenant.instance.image.id != desired_image.id
        ):
            tenant.instance.delete()
            tenant.instance = None

        if tenant.instance is None:
            if not tenant.owner.slices.count():
                raise SynchronizerConfigurationError("The service has no slices")

            new_instance_created = False
            instance = None
            if self.get_legacy_tenant_attribute(
                tenant, "use_same_instance_for_multiple_tenants", default=False
            ):
                # Find if any existing instances can be used for this tenant
                slices = tenant.owner.slices.all()
                instance = self.pick_least_loaded_instance_in_slice(
                    tenant, slices, desired_image
                )

            if not instance:
                slice = tenant.owner.slices.first()

                flavor = slice.default_flavor
                if not flavor:
                    flavors = Flavor.objects.filter(name="m1.small")
                    if not flavors:
                        raise SynchronizerConfigurationError("No m1.small flavor")
                    flavor = flavors[0]

                if slice.default_isolation == "container_vm":
                    raise Exception("Not implemented")
                else:
                    scheduler = getattr(self, "scheduler", LeastLoadedNodeScheduler)
                    constrain_by_service_instance = getattr(
                        self, "constrain_by_service_instance", False
                    )
                    tenant_node_label = getattr(tenant, "node_label", None)
                    (node, parent) = scheduler(
                        slice,
                        label=tenant_node_label,
                        constrain_by_service_instance=constrain_by_service_instance,
                    ).pick()

                assert slice is not None
                assert node is not None
                assert desired_image is not None
                assert tenant.creator is not None
                assert node.site_deployment.deployment is not None
                assert flavor is not None

                try:
                    instance = Instance(
                        slice=slice,
                        node=node,
                        image=desired_image,
                        creator=tenant.creator,
                        deployment=node.site_deployment.deployment,
                        flavor=flavor,
                        isolation=slice.default_isolation,
                        parent=parent,
                    )
                    self.save_instance(instance)
                    new_instance_created = True

                    tenant.instance = instance
                    tenant.save()
                except BaseException:
                    # NOTE: We don't have transactional support, so if the synchronizer crashes and exits after
                    # creating the instance, but before adding it to the tenant, then we will leave an
                    # orphaned instance.
                    if new_instance_created:
                        instance.delete()
                    raise
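

# Illustrative sketch (editor's example, not part of the original module): a
# concrete policy subclasses TenantWithContainerPolicy and overrides
# model_name (and typically handle_delete). "MyServiceInstance" is a
# hypothetical model name used only for illustration.
#
# class MyServiceInstancePolicy(TenantWithContainerPolicy):
#     model_name = "MyServiceInstance"
#
#     def handle_delete(self, service_instance):
#         # release the compute instance held by this service instance
#         if service_instance.instance:
#             service_instance.instance.delete()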