# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14
15
import base64
import hashlib
import json
import os
import socket
import sys
import time

from xosconfig import Config

from xossynchronizer.steps.syncstep import SyncStep, DeferredException
from xossynchronizer.ansible_helper import run_template_ssh
from xossynchronizer.modelaccessor import *
27
28
class SyncInstanceUsingAnsible(SyncStep):
    """Base sync step that configures an XOS Instance via an Ansible playbook.

    Subclasses declare what they sync and which template to run; this base
    class gathers the ssh/connection fields, runs the playbook, and handles
    deferral when the instance is not ready yet.
    """

    # All of the following should be defined for classes derived from this
    # base class. Examples below use VSGTenant.

    # provides=[VSGTenant]
    # observes=VSGTenant
    # requested_interval=0
    # template_name = "sync_vcpetenant.yaml"

    def __init__(self, **args):
        # No extra state; just delegate to SyncStep.
        SyncStep.__init__(self, **args)
40
41 def skip_ansible_fields(self, o):
42 # Return True if the instance processing and get_ansible_fields stuff
43 # should be skipped. This hook is primarily for the OnosApp
44 # sync step, so it can do its external REST API sync thing.
45 return False
46
47 def defer_sync(self, o, reason):
48 # zdw, 2017-02-18 - is raising the exception here necessary? - seems like
49 # it's just logging the same thing twice
50 self.log.info("defer object", object=str(o), reason=reason, **o.tologdict())
51 raise DeferredException("defer object %s due to %s" % (str(o), reason))
52
53 def get_extra_attributes(self, o):
54 # This is a place to include extra attributes that aren't part of the
55 # object itself.
56
57 return {}
58
59 def get_instance(self, o):
60 # We need to know what instance is associated with the object. Let's
61 # assume 'o' has a field called 'instance'. If the field is called
62 # something else, or if custom logic is needed, then override this
63 # method.
64
65 return o.instance
66
67 def get_external_sync(self, o):
68 hostname = getattr(o, "external_hostname", None)
69 container = getattr(o, "external_container", None)
70 if hostname and container:
71 return (hostname, container)
72 else:
73 return None
74
75 def run_playbook(self, o, fields, template_name=None):
76 if not template_name:
77 template_name = self.template_name
78 tStart = time.time()
79 run_template_ssh(template_name, fields, object=o)
80 self.log.info(
81 "playbook execution time", time=int(time.time() - tStart), **o.tologdict()
82 )
83
84 def pre_sync_hook(self, o, fields):
85 pass
86
87 def post_sync_hook(self, o, fields):
88 pass
89
90 def sync_fields(self, o, fields):
91 self.run_playbook(o, fields)
92
93 def prepare_record(self, o):
94 pass
95
96 def get_node(self, o):
97 return o.node
98
99 def get_node_key(self, node):
100 # NOTE `node_key` is never defined, does it differ from `proxy_ssh_key`? the value looks to be the same
101 return Config.get("node_key")
102
103 def get_key_name(self, instance):
104 if instance.isolation == "vm":
105 if (
106 instance.slice
107 and instance.slice.service
108 and instance.slice.service.private_key_fn
109 ):
110 key_name = instance.slice.service.private_key_fn
111 else:
112 raise Exception("Make sure to set private_key_fn in the service")
113 elif instance.isolation == "container":
114 node = self.get_node(instance)
115 key_name = self.get_node_key(node)
116 else:
117 # container in VM
118 key_name = instance.parent.slice.service.private_key_fn
119
120 return key_name
121
122 def get_ansible_fields(self, instance):
123 # return all of the fields that tell Ansible how to talk to the context
124 # that's setting up the container.
125
126 if instance.isolation == "vm":
127 # legacy where container was configured by sync_vcpetenant.py
128
129 fields = {
130 "instance_name": instance.name,
131 "hostname": instance.node.name,
132 "instance_id": instance.instance_id,
133 "username": "ubuntu",
134 "ssh_ip": instance.get_ssh_ip(),
135 }
136
137 elif instance.isolation == "container":
138 # container on bare metal
139 node = self.get_node(instance)
140 hostname = node.name
141 fields = {
142 "hostname": hostname,
143 "baremetal_ssh": True,
144 "instance_name": "rootcontext",
145 "username": "root",
146 "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
147 # ssh_ip is not used for container-on-metal
148 }
149 else:
150 # container in a VM
151 if not instance.parent:
152 raise Exception("Container-in-VM has no parent")
153 if not instance.parent.instance_id:
154 raise Exception("Container-in-VM parent is not yet instantiated")
155 if not instance.parent.slice.service:
156 raise Exception("Container-in-VM parent has no service")
157 if not instance.parent.slice.service.private_key_fn:
158 raise Exception("Container-in-VM parent service has no private_key_fn")
159 fields = {
160 "hostname": instance.parent.node.name,
161 "instance_name": instance.parent.name,
162 "instance_id": instance.parent.instance_id,
163 "username": "ubuntu",
164 "ssh_ip": instance.parent.get_ssh_ip(),
165 "container_name": "%s-%s" % (instance.slice.name, str(instance.id)),
166 }
167
168 key_name = self.get_key_name(instance)
169 if not os.path.exists(key_name):
170 raise Exception("Node key %s does not exist" % key_name)
171
172 key = file(key_name).read()
173
174 fields["private_key"] = key
175
176 # Now the ceilometer stuff
177 # Only do this if the instance is not being deleted.
178 if not instance.deleted:
179 cslice = ControllerSlice.objects.get(slice_id=instance.slice.id)
180 if not cslice:
181 raise Exception(
182 "Controller slice object for %s does not exist"
183 % instance.slice.name
184 )
185
186 cuser = ControllerUser.objects.get(user_id=instance.creator.id)
187 if not cuser:
188 raise Exception(
189 "Controller user object for %s does not exist" % instance.creator
190 )
191
192 fields.update(
193 {
194 "keystone_tenant_id": cslice.tenant_id,
195 "keystone_user_id": cuser.kuser_id,
196 "rabbit_user": getattr(instance.controller, "rabbit_user", None),
197 "rabbit_password": getattr(
198 instance.controller, "rabbit_password", None
199 ),
200 "rabbit_host": getattr(instance.controller, "rabbit_host", None),
201 }
202 )
203
204 return fields
205
206 def sync_record(self, o):
207 self.log.info("sync'ing object", object=str(o), **o.tologdict())
208
209 self.prepare_record(o)
210
211 if self.skip_ansible_fields(o):
212 fields = {}
213 else:
214 if self.get_external_sync(o):
215 # sync to some external host
216
217 # UNTESTED
218
219 (hostname, container_name) = self.get_external_sync(o)
220 fields = {
221 "hostname": hostname,
222 "baremetal_ssh": True,
223 "instance_name": "rootcontext",
224 "username": "root",
225 "container_name": container_name,
226 }
227 key_name = self.get_node_key(node)
228 if not os.path.exists(key_name):
229 raise Exception("Node key %s does not exist" % key_name)
230
231 key = file(key_name).read()
232
233 fields["private_key"] = key
234 # TO DO: Ceilometer stuff
235 else:
236 instance = self.get_instance(o)
237 # sync to an XOS instance
238 if not instance:
239 self.defer_sync(o, "waiting on instance")
240 return
241
242 if not instance.instance_name:
243 self.defer_sync(o, "waiting on instance.instance_name")
244 return
245
246 fields = self.get_ansible_fields(instance)
247
248 fields["ansible_tag"] = getattr(
249 o, "ansible_tag", o.__class__.__name__ + "_" + str(o.id)
250 )
251
252 # If 'o' defines a 'sync_attributes' list, then we'll copy those
253 # attributes into the Ansible recipe's field list automatically.
254 if hasattr(o, "sync_attributes"):
255 for attribute_name in o.sync_attributes:
256 fields[attribute_name] = getattr(o, attribute_name)
257
258 fields.update(self.get_extra_attributes(o))
259
260 self.sync_fields(o, fields)
261
262 o.save()
263
    def delete_record(self, o):
        """Run the step's playbook with delete=True to tear down `o`.

        Mirrors sync_record(): gathers the same Ansible fields, adds
        "delete": True, and runs the playbook. No-op when the backing
        instance is already gone or is itself being deleted.
        """
        try:
            # TODO: This may be broken, as get_controller() does not exist in convenience wrapper
            controller = o.get_controller()
            # NOTE(review): `json` is not imported at the top of this file; if
            # the modelaccessor star-import does not provide it, this raises
            # NameError, which the `except AttributeError` below does NOT
            # catch -- confirm.
            controller_register = json.loads(
                o.node.site_deployment.controller.backend_register
            )

            if controller_register.get("disabled", False):
                raise InnocuousException(
                    "Controller %s is disabled" % o.node.site_deployment.controller.name
                )
        except AttributeError:
            pass

        instance = self.get_instance(o)

        if not instance:
            # the instance is gone. There's nothing left for us to do.
            return

        if instance.deleted:
            # the instance is being deleted. There's nothing left for us to do.
            return

        # NOTE(review): `basestring` is Python 2 only; this check breaks on
        # Python 3.
        if isinstance(instance, basestring):
            # sync to some external host

            # XXX - this probably needs more work...

            # NOTE(review): `service` is not defined anywhere in this method,
            # so reaching this branch raises NameError. Presumably the key
            # should come from the step's service -- confirm before relying
            # on external-host deletion.
            fields = {
                "hostname": instance,
                "instance_id": "ubuntu",  # this is the username to log into
                "private_key": service.key,
            }
        else:
            # sync to an XOS instance
            fields = self.get_ansible_fields(instance)

            fields["ansible_tag"] = getattr(
                o, "ansible_tag", o.__class__.__name__ + "_" + str(o.id)
            )

        # If 'o' defines a 'sync_attributes' list, then we'll copy those
        # attributes into the Ansible recipe's field list automatically.
        if hasattr(o, "sync_attributes"):
            for attribute_name in o.sync_attributes:
                fields[attribute_name] = getattr(o, attribute_name)

        if hasattr(self, "map_delete_inputs"):
            fields.update(self.map_delete_inputs(o))

        fields["delete"] = True
        # NOTE(review): run_playbook() has no return statement, so `res` is
        # always None when handed to map_delete_outputs -- confirm intent.
        res = self.run_playbook(o, fields)

        if hasattr(self, "map_delete_outputs"):
            self.map_delete_outputs(o, res)