# Copyright 2017-present Open Networking Foundation
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15
import base64
import hashlib
import json
import os
import socket
import sys
import time

from xosconfig import Config

from xossynchronizer.steps.syncstep import SyncStep, DeferredException
from xossynchronizer.ansible_helper import run_template_ssh
27
class SyncInstanceUsingAnsible(SyncStep):
    """Base sync step for models realized by running an Ansible playbook
    against the Instance that hosts them.

    Subclasses are expected to define the following class attributes
    (examples use VSGTenant):

        provides = [VSGTenant]
        observes = VSGTenant
        requested_interval = 0
        template_name = "sync_vcpetenant.yaml"
    """

    def __init__(self, **args):
        super(SyncInstanceUsingAnsible, self).__init__(**args)
39
40 def skip_ansible_fields(self, o):
41 # Return True if the instance processing and get_ansible_fields stuff
42 # should be skipped. This hook is primarily for the OnosApp
43 # sync step, so it can do its external REST API sync thing.
44 return False
45
46 def defer_sync(self, o, reason):
47 # zdw, 2017-02-18 - is raising the exception here necessary? - seems like
48 # it's just logging the same thing twice
49 self.log.info("defer object", object=str(o), reason=reason, **o.tologdict())
50 raise DeferredException("defer object %s due to %s" % (str(o), reason))
51
52 def get_extra_attributes(self, o):
53 # This is a place to include extra attributes that aren't part of the
54 # object itself.
55
56 return {}
57
58 def get_instance(self, o):
59 # We need to know what instance is associated with the object. Let's
60 # assume 'o' has a field called 'instance'. If the field is called
61 # something else, or if custom logic is needed, then override this
62 # method.
63
64 return o.instance
65
66 def get_external_sync(self, o):
67 hostname = getattr(o, "external_hostname", None)
68 container = getattr(o, "external_container", None)
69 if hostname and container:
70 return (hostname, container)
71 else:
72 return None
73
74 def run_playbook(self, o, fields, template_name=None):
75 if not template_name:
76 template_name = self.template_name
77 tStart = time.time()
78 run_template_ssh(template_name, fields, object=o)
79 self.log.info(
80 "playbook execution time", time=int(time.time() - tStart), **o.tologdict()
81 )
82
    def pre_sync_hook(self, o, fields):
        """Subclass hook run with the object and its fields; default no-op."""
        pass
85
    def post_sync_hook(self, o, fields):
        """Subclass hook run with the object and its fields; default no-op."""
        pass
88
    def sync_fields(self, o, fields):
        """Sync o by running the playbook with the assembled fields."""
        self.run_playbook(o, fields)
91
    def prepare_record(self, o):
        """Subclass hook run at the start of sync_record; default no-op."""
        pass
94
    def get_node(self, o):
        """Return the node hosting o; assumes o has a 'node' field."""
        return o.node
97
    def get_node_key(self, node):
        """Return the private-key path used to ssh to a node.

        The `node` argument is currently unused; the path comes from config.
        """
        # NOTE `node_key` is never defined, does it differ from `proxy_ssh_key`? the value looks to be the same
        return Config.get("node_key")
101
102 def get_key_name(self, instance):
103 if instance.isolation == "vm":
104 if (
105 instance.slice
106 and instance.slice.service
107 and instance.slice.service.private_key_fn
108 ):
109 key_name = instance.slice.service.private_key_fn
110 else:
111 raise Exception("Make sure to set private_key_fn in the service")
112 elif instance.isolation == "container":
113 node = self.get_node(instance)
114 key_name = self.get_node_key(node)
115 else:
116 # container in VM
117 key_name = instance.parent.slice.service.private_key_fn
118
119 return key_name
120
121 def get_ansible_fields(self, instance):
122 # return all of the fields that tell Ansible how to talk to the context
123 # that's setting up the container.
124
125 if instance.isolation == "vm":
126 # legacy where container was configured by sync_vcpetenant.py
127
128 fields = {
129 "instance_name": instance.name,
130 "hostname": instance.node.name,
131 "instance_id": instance.instance_id,
132 "username": "ubuntu",
133 "ssh_ip": instance.get_ssh_ip(),
134 }
135
136 elif instance.isolation == "container":
137 # container on bare metal
138 node = self.get_node(instance)
139 hostname = node.name
140 fields = {
141 "hostname": hostname,
142 "baremetal_ssh": True,
143 "instance_name": "rootcontext",
144 "username": "root",
145 "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
146 # ssh_ip is not used for container-on-metal
147 }
148 else:
149 # container in a VM
150 if not instance.parent:
151 raise Exception("Container-in-VM has no parent")
152 if not instance.parent.instance_id:
153 raise Exception("Container-in-VM parent is not yet instantiated")
154 if not instance.parent.slice.service:
155 raise Exception("Container-in-VM parent has no service")
156 if not instance.parent.slice.service.private_key_fn:
157 raise Exception("Container-in-VM parent service has no private_key_fn")
158 fields = {
159 "hostname": instance.parent.node.name,
160 "instance_name": instance.parent.name,
161 "instance_id": instance.parent.instance_id,
162 "username": "ubuntu",
163 "ssh_ip": instance.parent.get_ssh_ip(),
164 "container_name": "%s-%s" % (instance.slice.name, str(instance.id)),
165 }
166
167 key_name = self.get_key_name(instance)
168 if not os.path.exists(key_name):
169 raise Exception("Node key %s does not exist" % key_name)
170
171 key = file(key_name).read()
172
173 fields["private_key"] = key
174
175 # Now the ceilometer stuff
176 # Only do this if the instance is not being deleted.
177 if not instance.deleted:
178 cslice = ControllerSlice.objects.get(slice_id=instance.slice.id)
179 if not cslice:
180 raise Exception(
181 "Controller slice object for %s does not exist"
182 % instance.slice.name
183 )
184
185 cuser = ControllerUser.objects.get(user_id=instance.creator.id)
186 if not cuser:
187 raise Exception(
188 "Controller user object for %s does not exist" % instance.creator
189 )
190
191 fields.update(
192 {
193 "keystone_tenant_id": cslice.tenant_id,
194 "keystone_user_id": cuser.kuser_id,
195 "rabbit_user": getattr(instance.controller, "rabbit_user", None),
196 "rabbit_password": getattr(
197 instance.controller, "rabbit_password", None
198 ),
199 "rabbit_host": getattr(instance.controller, "rabbit_host", None),
200 }
201 )
202
203 return fields
204
205 def sync_record(self, o):
206 self.log.info("sync'ing object", object=str(o), **o.tologdict())
207
208 self.prepare_record(o)
209
210 if self.skip_ansible_fields(o):
211 fields = {}
212 else:
213 if self.get_external_sync(o):
214 # sync to some external host
215
216 # UNTESTED
217
218 (hostname, container_name) = self.get_external_sync(o)
219 fields = {
220 "hostname": hostname,
221 "baremetal_ssh": True,
222 "instance_name": "rootcontext",
223 "username": "root",
224 "container_name": container_name,
225 }
226 key_name = self.get_node_key(node)
227 if not os.path.exists(key_name):
228 raise Exception("Node key %s does not exist" % key_name)
229
230 key = file(key_name).read()
231
232 fields["private_key"] = key
233 # TO DO: Ceilometer stuff
234 else:
235 instance = self.get_instance(o)
236 # sync to an XOS instance
237 if not instance:
238 self.defer_sync(o, "waiting on instance")
239 return
240
241 if not instance.instance_name:
242 self.defer_sync(o, "waiting on instance.instance_name")
243 return
244
245 fields = self.get_ansible_fields(instance)
246
247 fields["ansible_tag"] = getattr(
248 o, "ansible_tag", o.__class__.__name__ + "_" + str(o.id)
249 )
250
251 # If 'o' defines a 'sync_attributes' list, then we'll copy those
252 # attributes into the Ansible recipe's field list automatically.
253 if hasattr(o, "sync_attributes"):
254 for attribute_name in o.sync_attributes:
255 fields[attribute_name] = getattr(o, attribute_name)
256
257 fields.update(self.get_extra_attributes(o))
258
259 self.sync_fields(o, fields)
260
261 o.save()
262
263 def delete_record(self, o):
264 try:
265 # TODO: This may be broken, as get_controller() does not exist in convenience wrapper
266 controller = o.get_controller()
267 controller_register = json.loads(
268 o.node.site_deployment.controller.backend_register
269 )
270
271 if controller_register.get("disabled", False):
272 raise InnocuousException(
273 "Controller %s is disabled" % o.node.site_deployment.controller.name
274 )
275 except AttributeError:
276 pass
277
278 instance = self.get_instance(o)
279
280 if not instance:
281 # the instance is gone. There's nothing left for us to do.
282 return
283
284 if instance.deleted:
285 # the instance is being deleted. There's nothing left for us to do.
286 return
287
288 if isinstance(instance, basestring):
289 # sync to some external host
290
291 # XXX - this probably needs more work...
292
293 fields = {
294 "hostname": instance,
295 "instance_id": "ubuntu", # this is the username to log into
296 "private_key": service.key,
297 }
298 else:
299 # sync to an XOS instance
300 fields = self.get_ansible_fields(instance)
301
302 fields["ansible_tag"] = getattr(
303 o, "ansible_tag", o.__class__.__name__ + "_" + str(o.id)
304 )
305
306 # If 'o' defines a 'sync_attributes' list, then we'll copy those
307 # attributes into the Ansible recipe's field list automatically.
308 if hasattr(o, "sync_attributes"):
309 for attribute_name in o.sync_attributes:
310 fields[attribute_name] = getattr(o, attribute_name)
311
312 if hasattr(self, "map_delete_inputs"):
313 fields.update(self.map_delete_inputs(o))
314
315 fields["delete"] = True
316 res = self.run_playbook(o, fields)
317
318 if hasattr(self, "map_delete_outputs"):
319 self.map_delete_outputs(o, res)