Allow VTR to use a specific key for contacting VSG instances
diff --git a/xos/onboard/vtr/synchronizer/steps/sync_vtrtenant.py b/xos/onboard/vtr/synchronizer/steps/sync_vtrtenant.py
index 1932d70..f0f7ef3 100644
--- a/xos/onboard/vtr/synchronizer/steps/sync_vtrtenant.py
+++ b/xos/onboard/vtr/synchronizer/steps/sync_vtrtenant.py
@@ -9,7 +9,7 @@
from synchronizers.base.ansible import run_template_ssh
from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
from core.models import Service, Slice, Tag
-from services.vsg.models import VSGService
+from services.vsg.models import VSGService, VCPE_KIND
from services.vtr.models import VTRService, VTRTenant
from services.hpc.models import HpcService, CDNPrefix
from xos.logger import Logger, logging
@@ -27,7 +27,7 @@
observes=VTRTenant
requested_interval=0
template_name = "sync_vtrtenant.yaml"
- service_key_name = "/opt/xos/synchronizers/vtr/vcpe_private_key"
+ #service_key_name = "/opt/xos/services/vtr/vcpe_private_key"
def __init__(self, *args, **kwargs):
super(SyncVTRTenant, self).__init__(*args, **kwargs)
@@ -66,6 +66,15 @@
else:
return None
+ def get_key_name(self, instance):
+ if instance.slice.service and (instance.slice.service.kind==VCPE_KIND):
+ # We need to use the vsg service's private key. Onboarding won't
+ # by default give us another service's private key, so let's assume
+ # onboarding has been configured to add vsg_rsa to the vtr service.
+ return "/opt/xos/services/vtr/keys/vsg_rsa"
+ else:
+ raise Exception("VTR doesn't know how to get the private key for this instance")
+
def get_extra_attributes(self, o):
vtr_service = self.get_vtr_service(o)
vcpe_service = self.get_vcpe_service(o)
diff --git a/xos/synchronizers/base/SyncInstanceUsingAnsible.py b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
index 49ca23b..dd7e5c6 100644
--- a/xos/synchronizers/base/SyncInstanceUsingAnsible.py
+++ b/xos/synchronizers/base/SyncInstanceUsingAnsible.py
@@ -82,6 +82,21 @@
def get_node_key(self, node):
return "/root/setup/node_key"
+ def get_key_name(self, instance):
+ if instance.isolation=="vm":
+ if (instance.slice) and (instance.slice.service) and (instance.slice.service.private_key_fn):
+ key_name = instance.slice.service.private_key_fn
+ else:
+ raise Exception("Make sure to set private_key_fn in the service")
+ elif instance.isolation=="container":
+ node = self.get_node(instance)
+ key_name = self.get_node_key(node)
+ else:
+ # container in VM
+ key_name = instance.parent.slice.service.private_key_fn
+
+ return key_name
+
def get_ansible_fields(self, instance):
# return all of the fields that tell Ansible how to talk to the context
# that's setting up the container.
@@ -95,10 +110,7 @@
"username": "ubuntu",
"ssh_ip": instance.get_ssh_ip(),
}
- if (instance.slice) and (instance.slice.service) and (instance.slice.service.private_key_fn):
- key_name = instance.slice.service.private_key_fn
- else:
- raise Exception("Make sure to set private_key_fn in the service")
+
elif (instance.isolation == "container"):
# container on bare metal
node = self.get_node(instance)
@@ -110,7 +122,6 @@
"container_name": "%s-%s" % (instance.slice.name, str(instance.id))
# ssh_ip is not used for container-on-metal
}
- key_name = self.get_node_key(node)
else:
# container in a VM
if not instance.parent:
@@ -128,8 +139,8 @@
"ssh_ip": instance.parent.get_ssh_ip(),
"container_name": "%s-%s" % (instance.slice.name, str(instance.id))
}
- key_name = instance.parent.slice.service.private_key_fn
+ key_name = self.get_key_name(instance)
if not os.path.exists(key_name):
raise Exception("Node key %s does not exist" % key_name)