Merged ansible branch into master

The observer sync steps now drive OpenStack through Ansible playbooks
invoked via run_template() (site, slice, user, and sliver sync), and the
remaining direct driver calls are routed through per-deployment admin
contexts (admin_driver with tenant='admin').
diff --git a/planetstack/openstack_observer/event_loop.py b/planetstack/openstack_observer/event_loop.py
index 12965bb..500c0e0 100644
--- a/planetstack/openstack_observer/event_loop.py
+++ b/planetstack/openstack_observer/event_loop.py
@@ -254,10 +254,13 @@
except KeyError:
has_deps = False
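+ # pessimistic default; a satisfied dependency below flips go to True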
+ go = False
+
if (has_deps):
for d in deps:
if d==step.__name__:
logger.info(" step %s self-wait skipped" % step.__name__)
+ go = True
continue
cond = self.step_conditions[d]
@@ -266,7 +269,7 @@
logger.info(" step %s wait on dep %s" % (step.__name__, d))
cond.wait()
cond.release()
- go = self.step_status[d] == STEP_STATUS_OK
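+ # any dependency that completed OK is enough to let this step run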
+ go = go or self.step_status[d] == STEP_STATUS_OK
else:
go = True
diff --git a/planetstack/openstack_observer/steps/sync_external_routes.py b/planetstack/openstack_observer/steps/sync_external_routes.py
index 334d19d..28d24cc 100644
--- a/planetstack/openstack_observer/steps/sync_external_routes.py
+++ b/planetstack/openstack_observer/steps/sync_external_routes.py
@@ -2,6 +2,7 @@
import base64
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import Deployment
class SyncExternalRoutes(OpenStackSyncStep):
# XXX what does this provide?
@@ -9,6 +10,8 @@
requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
def call(self, **args):
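+ # route sync runs in the admin context of the first deployment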
+ deployments = Deployment.objects.all()
+ self.driver = self.driver.admin_driver(deployment=deployments[0],tenant='admin')
routes = self.driver.get_external_routes()
subnets = self.driver.shell.quantum.list_subnets()['subnets']
for subnet in subnets:
diff --git a/planetstack/openstack_observer/steps/sync_network_deployments.py b/planetstack/openstack_observer/steps/sync_network_deployments.py
index 77d3a3a..0312f3a 100644
--- a/planetstack/openstack_observer/steps/sync_network_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_network_deployments.py
@@ -25,7 +25,8 @@
def get_next_subnet(self, deployment=None):
# limit ourself to 10.0.x.x for now
valid_subnet = lambda net: net.startswith('10.0')
- driver = self.driver.admin_driver(deployment=deployment)
+
+ driver = self.driver.admin_driver(deployment=deployment,tenant='admin')
subnets = driver.shell.quantum.list_subnets()['subnets']
ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
if valid_subnet(subnet['cidr'])]
@@ -107,15 +108,12 @@
logger.info("deployment %r has no admin_user, skipping" % network_deployment.deployment)
return
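+ # switch to the deployment's admin context for the network operations below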
+ self.driver = self.driver.admin_driver(deployment=network_deployment.deployment,tenant='admin')
if network_deployment.network.owner and network_deployment.network.owner.creator:
try:
# update manager context
- real_driver = self.driver
- self.driver = self.driver.client_driver(caller=network_deployment.network.owner.creator,
- tenant=network_deployment.network.owner.name,
- deployment=network_deployment.deployment.name)
+ # TODO: bring back the client_driver context switch removed above
self.save_network_deployment(network_deployment)
- self.driver = real_driver
logger.info("saved network deployment: %s" % (network_deployment))
except Exception,e:
logger.log_exc("save network deployment failed: %s" % network_deployment)
diff --git a/planetstack/openstack_observer/steps/sync_network_slivers.py b/planetstack/openstack_observer/steps/sync_network_slivers.py
index c003ba8..3e85e05 100644
--- a/planetstack/openstack_observer/steps/sync_network_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_network_slivers.py
@@ -47,7 +47,7 @@
logger.info("deployment %s has no admin_tenant" % deployment.name)
continue
try:
- driver = self.driver.admin_driver(deployment=deployment.name)
+ driver = self.driver.admin_driver(deployment=deployment.name,tenant='admin')
ports = driver.shell.quantum.list_ports()["ports"]
except:
logger.log_exc("failed to get ports from deployment %s" % deployment.name)
@@ -137,7 +137,7 @@
if (neutron_nat_list != nat_list):
logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
try:
- driver = self.driver.client_driver(caller=networkSliver.sliver.creator, tenant=networkSliver.sliver.slice.name, deployment=networkSliver.sliver.node.deployment.name)
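+ # NAT port updates now go through the deployment admin context rather than the sliver creator's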
+ driver = self.driver.admin_driver(deployment=networkSliver.sliver.node.deployment,tenant='admin')
driver.shell.quantum.update_port(networkSliver.port_id, {"port": {"nat:forward_ports": nat_list}})
except:
logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
diff --git a/planetstack/openstack_observer/steps/sync_nodes.py b/planetstack/openstack_observer/steps/sync_nodes.py
index d648b7d..3936311 100644
--- a/planetstack/openstack_observer/steps/sync_nodes.py
+++ b/planetstack/openstack_observer/steps/sync_nodes.py
@@ -24,7 +24,7 @@
# collect local nodes
sites = Site.objects.all()
nodes = Node.objects.all()
- node_hostnames = [node.name for node in nodes]
+ node_hostnames = [node.name for node in nodes]
# fetch all nodes from each deployment
deployments = Deployment.objects.all()
diff --git a/planetstack/openstack_observer/steps/sync_site_deployments.py b/planetstack/openstack_observer/steps/sync_site_deployments.py
index a8a00f6..b5e9f9a 100644
--- a/planetstack/openstack_observer/steps/sync_site_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_site_deployments.py
@@ -4,24 +4,32 @@
from planetstack.config import Config
from observer.openstacksyncstep import OpenStackSyncStep
from core.models.site import *
+from observer.ansible import *
class SyncSiteDeployments(OpenStackSyncStep):
requested_interval=0
provides=[SiteDeployments, Site]
def sync_record(self, site_deployment):
- if not site_deployment.tenant_id:
- driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
- tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
- description=site_deployment.site.name,
- enabled=site_deployment.site.enabled)
- site_deployment.tenant_id = tenant.id
- site_deployment.save()
- elif site_deployment.site.id and site_deployment.tenant_id:
- driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
- driver.update_tenant(site_deployment.tenant_id,
- description=site_deployment.site.name,
- enabled=site_deployment.site.enabled)
+
+ template = os_template_env.get_template('sync_site_deployments.yaml')
+ tenant_fields = {'endpoint':site_deployment.deployment.auth_url,
+ 'admin_user': site_deployment.deployment.admin_user,
+ 'admin_password': site_deployment.deployment.admin_password,
+ 'admin_tenant': 'admin',
+ 'tenant': site_deployment.site.login_base,
+ 'tenant_description': site_deployment.site.name}
+
+ rendered = template.render(tenant_fields)
+ res = run_template('sync_site_deployments.yaml', tenant_fields)
+
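+ # run_template returns one result per completed operation; a single result means the tenant create/update succeeded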
+ if (len(res)==1):
+ site_deployment.tenant_id = res[0]['id']
+ site_deployment.save()
+ elif (len(res)):
+ raise Exception('Could not assign roles for tenant %s'%tenant_fields['tenant'])
+ else:
+ raise Exception('Could not create or update tenant %s'%tenant_fields['tenant'])
def delete_record(self, site_deployment):
if site_deployment.tenant_id:
diff --git a/planetstack/openstack_observer/steps/sync_slice_deployments.py b/planetstack/openstack_observer/steps/sync_slice_deployments.py
index 03ea2ca..97196d6 100644
--- a/planetstack/openstack_observer/steps/sync_slice_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_slice_deployments.py
@@ -9,6 +9,7 @@
from core.models.slice import Slice, SliceDeployments
from core.models.userdeployments import UserDeployments
from util.logger import Logger, logging
+from observer.ansible import *
logger = Logger(level=logging.INFO)
@@ -47,46 +48,41 @@
logger.info("deployment %r has no admin_user, skipping" % slice_deployment.deployment)
return
- if not slice_deployment.tenant_id:
- nova_fields = {'tenant_name': slice_deployment.slice.name,
- 'description': slice_deployment.slice.description,
- 'enabled': slice_deployment.slice.enabled}
- driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
- tenant = driver.create_tenant(**nova_fields)
- slice_deployment.tenant_id = tenant.id
-
- # XXX give caller an admin role at the tenant they've created
- deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
+ deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
deployment=slice_deployment.deployment)
- if not deployment_users:
- logger.info("slice createor %s has not accout at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
- else:
- deployment_user = deployment_users[0]
- # lookup user id at this deployment
- kuser= driver.shell.keystone.users.find(email=slice_deployment.slice.creator.email)
+ if not deployment_users:
+ logger.info("slice creator %s has no account at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
+ roles = []
+ deployment_user = None
+ else:
+ deployment_user = deployment_users[0]
+ roles = ['admin']
+
+ max_instances=int(slice_deployment.slice.max_slivers)
+ tenant_fields = {'endpoint':slice_deployment.deployment.auth_url,
+ 'admin_user': slice_deployment.deployment.admin_user,
+ 'admin_password': slice_deployment.deployment.admin_password,
+ 'admin_tenant': 'admin',
+ 'tenant': slice_deployment.slice.name,
+ 'tenant_description': slice_deployment.slice.description,
+ 'roles':roles,
+ 'name':deployment_user.email if deployment_user else None,
+ 'max_instances':max_instances}
- # add required roles at the slice's tenant
- driver.add_user_role(kuser.id, tenant.id, 'admin')
-
- # refresh credentials using this tenant
- client_driver = self.driver.client_driver(caller=deployment_user.user,
- tenant=tenant.name,
- deployment=slice_deployment.deployment.name)
-
-
- if slice_deployment.id and slice_deployment.tenant_id:
- # update existing tenant
- driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
- driver.update_tenant(slice_deployment.tenant_id,
- description=slice_deployment.slice.description,
- enabled=slice_deployment.slice.enabled)
-
- if slice_deployment.tenant_id:
- # update slice/tenant quota
- driver = self.driver.client_driver(deployment=slice_deployment.deployment.name, tenant=slice_deployment.slice.name)
- driver.shell.nova.quotas.update(tenant_id=slice_deployment.tenant_id, instances=int(slice_deployment.slice.max_slivers))
-
- slice_deployment.save()
+ res = run_template('sync_slice_deployments.yaml', tenant_fields)
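+ # expect one result for the tenant itself plus one per role assignment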
+ expected_num = len(roles)+1
+ if (len(res)!=expected_num):
+ raise Exception('Could not sync tenants for slice %s'%slice_deployment.slice.name)
+ else:
+ tenant_id = res[0]['id']
+ if (not slice_deployment.tenant_id):
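+ # shell out to the nova CLI for the quota update; assumes admin credentials are present in the environment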
+ handle = os.popen('nova quota-update --instances %d %s'%(max_instances,tenant_id))
+ output = handle.read()
+ result = handle.close()
+ if (result):
+ logger.info('Could not update quota for %s'%slice_deployment.slice.name)
+ slice_deployment.tenant_id = tenant_id
+ slice_deployment.save()
+
def delete_record(self, slice_deployment):
@@ -106,11 +102,4 @@
client_driver.delete_network(slice_deployment.network_id)
if slice_deployment.tenant_id:
driver.delete_tenant(slice_deployment.tenant_id)
- # delete external route
- #subnet = None
- #subnets = client_driver.shell.quantum.list_subnets()['subnets']
- #for snet in subnets:
- # if snet['id'] == slice_deployment.subnet_id:
- # subnet = snet
- #if subnet:
- # driver.delete_external_route(subnet)
+
diff --git a/planetstack/openstack_observer/steps/sync_slivers.py b/planetstack/openstack_observer/steps/sync_slivers.py
index dcedd1d..4f33bba 100644
--- a/planetstack/openstack_observer/steps/sync_slivers.py
+++ b/planetstack/openstack_observer/steps/sync_slivers.py
@@ -7,9 +7,14 @@
from core.models.slice import Slice, SlicePrivilege, SliceDeployments
from core.models.network import Network, NetworkSlice, NetworkDeployments
from util.logger import Logger, logging
+from observer.ansible import *
logger = Logger(level=logging.INFO)
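+# escape newlines and double quotes so user_data survives YAML templating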
+def escape(s):
+ s = s.replace('\n',r'\n').replace('"',r'\"')
+ return s
+
class SyncSlivers(OpenStackSyncStep):
provides=[Sliver]
requested_interval=0
@@ -20,81 +25,97 @@
def sync_record(self, sliver):
logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+
metadata_update = {}
- if ("numberCores" in sliver.changed_fields):
+ if (sliver.numberCores):
metadata_update["cpu_cores"] = str(sliver.numberCores)
for tag in sliver.slice.tags.all():
if tag.name.startswith("sysctl-"):
metadata_update[tag.name] = tag.value
- if not sliver.instance_id:
- driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)
- # public keys
- slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
- pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
- if sliver.creator.public_key:
- pubkeys.append(sliver.creator.public_key)
- if sliver.slice.creator.public_key:
- pubkeys.append(sliver.slice.creator.public_key)
- # netowrks
- # include all networks available to the slice and/or associated network templates
- nics = []
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
- network_deployments = NetworkDeployments.objects.filter(network__in=networks,
- deployment=sliver.node.deployment)
- # Gather private networks first. This includes networks with a template that has
- # visibility = private and translation = none
- for network_deployment in network_deployments:
- if network_deployment.network.template.visibility == 'private' and \
- network_deployment.network.template.translation == 'none':
- nics.append({'net-id': network_deployment.net_id})
-
- # now include network template
- network_templates = [network.template.sharedNetworkName for network in networks \
- if network.template.sharedNetworkName]
- #logger.info("%s %s %s %s" % (driver.shell.quantum.username, driver.shell.quantum.password, driver.shell.quantum.tenant, driver.shell.quantum.url))
- for net in driver.shell.quantum.list_networks()['networks']:
- if net['name'] in network_templates:
- nics.append({'net-id': net['id']})
+ # public keys
+ slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+ pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
+ if sliver.creator.public_key:
+ pubkeys.add(sliver.creator.public_key)
- # look up image id
- deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
- image_id = None
- images = deployment_driver.shell.glance.get_images()
- for image in images:
- if image['name'] == sliver.image.name:
- image_id = image['id']
-
- # look up key name at the deployment
- # create/fetch keypair
- keyname = None
- if sliver.creator.public_key:
- keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
- sliver.slice.name
- key_fields = {'name': keyname,
- 'public_key': sliver.creator.public_key}
- driver.create_keypair(**key_fields)
+ if sliver.slice.creator.public_key:
+ pubkeys.add(sliver.slice.creator.public_key)
- userData = self.get_userdata(sliver)
- if sliver.userData:
- userData = sliver.userData
+ nics = []
+ networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+ network_deployments = NetworkDeployments.objects.filter(network__in=networks,
+ deployment=sliver.node.deployment)
- instance = driver.spawn_instance(name=sliver.name,
- key_name = keyname,
- image_id = image_id,
- hostname = sliver.node.name,
- pubkeys = pubkeys,
- nics = nics,
- userdata = userData,
- flavor_name = sliver.flavor.flavor )
- sliver.instance_id = instance.id
- sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+ for network_deployment in network_deployments:
+ if network_deployment.network.template.visibility == 'private' and \
+ network_deployment.network.template.translation == 'none' and network_deployment.net_id:
+ nics.append(network_deployment.net_id)
+
+ # now include network template
+ network_templates = [network.template.sharedNetworkName for network in networks \
+ if network.template.sharedNetworkName]
+
+ #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork)
+ driver = self.driver.admin_driver(tenant='admin', deployment=sliver.deploymentNetwork)
+ nets = driver.shell.quantum.list_networks()['networks']
+ for net in nets:
+ if net['name'] in network_templates:
+ nics.append(net['id'])
+
+ if (not nics):
+ for net in nets:
+ if net['name']=='public':
+ nics.append(net['id'])
+
+ # look up image id
+ deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
+ image_id = None
+ images = deployment_driver.shell.glanceclient.images.list()
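+ # prefer an exact name match; otherwise fall back to the first image listed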
+ for image in images:
+ if image.name == sliver.image.name or not image_id:
+ image_id = image.id
+
+ # look up key name at the deployment
+ # create/fetch keypair
+ keyname = None
+ keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+ sliver.slice.name
+ key_fields = {'name': keyname,
+ 'public_key': sliver.creator.public_key}
+
+
+ userData = self.get_userdata(sliver)
+ if sliver.userData:
+ userData = sliver.userData
+
+ sliver_name = '@'.join([sliver.slice.name,sliver.node.name])
+ tenant_fields = {'endpoint':sliver.node.deployment.auth_url,
+ 'admin_user': sliver.node.deployment.admin_user,
+ 'admin_password': sliver.node.deployment.admin_password,
+ 'admin_tenant': 'admin',
+ 'tenant': sliver.slice.name,
+ 'tenant_description': sliver.slice.description,
+ 'name':sliver_name,
+ 'image_id':image_id,
+ 'key_name':keyname,
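+ # flavor is hardcoded for now; the old path used sliver.flavor.flavor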
+ 'flavor_id':1,
+ 'nics':nics,
+ 'meta':metadata_update,
+ 'key':key_fields,
+ 'user_data':r'%s'%escape(userData)}
+
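+ # the playbook performs two operations: keypair creation, then instance boot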
+ res = run_template('sync_slivers.yaml', tenant_fields)
+ if (len(res)!=2):
+ raise Exception('Could not sync sliver %s'%sliver_name)
+ else:
+ sliver_id = res[1]['id'] # 0 is for the key
+
+ sliver.instance_id = sliver_id
+ sliver.instance_name = sliver_name
sliver.save()
- if sliver.instance_id and metadata_update:
- driver.update_instance_metadata(sliver.instance_id, metadata_update)
-
def delete_record(self, sliver):
if sliver.instance_id:
driver = self.driver.client_driver(caller=sliver.creator,
diff --git a/planetstack/openstack_observer/steps/sync_user_deployments.py b/planetstack/openstack_observer/steps/sync_user_deployments.py
index 0c28392..f7e41a0 100644
--- a/planetstack/openstack_observer/steps/sync_user_deployments.py
+++ b/planetstack/openstack_observer/steps/sync_user_deployments.py
@@ -10,6 +10,8 @@
from core.models.userdeployments import UserDeployments
from util.logger import Logger, logging
+from observer.ansible import *
+
logger = Logger(level=logging.INFO)
class SyncUserDeployments(OpenStackSyncStep):
@@ -30,50 +32,55 @@
logger.info("deployment %r has no admin_user, skipping" % user_deployment.deployment)
return
+ template = os_template_env.get_template('sync_user_deployments.yaml')
+
name = user_deployment.user.email[:user_deployment.user.email.find('@')]
- user_fields = {'name': user_deployment.user.email,
- 'email': user_deployment.user.email,
- 'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
- 'enabled': True}
- driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
- if not user_deployment.kuser_id:
- keystone_user = driver.create_user(**user_fields)
- user_deployment.kuser_id = keystone_user.id
- else:
- driver.update_user(user_deployment.kuser_id, user_fields)
- # setup user deployment home site roles
+ roles = []
+ # setup user deployment home site roles
if user_deployment.user.site:
site_deployments = SiteDeployments.objects.filter(site=user_deployment.user.site,
deployment=user_deployment.deployment)
if site_deployments:
# need the correct tenant id for site at the deployment
tenant_id = site_deployments[0].tenant_id
- driver.add_user_role(user_deployment.kuser_id,
- tenant_id, 'user')
+ tenant_name = site_deployments[0].site.login_base
+
+ roles.append('user')
if user_deployment.user.is_admin:
- driver.add_user_role(user_deployment.kuser_id, tenant_id, 'admin')
- else:
- # may have admin role so attempt to remove it
- driver.delete_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+ roles.append('admin')
+ else:
+ raise Exception('Internal error. Missing SiteDeployment for user %s'%user_deployment.user.email)
+ else:
+ raise Exception('Siteless user %s'%user_deployment.user.email)
- #if user_deployment.user.public_key:
- # if not user_deployment.user.keyname:
- # keyname = user_deployment.user.email.lower().replace('@', 'AT').replace('.', '')
- # user_deployment.user.keyname = keyname
- # user_deployment.user.save()
- #
- # user_driver = driver.client_driver(caller=user_deployment.user,
- # tenant=user_deployment.user.site.login_base,
- # deployment=user_deployment.deployment.name)
- # key_fields = {'name': user_deployment.user.keyname,
- # 'public_key': user_deployment.user.public_key}
- # user_driver.create_keypair(**key_fields)
- user_deployment.save()
+ user_fields = {'endpoint':user_deployment.deployment.auth_url,
+ 'name': user_deployment.user.email,
+ 'email': user_deployment.user.email,
+ 'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
+ 'admin_user': user_deployment.deployment.admin_user,
+ 'admin_password': user_deployment.deployment.admin_password,
+ 'admin_tenant': 'admin',
+ 'roles':roles,
+ 'tenant':tenant_name}
+
+ rendered = template.render(user_fields)
+ res = run_template('sync_user_deployments.yaml', user_fields)
+
+ # results is an array in which each element corresponds to an
+ # "ok" string received per operation. If we get as many oks as
+ # the number of operations we issued, that means a grand success.
+ # Otherwise, the number of oks tells us which operation failed.
+ expected_length = len(roles) + 1
+ if (len(res)==expected_length):
+ user_deployment.save()
+ elif (len(res)):
+ raise Exception('Could not assign roles for user %s'%user_fields['name'])
+ else:
+ raise Exception('Could not create or update user %s'%user_fields['name'])
def delete_record(self, user_deployment):
if user_deployment.kuser_id:
driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
driver.delete_user(user_deployment.kuser_id)
-