Add EC2 observer sync steps (work in progress)
diff --git a/planetstack/ec2_observer/steps/__init__.py b/planetstack/ec2_observer/steps/__init__.py
new file mode 100644
index 0000000..eabf46c
--- /dev/null
+++ b/planetstack/ec2_observer/steps/__init__.py
@@ -0,0 +1,15 @@
+#from .sync_external_routes import SyncExternalRoutes
+from .sync_network_slivers import SyncNetworkSlivers
+from .sync_networks import SyncNetworks
+from .sync_network_deployments import SyncNetworkDeployments
+from .sync_site_privileges import SyncSitePrivileges
+from .sync_sites import SyncSites
+from .sync_slice_memberships import SyncSliceMemberships
+from .sync_slices import SyncSlices
+#from .sync_sliver_ips import SyncSliverIps
+from .sync_slivers import SyncSlivers
+from .sync_users import SyncUsers
+from .sync_roles import SyncRoles
+from .sync_nodes import SyncNodes
+from .sync_images import SyncImages
+from .garbage_collector import GarbageCollector
diff --git a/planetstack/ec2_observer/steps/sync_external_routes.py b/planetstack/ec2_observer/steps/sync_external_routes.py
new file mode 100644
index 0000000..1e1a347
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_external_routes.py
@@ -0,0 +1,21 @@
+import os
+import base64
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from util.logger import Logger, logging
+
+# fix: logger was used in call() but never imported/defined
+logger = Logger(level=logging.INFO)
+
+class SyncExternalRoutes(SyncStep):
+    # XXX what does this provide?
+    provides=[]
+    requested_interval = 86400 # This step is slow like a pig. Let's run it infrequently
+
+    def call(self, **args):
+        routes = self.driver.get_external_routes()
+        subnets = self.driver.shell.quantum.list_subnets()['subnets']
+        for subnet in subnets:
+            try:
+                self.driver.add_external_route(subnet, routes)
+            except:
+                logger.log_exc("failed to add external route for subnet %s" % subnet)
diff --git a/planetstack/ec2_observer/steps/sync_images.py b/planetstack/ec2_observer/steps/sync_images.py
new file mode 100644
index 0000000..32b3363
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_images.py
@@ -0,0 +1,32 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models.image import Image
+from awslib import *
+
+class SyncImages(SyncStep):  # fix: OpenStackSyncStep was undefined here
+    provides=[Image]
+    requested_interval=3600
+
+    def fetch_pending(self):
+        images = Image.objects.all()
+        image_names = [image.name for image in images]
+
+        new_images = []
+
+        aws_images = aws_run('ec2 describe-images')
+
+        for aws_image in aws_images:
+            if aws_image['name'] not in image_names:  # fix: compared dict against names
+                image = Image(image_id=aws_image['ImageId'],  # TODO(review): confirm key name in aws_run output
+                              name=aws_image['name'],
+                              disk_format='XXX',
+                              container_format='XXX')
+                new_images.append(image)
+
+        return new_images
+
+    def sync_record(self, image):
+        image.save()
diff --git a/planetstack/ec2_observer/steps/sync_mock_nodes.py b/planetstack/ec2_observer/steps/sync_mock_nodes.py
new file mode 100644
index 0000000..3cb3dd0
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_mock_nodes.py
@@ -0,0 +1,41 @@
+import os
+import base64
+import random
+from datetime import datetime
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.node import Node
+from core.models.deployment import Deployment
+from core.models.site import Site
+
+class SyncNodes(OpenStackSyncStep):
+    provides=[Node]
+    requested_interval=0
+
+    def fetch_pending(self):
+        # collect local sites; mock nodes are named after EC2 instance types
+        sites = Site.objects.all()
+        # fix: imported class is Deployment (singular); take the single record
+        one_and_only_deployment = Deployment.objects.all()[0]
+
+        instance_types = 'm1.small | m1.medium | m1.large | m1.xlarge | m3.medium | m3.large | m3.xlarge | m3.2xlarge'.split(' | ')
+
+        all_new_nodes = []
+        for s in sites:
+            node_names = [n.name for n in s.nodes.all()]
+            new_node_names = list(set(instance_types) - set(node_names))
+            new_nodes = []
+            for node_name in new_node_names:
+                node = Node(name=node_name,
+                            site=s, deployment=one_and_only_deployment)
+                new_nodes.append(node)
+
+            all_new_nodes.extend(new_nodes)
+
+        return all_new_nodes
+
+
+    def sync_record(self, node):
+        node.save()
+
diff --git a/planetstack/ec2_observer/steps/sync_network_deployments.py b/planetstack/ec2_observer/steps/sync_network_deployments.py
new file mode 100644
index 0000000..d1b51d5
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_network_deployments.py
@@ -0,0 +1,117 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+from core.models.slice import *
+from core.models.slice import Sliver
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncNetworkDeployments(OpenStackSyncStep):
+    requested_interval = 0
+    provides=[Network, NetworkDeployments, Sliver]
+
+ def fetch_pending(self):
+ # network deployments are not visible to users. We must ensure
+ # networks are deployed at all deploymets available to their slices.
+ slice_deployments = SliceDeployments.objects.all()
+ slice_deploy_lookup = defaultdict(list)
+ for slice_deployment in slice_deployments:
+ slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+
+ network_deployments = NetworkDeployments.objects.all()
+ network_deploy_lookup = defaultdict(list)
+ for network_deployment in network_deployments:
+ network_deploy_lookup[network_deployment.network].append(network_deployment.deployment)
+
+ for network in Network.objects.filter():
+ # ignore networks that have
+ # template.visibility = private and template.translation = none
+ if network.template.visibility == 'private' and not network.template.translation == 'none':
+ continue
+ expected_deployments = slice_deploy_lookup[network.owner]
+ for expected_deployment in expected_deployments:
+ if network not in network_deploy_lookup or \
+ expected_deployment not in network_deploy_lookup[network]:
+ nd = NetworkDeployments(network=network, deployment=expected_deployment)
+ nd.save()
+ return NetworkDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def get_next_subnet(self, deployment=None):
+        # limit ourself to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')
+        driver = self.driver.admin_driver(deployment=deployment)
+        subnets = driver.shell.quantum.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])]
+        ints.sort()
+        if ints:
+            last_ip = IPAddress(ints[-1])
+        else:
+            last_ip = IPAddress('10.0.0.0')
+        last_network = IPNetwork(str(last_ip) + "/24")
+        next_network = IPNetwork(str(IPAddress(last_network.first + last_network.size)) + "/24")
+        return next_network
+
+ def save_network_deployment(self, network_deployment):
+ if not network_deployment.network_id and network_deployment.network.template.sharedNetworkName:
+ network_deployment.network_id = network_deployment.network.template.sharedNetworkId
+
+ if not network_deployment.net_id:
+ network_name = network_deployment.network.name
+
+ # create network
+ os_network = self.driver.create_network(network_name, shared=True)
+ network_deployment.net_id = os_network['id']
+
+ # create router
+ #router = self.driver.create_router(network_name)
+ #network_deployment.router_id = router['id']
+
+ # create subnet
+ next_subnet = self.get_next_subnet(deployment=network_deployment.deployment.name)
+ cidr = str(next_subnet.cidr)
+ ip_version = next_subnet.version
+ start = str(next_subnet[2])
+ end = str(next_subnet[-2])
+ subnet = self.driver.create_subnet(name=network_name,
+ network_id = network_deployment.net_id,
+ cidr_ip = cidr,
+ ip_version = ip_version,
+ start = start,
+ end = end)
+ network_deployment.subnet = cidr
+ network_deployment.subnet_id = subnet['id']
+ # add subnet as interface to slice's router
+ #self.driver.add_router_interface(router['id'], subnet['id'])
+ # add external route
+ #self.driver.add_external_route(subnet)
+ logger.info("created private subnet (%s) for network: %s" % (cidr, network_deployment.network))
+ else:
+ (network_deployment.subnet_id, network_deployment.subnet) = self.driver.get_network_subnet(network_deployment.net_id)
+ logger.info("sync'ed subnet (%s) for network: %s" % (network_deployment.subnet, network_deployment.network))
+
+ network_deployment.save()
+
+    def sync_record(self, network_deployment):
+        if network_deployment.network.owner and network_deployment.network.owner.creator:
+            # impersonate the network owner's creator inside the owner tenant
+            real_driver = self.driver
+            try:
+                self.driver = self.driver.client_driver(caller=network_deployment.network.owner.creator,
+                                            tenant=network_deployment.network.owner.name,
+                                            deployment=network_deployment.deployment.name)
+                self.save_network_deployment(network_deployment)
+                logger.info("saved network deployment: %s" % (network_deployment))
+            except Exception,e:
+                logger.log_exc("save network deployment failed: %s" % network_deployment)
+                raise e
+            finally:
+                # fix: restore the admin driver even when the save fails
+                self.driver = real_driver
+
diff --git a/planetstack/ec2_observer/steps/sync_network_slivers.py b/planetstack/ec2_observer/steps/sync_network_slivers.py
new file mode 100644
index 0000000..7e69330
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_network_slivers.py
@@ -0,0 +1,88 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+
+class SyncNetworkSlivers(OpenStackSyncStep):
+    requested_interval = 3600
+    provides=[NetworkSliver]
+
+    def fetch_pending(self):
+        return NetworkSliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def call(self, failed=[]):
+        networkSlivers = NetworkSliver.objects.all()
+        networkSlivers_by_id = {}
+        networkSlivers_by_port = {}
+        for networkSliver in networkSlivers:
+            networkSlivers_by_id[networkSliver.id] = networkSliver
+            networkSlivers_by_port[networkSliver.port_id] = networkSliver
+
+        networks = Network.objects.all()
+        networks_by_id = {}
+        for network in networks:
+            networks_by_id[network.network_id] = network
+
+        slivers = Sliver.objects.all()
+        slivers_by_instance_id = {}
+        for sliver in slivers:
+            slivers_by_instance_id[sliver.instance_id] = sliver
+
+        if not slivers:
+            # nothing to correlate, and no credentials from which
+            # to construct an admin driver
+            return
+
+        # fix: this previously used the leaked 'sliver' loop variable from the
+        # loop above; make the (arbitrary) credential choice explicit.
+        sliver = slivers[0]
+        driver = self.driver.admin_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.node.deployment.name)
+        ports = driver.shell.quantum.list_ports()["ports"]
+        for port in ports:
+            if port["id"] in networkSlivers_by_port:
+                # we already have it
+                print "already accounted for port", port["id"]
+                continue
+
+            if port["device_owner"] != "compute:nova":
+                # we only want the ports that connect to instances
+                continue
+
+            network = networks_by_id.get(port['network_id'], None)
+            if not network:
+                #print "no network for port", port["id"], "network", port["network_id"]
+                continue
+
+            sliver = slivers_by_instance_id.get(port['device_id'], None)
+            if not sliver:
+                print "no sliver for port", port["id"], "device_id", port['device_id']
+                continue
+
+            if network.template.sharedNetworkId is not None:
+                # If it's a shared network template, then more than one network
+                # object maps to the quantum network. We have to do a whole bunch
+                # of extra work to find the right one.
+                networks = network.template.network_set.all()
+                network = None
+                for candidate_network in networks:
+                    if (candidate_network.owner == sliver.slice):
+                        print "found network", candidate_network
+                        network = candidate_network
+
+                if not network:
+                    print "failed to find the correct network for a shared template for port", port["id"], "network", port["network_id"]
+                    continue
+
+            if not port["fixed_ips"]:
+                print "port", port["id"], "has no fixed_ips"
+                continue
+
+#            print "XXX", port
+
+            ns = NetworkSliver(network=network,
+                               sliver=sliver,
+                               ip=port["fixed_ips"][0]["ip_address"],
+                               port_id=port["id"])
+            ns.save()
diff --git a/planetstack/ec2_observer/steps/sync_networks.py b/planetstack/ec2_observer/steps/sync_networks.py
new file mode 100644
index 0000000..cc277c6
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_networks.py
@@ -0,0 +1,20 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.network import *
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncNetworks(OpenStackSyncStep):  # fix: SyncStep was not imported here
+    provides=[Network]
+    requested_interval = 0
+
+    def fetch_pending(self):
+        return Network.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, network):
+        network.save()
+
diff --git a/planetstack/ec2_observer/steps/sync_roles.py b/planetstack/ec2_observer/steps/sync_roles.py
new file mode 100644
index 0000000..5dc30d9
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_roles.py
@@ -0,0 +1,37 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole
+from core.models.slice import SliceRole
+from core.models.deployment import Deployment, DeploymentRole
+
+class SyncRoles(OpenStackSyncStep):
+ provides=[Role]
+ requested_interval=0
+
+ def fetch_pending(self):
+ site_roles = SiteRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ slice_roles = SliceRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+ deployment_roles = DeploymentRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ roles = []
+ for site_role in site_roles:
+ roles.append(site_role)
+ for slice_role in slice_roles:
+ roles.append(slice_role)
+ for deployment_role in deployment_roles:
+ roles.append(deployment_role)
+
+ return roles
+
+
+ def sync_record(self, role):
+ if not role.enacted:
+ deployments = Deployment.objects.all()
+ for deployment in deployments:
+ driver = self.driver.admin_driver(deployment=deployment.name)
+ driver.create_role(role.role)
+ role.save()
diff --git a/planetstack/ec2_observer/steps/sync_site_deployments.py b/planetstack/ec2_observer/steps/sync_site_deployments.py
new file mode 100644
index 0000000..a996c85
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_site_deployments.py
@@ -0,0 +1,28 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+
+class SyncSiteDeployments(OpenStackSyncStep):
+ requested_interval=0
+ provides=[Site, SiteDeployments]
+
+ def fetch_pending(self):
+ return SiteDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, site_deployment):
+        if not site_deployment.tenant_id:
+            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+            tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
+                                          description=site_deployment.site.name,
+                                          enabled=site_deployment.site.enabled)
+            site_deployment.tenant_id = tenant.id
+            site_deployment.save()
+        elif site_deployment.site.id and site_deployment.tenant_id:
+            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)  # fix: was site_deployment.name
+            driver.update_tenant(site_deployment.tenant_id,
+                                 description=site_deployment.site.name,
+                                 enabled=site_deployment.site.enabled)
+
diff --git a/planetstack/ec2_observer/steps/sync_site_privileges.py b/planetstack/ec2_observer/steps/sync_site_privileges.py
new file mode 100644
index 0000000..b57ae43
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_site_privileges.py
@@ -0,0 +1,31 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import *
+from core.models.user import User, UserDeployments
+
+class SyncSitePrivileges(OpenStackSyncStep):
+ requested_interval=0
+ provides=[SitePrivilege]
+
+ def fetch_pending(self):
+ return SitePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, site_priv):
+ if site_priv.user.kuser_id and site_priv.site.tenant_id:
+ self.driver.add_user_role(site_priv.user.kuser_id,
+ site_priv.site.tenant_id,
+ site_priv.role.role)
+
+ # sync site privileges at all site deployments
+ site_deployments = SiteDeployments.objects.filter(site=site_priv.site)
+ for site_deployment in site_deployments:
+ user_deployments = UserDeployments.objects.filter(deployment=site_deployment.deployment)
+ if user_deployments:
+ kuser_id = user_deployments[0].kuser_id
+ driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
+ driver.add_user_role(kuser_id,
+ site_deployment.tenant_id,
+ site_priv.role.role)
diff --git a/planetstack/ec2_observer/steps/sync_sites.py b/planetstack/ec2_observer/steps/sync_sites.py
new file mode 100644
index 0000000..5771aef
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_sites.py
@@ -0,0 +1,34 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.syncstep import SyncStep
+from core.models.site import Site
+from ec2_observer.awslib import *
+
+class SyncSites(SyncStep):
+ provides=[Site]
+ requested_interval=3600
+
+    def fetch_pending(self):
+        current_sites = Site.objects.all()
+        zones = aws_run('ec2 describe-availability-zones')
+        available_sites = [zone['ZoneName'] for zone in zones]
+
+        new_site_names = list(set(available_sites) - set([site.login_base for site in current_sites]))
+
+        new_sites = []
+        for s in new_site_names:
+            site = Site(name=s,
+                        login_base=s,
+                        site_url="www.amazon.com",
+                        enabled=True,
+                        is_public=True,
+                        abbreviated_name=s)
+            new_sites.append(site)
+
+        return new_sites
+
+    def sync_record(self, site):
+        site.save()
+
diff --git a/planetstack/ec2_observer/steps/sync_slice_deployments.py b/planetstack/ec2_observer/steps/sync_slice_deployments.py
new file mode 100644
index 0000000..580edd1
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slice_deployments.py
@@ -0,0 +1,107 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.deployment import Deployment
+from core.models.site import SiteDeployments
+from core.models.slice import Slice, SliceDeployments
+from core.models.user import UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliceDeployments(OpenStackSyncStep):
+ provides=[SliceDeployments]
+ requested_interval=0
+
+ def fetch_pending(self):
+ # slice deployments are not visible to users. We must ensure
+ # slices are deployed at all deploymets available to their site.
+ site_deployments = SiteDeployments.objects.all()
+ site_deploy_lookup = defaultdict(list)
+ for site_deployment in site_deployments:
+ site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+
+ slice_deployments = SliceDeployments.objects.all()
+ slice_deploy_lookup = defaultdict(list)
+ for slice_deployment in slice_deployments:
+ slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+
+ all_deployments = Deployment.objects.all()
+ for slice in Slice.objects.all():
+ # slices are added to all deployments for now
+ expected_deployments = all_deployments
+ #expected_deployments = site_deploy_lookup[slice.site]
+ for expected_deployment in expected_deployments:
+ if slice not in slice_deploy_lookup or \
+ expected_deployment not in slice_deploy_lookup[slice]:
+ sd = SliceDeployments(slice=slice, deployment=expected_deployment)
+ sd.save()
+
+ # now we can return all slice deployments that need to be enacted
+ return SliceDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def get_next_subnet(self, deployment=None):
+        # limit ourself to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')
+        driver = self.driver.admin_driver(deployment=deployment)
+        subnets = driver.shell.quantum.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])]
+        ints.sort()
+        if ints:
+            last_ip = IPAddress(ints[-1])
+        else:
+            # fix: the else branch was dead - ints[-1] was re-read unconditionally
+            last_ip = IPAddress('10.0.0.0')
+        last_network = IPNetwork(str(last_ip) + "/24")
+        next_network = IPNetwork(str(IPAddress(last_network.first + last_network.size)) + "/24")
+        return next_network
+
+
+ def sync_record(self, slice_deployment):
+ logger.info("sync'ing slice deployment %s" % slice_deployment)
+ if not slice_deployment.tenant_id:
+ nova_fields = {'tenant_name': slice_deployment.slice.name,
+ 'description': slice_deployment.slice.description,
+ 'enabled': slice_deployment.slice.enabled}
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ tenant = driver.create_tenant(**nova_fields)
+ slice_deployment.tenant_id = tenant.id
+
+ # XXX give caller an admin role at the tenant they've created
+ deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
+ deployment=slice_deployment.deployment)
+ if not deployment_users:
+            logger.info("slice creator %s has no account at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
+ else:
+ deployment_user = deployment_users[0]
+ # lookup user id at this deployment
+ kuser= driver.shell.keystone.users.find(email=slice_deployment.slice.creator.email)
+
+ # add required roles at the slice's tenant
+ driver.add_user_role(kuser.id, tenant.id, 'admin')
+
+ # refresh credentials using this tenant
+ client_driver = self.driver.client_driver(caller=deployment_user.user,
+ tenant=tenant.name,
+ deployment=slice_deployment.deployment.name)
+
+
+ if slice_deployment.id and slice_deployment.tenant_id:
+ # update existing tenant
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ driver.update_tenant(slice_deployment.tenant_id,
+ description=slice_deployment.slice.description,
+ enabled=slice_deployment.slice.enabled)
+
+ if slice_deployment.tenant_id:
+ # update slice/tenant quota
+ driver = self.driver.client_driver(deployment=slice_deployment.deployment.name,
+ tenant=slice_deployment.slice.name)
+ driver.shell.nova.quotas.update(tenant_id=slice_deployment.tenant_id, instances=int(slice_deployment.slice.max_slivers))
+
+ slice_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_slice_memberships.py b/planetstack/ec2_observer/steps/sync_slice_memberships.py
new file mode 100644
index 0000000..b6b1638
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slice_memberships.py
@@ -0,0 +1,29 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import *
+from core.models.user import UserDeployments
+
+class SyncSliceMemberships(OpenStackSyncStep):
+ requested_interval=0
+ provides=[SlicePrivilege]
+
+ def fetch_pending(self):
+ return SlicePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, slice_memb):
+ # sync slice memberships at all slice deployments
+ slice_deployments = SliceDeployments.objects.filter(slice=slice_memb.slice)
+ for slice_deployment in slice_deployments:
+ if not slice_deployment.tenant_id:
+ continue
+ user_deployments = UserDeployments.objects.filter(deployment=slice_deployment.deployment,
+ user=slice_memb.user)
+ if user_deployments:
+ kuser_id = user_deployments[0].kuser_id
+ driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+ driver.add_user_role(kuser_id,
+ slice_deployment.tenant_id,
+ slice_memb.role.role)
diff --git a/planetstack/ec2_observer/steps/sync_slices.py b/planetstack/ec2_observer/steps/sync_slices.py
new file mode 100644
index 0000000..6cf0772
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slices.py
@@ -0,0 +1,23 @@
+import os
+import base64
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import Slice, SliceDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSlices(OpenStackSyncStep):
+ provides=[Slice]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return Slice.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, slice):
+ for slice_deployment in SliceDeployments.objects.filter(slice=slice):
+ # bump the 'updated' timestamp and trigger observer to update
+ # slice across all deployments
+ slice_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_sliver_ips.py b/planetstack/ec2_observer/steps/sync_sliver_ips.py
new file mode 100644
index 0000000..e2212d1
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_sliver_ips.py
@@ -0,0 +1,32 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from util.logger import Logger, logging
+
+# fix: logger was used in sync_record() but never imported/defined
+logger = Logger(level=logging.INFO)
+
+class SyncSliverIps(OpenStackSyncStep):
+    provides=[Sliver]
+    requested_interval=0
+
+    def fetch_pending(self):
+        slivers = Sliver.objects.filter(ip=None)
+        return slivers
+
+    def sync_record(self, sliver):
+        driver = self.driver.client_driver(tenant=sliver.slice.name,
+                                           deployment=sliver.node.deployment.name)
+        servers = driver.shell.nova.servers.findall(id=sliver.instance_id)
+        if not servers:
+            return
+        server = servers[0]
+        ips = server.addresses.get(sliver.slice.name, [])
+        if not ips:
+            return
+        sliver.ip = ips[0]['addr']
+        if sliver.ip:
+            sliver.save()
+            logger.info("saved sliver ip: %s %s" % (sliver, ips[0]))
diff --git a/planetstack/ec2_observer/steps/sync_slivers.py b/planetstack/ec2_observer/steps/sync_slivers.py
new file mode 100644
index 0000000..b576bbc
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_slivers.py
@@ -0,0 +1,91 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from core.models.slice import SlicePrivilege, SliceDeployments
+from core.models.network import Network, NetworkSlice, NetworkDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSlivers(OpenStackSyncStep):
+ provides=[Sliver]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, sliver):
+        logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+        metadata_update = {}
+        if ("numberCores" in sliver.changed_fields):
+            metadata_update["cpu_cores"] = str(sliver.numberCores)
+
+        for tag in sliver.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)  # fix: hoisted; was undefined on the metadata-only path below
+        if not sliver.instance_id:
+            # public keys
+            slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+            pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+            if sliver.creator.public_key:
+                pubkeys.append(sliver.creator.public_key)
+            if sliver.slice.creator.public_key:
+                pubkeys.append(sliver.slice.creator.public_key)
+            # networks
+            # include all networks available to the slice and/or associated network templates
+            nics = []
+            networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
+            network_deployments = NetworkDeployments.objects.filter(network__in=networks,
+                                                                    deployment=sliver.node.deployment)
+            # Gather private networks first. This includes networks with a template that has
+            # visibility = private and translation = none
+            for network_deployment in network_deployments:
+                if network_deployment.network.template.visibility == 'private' and \
+                   network_deployment.network.template.translation == 'none':
+                    nics.append({'net-id': network_deployment.net_id})
+
+            # now include network template
+            network_templates = [network.template.sharedNetworkName for network in networks \
+                                 if network.template.sharedNetworkName]
+            for net in driver.shell.quantum.list_networks()['networks']:
+                if net['name'] in network_templates:
+                    nics.append({'net-id': net['id']})
+
+            # (removed stray debug dump to /tmp/scott-manager)
+
+            # look up image id
+            deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
+            image_id = None
+            images = deployment_driver.shell.glance.get_images()
+            for image in images:
+                if image['name'] == sliver.image.name:
+                    image_id = image['id']
+
+            # look up key name at the deployment
+            # create/fetch keypair
+            keyname = None
+            if sliver.creator.public_key:
+                keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+                          sliver.slice.name
+                key_fields = {'name': keyname,
+                              'public_key': sliver.creator.public_key}
+                driver.create_keypair(**key_fields)
+
+            instance = driver.spawn_instance(name=sliver.name,
+                                key_name = keyname,
+                                image_id = image_id,
+                                hostname = sliver.node.name,
+                                pubkeys = pubkeys,
+                                nics = nics )
+            sliver.instance_id = instance.id
+            sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+            sliver.save()
+
+        if sliver.instance_id and metadata_update:
+            driver.update_instance_metadata(sliver.instance_id, metadata_update)
+
diff --git a/planetstack/ec2_observer/steps/sync_user_deployments.py b/planetstack/ec2_observer/steps/sync_user_deployments.py
new file mode 100644
index 0000000..39943f7
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_user_deployments.py
@@ -0,0 +1,98 @@
+import os
+import base64
+import hashlib
+from collections import defaultdict
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.site import SiteDeployments, Deployment
+from core.models.user import User, UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncUserDeployments(OpenStackSyncStep):
+ provides=[User, UserDeployments]
+ requested_interval=0
+
+ def fetch_pending(self):
+ # user deployments are not visible to users. We must ensure
+ # user are deployed at all deploymets available to their sites.
+
+ deployments = Deployment.objects.all()
+ site_deployments = SiteDeployments.objects.all()
+ site_deploy_lookup = defaultdict(list)
+ for site_deployment in site_deployments:
+ site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+
+ user_deploy_lookup = defaultdict(list)
+ for user_deployment in UserDeployments.objects.all():
+ user_deploy_lookup[user_deployment.user].append(user_deployment.deployment)
+
+ all_deployments = Deployment.objects.filter()
+ for user in User.objects.all():
+ if user.is_admin:
+ # admins should have an account at all deployments
+ expected_deployments = deployments
+ else:
+ # normal users should have an account at their site's deployments
+ #expected_deployments = site_deploy_lookup[user.site]
+ # users are added to all deployments for now
+ expected_deployments = deployments
+ for expected_deployment in expected_deployments:
+ if not user in user_deploy_lookup or \
+ expected_deployment not in user_deploy_lookup[user]:
+ # add new record
+ ud = UserDeployments(user=user, deployment=expected_deployment)
+ ud.save()
+ #user_deployments.append(ud)
+ #else:
+ # # update existing record
+ # ud = UserDeployments.objects.get(user=user, deployment=expected_deployment)
+ # user_deployments.append(ud)
+
+ return UserDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, user_deployment):
+ logger.info("sync'ing user %s at deployment %s" % (user_deployment.user, user_deployment.deployment.name))
+ name = user_deployment.user.email[:user_deployment.user.email.find('@')]
+ user_fields = {'name': user_deployment.user.email,
+ 'email': user_deployment.user.email,
+ 'password': hashlib.md5(user_deployment.user.password).hexdigest()[:6],
+ 'enabled': True}
+ driver = self.driver.admin_driver(deployment=user_deployment.deployment.name)
+ if not user_deployment.kuser_id:
+ keystone_user = driver.create_user(**user_fields)
+ user_deployment.kuser_id = keystone_user.id
+ else:
+ driver.update_user(user_deployment.kuser_id, user_fields)
+
+ # setup user deployment home site roles
+ if user_deployment.user.site:
+ site_deployments = SiteDeployments.objects.filter(site=user_deployment.user.site,
+ deployment=user_deployment.deployment)
+ if site_deployments:
+ # need the correct tenant id for site at the deployment
+ tenant_id = site_deployments[0].tenant_id
+ driver.add_user_role(user_deployment.kuser_id,
+ tenant_id, 'user')
+ if user_deployment.user.is_admin:
+ driver.add_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+ else:
+ # may have admin role so attempt to remove it
+ driver.delete_user_role(user_deployment.kuser_id, tenant_id, 'admin')
+
+ #if user_deployment.user.public_key:
+ # if not user_deployment.user.keyname:
+ # keyname = user_deployment.user.email.lower().replace('@', 'AT').replace('.', '')
+ # user_deployment.user.keyname = keyname
+ # user_deployment.user.save()
+ #
+ # user_driver = driver.client_driver(caller=user_deployment.user,
+ # tenant=user_deployment.user.site.login_base,
+ # deployment=user_deployment.deployment.name)
+ # key_fields = {'name': user_deployment.user.keyname,
+ # 'public_key': user_deployment.user.public_key}
+ # user_driver.create_keypair(**key_fields)
+
+ user_deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_users.py b/planetstack/ec2_observer/steps/sync_users.py
new file mode 100644
index 0000000..71f9c0f
--- /dev/null
+++ b/planetstack/ec2_observer/steps/sync_users.py
@@ -0,0 +1,20 @@
+import os
+import base64
+import hashlib
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.user import User, UserDeployments
+
+class SyncUsers(OpenStackSyncStep):
+ provides=[User]
+ requested_interval=0
+
+ def fetch_pending(self):
+ return User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+ def sync_record(self, user):
+ for user_deployment in UserDeployments.objects.filter(user=user):
+ # bump the 'updated' field so user account are updated across
+ # deployments.
+ user_deployment.save()