Remove OpenStack synchronizer steps and model-policy imports from core
Change-Id: I2994842dfc717f23ff0971f4f6cb5dc83d5dbed9
diff --git a/xos/core/models/plcorebase.py b/xos/core/models/plcorebase.py
index b84b526..e2f4b48 100644
--- a/xos/core/models/plcorebase.py
+++ b/xos/core/models/plcorebase.py
@@ -10,7 +10,6 @@
from django.forms.models import model_to_dict
from django.utils import timezone
from django.core.exceptions import PermissionDenied
-import synchronizers.model_policy
from model_autodeletion import ephemeral_models
from cgi import escape as html_escape
diff --git a/xos/core/models/user.py b/xos/core/models/user.py
index d85c58c..aa00d44 100644
--- a/xos/core/models/user.py
+++ b/xos/core/models/user.py
@@ -5,7 +5,6 @@
from collections import defaultdict
from operator import attrgetter, itemgetter
-import synchronizers.model_policy
from core.middleware import get_request
from core.models import DashboardView, PlCoreBase, PlModelMixIn, Site
from core.models.plcorebase import StrippedCharField
diff --git a/xos/manage.py b/xos/manage.py
index 219d0e7..48695f0 100644
--- a/xos/manage.py
+++ b/xos/manage.py
@@ -12,10 +12,10 @@
os.system("/opt/xos/tools/xos-manage makemigrations")
sys.argv.remove("--makemigrations")
- if "--nomodelpolicy" in sys.argv:
- import synchronizers.model_policy as model_policy
- model_policy.EnableModelPolicy(False)
- sys.argv.remove("--nomodelpolicy")
+# if "--nomodelpolicy" in sys.argv:
+# import synchronizers.model_policy as model_policy
+# model_policy.EnableModelPolicy(False)
+# sys.argv.remove("--nomodelpolicy")
if "--noobserver" in sys.argv:
import synchronizers.base as observer
diff --git a/xos/synchronizers/base/backend.py b/xos/synchronizers/base/backend.py
index 206c27f..3526d55 100644
--- a/xos/synchronizers/base/backend.py
+++ b/xos/synchronizers/base/backend.py
@@ -3,9 +3,7 @@
import threading
import time
from synchronizers.base.event_loop import XOSObserver
-#from synchronizers.base.event_manager import EventListener
from xos.logger import Logger, logging
-from synchronizers.model_policy import run_policy
from xos.config import Config
from django.utils import timezone
from diag import update_diag
@@ -25,18 +23,13 @@
# start model policies thread
observer_name = getattr(Config(), "observer_name", "")
if (not observer_name):
+ from synchronizers.model_policy import run_policy
model_policy_thread = threading.Thread(target=run_policy)
model_policy_thread.start()
else:
model_policy_thread = None
print "Skipping model policies thread for service observer."
-
- # start event listene
- #event_manager = EventListener(wake_up=observer.wake_up)
- #event_manager_thread = threading.Thread(target=event_manager.run)
- #event_manager_thread.start()
-
while True:
try:
time.sleep(1000)
diff --git a/xos/synchronizers/base/steps/delete_slivers.yaml b/xos/synchronizers/base/steps/delete_slivers.yaml
deleted file mode 100644
index fa6b879..0000000
--- a/xos/synchronizers/base/steps/delete_slivers.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
-
- - nova_compute:
- state: absent
- name: {{ name }}
diff --git a/xos/synchronizers/base/steps/purge_disabled_users.py b/xos/synchronizers/base/steps/purge_disabled_users.py
deleted file mode 100644
index 0973b8c..0000000
--- a/xos/synchronizers/base/steps/purge_disabled_users.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import base64
-import datetime
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.user import User
-from xos.logger import observer_logger as logger
-
-class SyncRoles(OpenStackSyncStep):
- provides=[User]
- requested_interval=0
- observes=User
-
- def fetch_pending(self, deleted):
- if (deleted):
- # users marked as deleted
- return User.deleted_objects.all()
- else:
- # disabled users that haven't been updated in over a week
- one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
- return User.objects.filter(is_active=False, updated__gt=one_week_ago)
-
- def sync_record(self, user):
- user.delete()
diff --git a/xos/synchronizers/base/steps/sliver.yaml b/xos/synchronizers/base/steps/sliver.yaml
deleted file mode 100644
index e630415..0000000
--- a/xos/synchronizers/base/steps/sliver.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - nova_compute:
- state: present
- auth_url: http://172.31.38.128:5000/v2.0/
- login_username: admin
- login_password: 6a789bf69dd647e2
- login_tenant_name: admin
- name: gloopy
- image_id: 3ee851df-b35a-41c5-8551-f681e7209095
- key_name: boo
- wait_for: 200
- flavor_id: 3
- nics:
- - net-id: d1de537b-80dc-4c1b-aa5f-4a197b33b5f6
diff --git a/xos/synchronizers/base/steps/sync_container.py b/xos/synchronizers/base/steps/sync_container.py
deleted file mode 100644
index b944495..0000000
--- a/xos/synchronizers/base/steps/sync_container.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from synchronizers.base.ansible import run_template_ssh
-from core.models import Service, Slice, Instance
-from services.onos.models import ONOSService, ONOSApp
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncContainer(SyncInstanceUsingAnsible):
- provides=[Instance]
- observes=Instance
- requested_interval=0
- template_name = "sync_container.yaml"
-
- def __init__(self, *args, **kwargs):
- super(SyncContainer, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deletion=False):
- objs = super(SyncContainer, self).fetch_pending(deletion)
- objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
- return objs
-
- def get_instance_port(self, container_port):
- for p in container_port.network.links.all():
- if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
- return p
- return None
-
- def get_parent_port_mac(self, instance, port):
- if not instance.parent:
- raise Exception("instance has no parent")
- for parent_port in instance.parent.ports.all():
- if parent_port.network == port.network:
- if not parent_port.mac:
- raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
- return parent_port.mac
- raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
-
- def get_ports(self, o):
- i=0
- ports = []
- if (o.slice.network in ["host", "bridged"]):
- pass # no ports in host or bridged mode
- else:
- for port in o.ports.all():
- if (not port.ip):
- # 'unmanaged' ports may have an ip, but no mac
- # XXX: are there any ports that have a mac but no ip?
- raise DeferredException("Port on network %s is not yet ready" % port.network.name)
-
- pd={}
- pd["mac"] = port.mac or ""
- pd["ip"] = port.ip or ""
- pd["xos_network_id"] = port.network.id
-
- if port.network.name == "wan_network":
- if port.ip:
- (a, b, c, d) = port.ip.split('.')
- pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
-
-
- if o.isolation == "container":
- # container on bare metal
- instance_port = self.get_instance_port(port)
- if not instance_port:
- raise DeferredException("No instance on slice for port on network %s" % port.network.name)
-
- pd["snoop_instance_mac"] = instance_port.mac
- pd["snoop_instance_id"] = instance_port.instance.instance_id
- pd["src_device"] = ""
- pd["bridge"] = "br-int"
- else:
- # container in VM
- pd["snoop_instance_mac"] = ""
- pd["snoop_instance_id"] = ""
- pd["parent_mac"] = self.get_parent_port_mac(o, port)
- pd["bridge"] = ""
-
- for (k,v) in port.get_parameters().items():
- pd[k] = v
-
- ports.append(pd)
-
- # for any ports that don't have a device, assign one
- used_ports = [x["device"] for x in ports if ("device" in x)]
- avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
- for port in ports:
- if not port.get("device",None):
- port["device"] = avail_ports.pop(0)
-
- return ports
-
- def get_extra_attributes(self, o):
- fields={}
- fields["ansible_tag"] = "container-%s" % str(o.id)
- if o.image.tag:
- fields["docker_image"] = o.image.path + ":" + o.image.tag
- else:
- fields["docker_image"] = o.image.path
- fields["ports"] = self.get_ports(o)
- if o.volumes:
- fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
- else:
- fields["volumes"] = ""
- fields["network_method"] = o.slice.network or "default"
- return fields
-
- def sync_record(self, o):
- logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
-
- fields = self.get_ansible_fields(o)
-
- # If 'o' defines a 'sync_attributes' list, then we'll copy those
- # attributes into the Ansible recipe's field list automatically.
- if hasattr(o, "sync_attributes"):
- for attribute_name in o.sync_attributes:
- fields[attribute_name] = getattr(o, attribute_name)
-
- fields.update(self.get_extra_attributes(o))
-
- self.run_playbook(o, fields)
-
- o.instance_id = fields["container_name"]
- o.instance_name = fields["container_name"]
-
- o.save()
-
- def delete_record(self, o):
- logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
-
- fields = self.get_ansible_fields(o)
-
- # If 'o' defines a 'sync_attributes' list, then we'll copy those
- # attributes into the Ansible recipe's field list automatically.
- if hasattr(o, "sync_attributes"):
- for attribute_name in o.sync_attributes:
- fields[attribute_name] = getattr(o, attribute_name)
-
- fields.update(self.get_extra_attributes(o))
-
- self.run_playbook(o, fields, "teardown_container.yaml")
-
- def run_playbook(self, o, fields, template_name=None):
- if not template_name:
- template_name = self.template_name
- tStart = time.time()
- run_template_ssh(template_name, fields, path="container")
- logger.info("playbook execution time %d" % int(time.time()-tStart,extra=o.tologdict())
-
-
diff --git a/xos/synchronizers/base/steps/sync_container.yaml b/xos/synchronizers/base/steps/sync_container.yaml
deleted file mode 100644
index 77e57cd..0000000
--- a/xos/synchronizers/base/steps/sync_container.yaml
+++ /dev/null
@@ -1,116 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
-
- vars:
- container_name: {{ container_name }}
- docker_image: {{ docker_image }}
- network_method: {{ network_method }}
- ports:
- {% for port in ports %}
- - device: {{ port.device }}
- xos_network_id: {{ port.xos_network_id }}
- mac: {{ port.mac|default("") }}
- ip: {{ port.ip }}
- snoop_instance_mac: {{ port.snoop_instance_mac }}
- snoop_instance_id: {{ port.snoop_instance_id }}
- parent_mac: {{ port.parent_mac|default("") }}
- s_tag: {{ port.s_tag|default("") }}
- c_tag: {{ port.c_tag|default("") }}
- next_hop: {{ port.next_hop|default("") }}
- bridge: {{ port.bridge }}
- {% endfor %}
- volumes:
- {% for volume in volumes %}
- - {{ volume }}
- {% endfor %}
-
- tasks:
-
-# - name: Fix /etc/hosts
-# lineinfile:
-# dest=/etc/hosts
-# regexp="127.0.0.1 localhost"
-# line="127.0.0.1 localhost {{ instance_hostname }}"
-
- - name: Add repo key
- apt_key:
- keyserver=hkp://pgp.mit.edu:80
- id=58118E89F3A912897C070ADBF76221572C52609D
-
- - name: Install Docker repo
- apt_repository:
- repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
- state=present
-
- - name: Install Docker
- apt:
- name={{ '{{' }} item {{ '}}' }}
- state=latest
- update_cache=yes
- with_items:
- - docker-engine
- - python-pip
- - python-httplib2
-
- # Something is installing a requests library that is incompative with pip, and
- # will cause this recipe to fail next time it tries to run pip. Only the one
- # in /usr/local/lib is bad. There's still a good one in /usr/lib
- - name: check if bad requests library installed
- stat: path=/usr/local/lib/python2.7/dist-packages/requests
- register: bad_requests
-
- - name: remove bad request library
- shell: mv /usr/local/lib/python2.7/dist-packages/requests /usr/local/lib/python2.7/dist-packages/requests-bad
- when: bad_requests.stat.exists == True
-
- - name: Install docker-py
- pip:
- name=docker-py
- state=latest
-
- - name: install Pipework
- get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
- dest=/usr/local/bin/pipework
- mode=0755
-
-# - name: Start Container
-# docker:
-# docker_api_version: "1.18"
-# name: {{ container_name }}
-# # was: reloaded
-# state: running
-# image: {{ docker_image }}
-
- - name: check if systemd is installed
- stat: path=/usr/bin/systemctl
- register: systemctl
-
- - name: container upstart
- template: src=/opt/xos/openstack_observer/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
-
- - name: container systemd
- template: src=/opt/xos/openstack_observer/templates/container.service.j2 dest=/lib/systemd/system/container-{{ container_name }}.service
-
- - name: container startup script
- template: src=/opt/xos/openstack_observer/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
-
- - name: container teardown script
- template: src=/opt/xos/openstack_observer/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
-
- - name: restart systemd
- shell: systemctl daemon-reload
- when: systemctl.stat.exists == True
-
-{% if ports %}
- - name: make sure bridges are setup
- shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
- with_items: "ports"
-{% endif %}
-
- - name: Make sure container is running
- service: name=container-{{ container_name }} state=started
-
diff --git a/xos/synchronizers/base/steps/sync_controller_images.py b/xos/synchronizers/base/steps/sync_controller_images.py
deleted file mode 100644
index c1e5136..0000000
--- a/xos/synchronizers/base/steps/sync_controller_images.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import Controller
-from core.models import Image, ControllerImages
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-import json
-
-class SyncControllerImages(OpenStackSyncStep):
- provides=[ControllerImages]
- observes = ControllerImages
- requested_interval=0
- playbook='sync_controller_images.yaml'
-
- def fetch_pending(self, deleted):
- if (deleted):
- return []
-
- # now we return all images that need to be enacted
- return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-
- def map_sync_inputs(self, controller_image):
- image_fields = {'endpoint':controller_image.controller.auth_url,
- 'endpoint_v3': controller_image.controller.auth_url_v3,
- 'admin_user':controller_image.controller.admin_user,
- 'admin_password':controller_image.controller.admin_password,
- 'domain': controller_image.controller.domain,
- 'name':controller_image.image.name,
- 'filepath':controller_image.image.path,
- 'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
- }
-
- return image_fields
-
- def map_sync_outputs(self, controller_image, res):
- image_id = res[0]['id']
- controller_image.glance_image_id = image_id
- controller_image.backend_status = '1 - OK'
- controller_image.save()
diff --git a/xos/synchronizers/base/steps/sync_controller_images.yaml b/xos/synchronizers/base/steps/sync_controller_images.yaml
deleted file mode 100644
index 6247a30..0000000
--- a/xos/synchronizers/base/steps/sync_controller_images.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - glance_image:
- auth_url={{ endpoint }}
- login_username="{{ admin_user }}"
- login_tenant_name="admin"
- login_password="{{ admin_password }}"
- name="{{ name }}"
- file="{{ filepath }}"
- disk_format='raw'
- is_public=true
diff --git a/xos/synchronizers/base/steps/sync_controller_networks.py b/xos/synchronizers/base/steps/sync_controller_networks.py
deleted file mode 100644
index f209f01..0000000
--- a/xos/synchronizers/base/steps/sync_controller_networks.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from netaddr import IPAddress, IPNetwork
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.network import *
-from core.models.slice import *
-from core.models.instance import Instance
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.config import Config
-import json
-
-import pdb
-
-class SyncControllerNetworks(OpenStackSyncStep):
- requested_interval = 0
- provides=[Network]
- observes=ControllerNetwork
- playbook='sync_controller_networks.yaml'
-
- def alloc_subnet(self, uuid):
- # 16 bits only
- uuid_masked = uuid & 0xffff
- a = 10
- b = uuid_masked >> 8
- c = uuid_masked & 0xff
- d = 0
-
- cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
- return cidr
-
- def alloc_gateway(self, uuid):
- # 16 bits only
- uuid_masked = uuid & 0xffff
- a = 10
- b = uuid_masked >> 8
- c = uuid_masked & 0xff
- d = 1
-
- gateway = '%d.%d.%d.%d'%(a,b,c,d)
- return gateway
-
-
- def save_controller_network(self, controller_network):
- network_name = controller_network.network.name
- subnet_name = '%s-%d'%(network_name,controller_network.pk)
- cidr = self.alloc_subnet(controller_network.pk)
- self.cidr=cidr
- slice = controller_network.network.owner
-
- network_fields = {'endpoint':controller_network.controller.auth_url,
- 'endpoint_v3': controller_network.controller.auth_url_v3,
- 'admin_user':slice.creator.email,
- 'tenant_name':slice.name,
- 'admin_password':slice.creator.remote_password,
- 'domain': controller_network.controller.domain,
- 'name':network_name,
- 'subnet_name':subnet_name,
- 'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
- 'cidr':cidr,
- 'gateway':self.alloc_gateway(controller_network.pk),
- 'use_vtn':getattr(Config(), "networking_use_vtn", False),
- 'delete':False
- }
- return network_fields
-
- def map_sync_outputs(self, controller_network,res):
- network_id = res[0]['id']
- subnet_id = res[1]['id']
- controller_network.net_id = network_id
- controller_network.subnet = self.cidr
- controller_network.subnet_id = subnet_id
- controller_network.backend_status = '1 - OK'
- controller_network.save()
-
-
- def map_sync_inputs(self, controller_network):
- # XXX This check should really be made from booleans, rather than using hardcoded network names
- if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
- logger.info("skipping network controller %s because it is not private" % controller_network)
- # We only sync private networks
- return SyncStep.SYNC_WITHOUT_RUNNING
-
- if not controller_network.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
- return
-
- if controller_network.network.owner and controller_network.network.owner.creator:
- return self.save_controller_network(controller_network)
- else:
- raise Exception('Could not save network controller %s'%controller_network)
-
- def map_delete_inputs(self, controller_network):
- # XXX This check should really be made from booleans, rather than using hardcoded network names
- if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
- # We only sync private networks
- return
- try:
- slice = controller_network.network.owner # XXX: FIXME!!
- except:
- raise Exception('Could not get slice for Network %s'%controller_network.network.name)
-
- network_name = controller_network.network.name
- subnet_name = '%s-%d'%(network_name,controller_network.pk)
- cidr = controller_network.subnet
- network_fields = {'endpoint':controller_network.controller.auth_url,
- 'admin_user':slice.creator.email, # XXX: FIXME
- 'tenant_name':slice.name, # XXX: FIXME
- 'admin_password':slice.creator.remote_password,
- 'name':network_name,
- 'subnet_name':subnet_name,
- 'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
- 'cidr':cidr,
- 'delete':True
- }
-
- return network_fields
-
- """
- driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
- tenant=controller_network.network.owner.name,
- controller=controller_network.controller.name)
- if (controller_network.router_id) and (controller_network.subnet_id):
- driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
- if controller_network.subnet_id:
- driver.delete_subnet(controller_network.subnet_id)
- if controller_network.router_id:
- driver.delete_router(controller_network.router_id)
- if controller_network.net_id:
- driver.delete_network(controller_network.net_id)
- """
diff --git a/xos/synchronizers/base/steps/sync_controller_networks.yaml b/xos/synchronizers/base/steps/sync_controller_networks.yaml
deleted file mode 100644
index b885516..0000000
--- a/xos/synchronizers/base/steps/sync_controller_networks.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - quantum_network:
- auth_url={{ endpoint }}
- login_username={{ admin_user }}
- login_tenant_name={{ tenant_name }}
- login_password={{ admin_password }}
- tenant_name={{ tenant_name }}
- name={{ name }}
- {% if delete %}
- state=absent
- {% else %}
- state=present
- {% endif %}
- shared=true
- {% if not delete %}
- - quantum_subnet:
- auth_url={{ endpoint }}
- login_username={{ admin_user }}
- login_tenant_name={{ tenant_name }}
- login_password={{ admin_password }}
- tenant_name={{ tenant_name }}
- name={{ subnet_name }}
- network_name={{ name }}
- {% if delete %}
- state=absent
- {% else %}
- state=present
- {% if use_vtn %}
- gateway_ip={{ gateway }}
- {% else %}
- no_gateway=true
- {% endif %}
- dns_nameservers=8.8.8.8
- cidr={{ cidr }}
- {% endif %}
- {% endif %}
diff --git a/xos/synchronizers/base/steps/sync_controller_site_privileges.py b/xos/synchronizers/base/steps/sync_controller_site_privileges.py
deleted file mode 100644
index 59919fe..0000000
--- a/xos/synchronizers/base/steps/sync_controller_site_privileges.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.site import Controller, SitePrivilege
-from core.models.user import User
-from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-import json
-
-class SyncControllerSitePrivileges(OpenStackSyncStep):
- provides=[SitePrivilege]
- requested_interval=0
- observes=ControllerSitePrivilege
- playbook='sync_controller_users.yaml'
-
- def map_sync_inputs(self, controller_site_privilege):
- controller_register = json.loads(controller_site_privilege.controller.backend_register)
- if not controller_site_privilege.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
- return
-
- roles = [controller_site_privilege.site_privilege.role.role]
- # setup user home site roles at controller
- if not controller_site_privilege.site_privilege.user.site:
- raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
- else:
- # look up tenant id for the user's site at the controller
- #ctrl_site_deployments = SiteDeployment.objects.filter(
- # site_deployment__site=controller_site_privilege.user.site,
- # controller=controller_site_privilege.controller)
-
- #if ctrl_site_deployments:
- # # need the correct tenant id for site at the controller
- # tenant_id = ctrl_site_deployments[0].tenant_id
- # tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
- user_fields = {
- 'endpoint':controller_site_privilege.controller.auth_url,
- 'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
- 'domain': controller_site_privilege.controller.domain,
- 'name': controller_site_privilege.site_privilege.user.email,
- 'email': controller_site_privilege.site_privilege.user.email,
- 'password': controller_site_privilege.site_privilege.user.remote_password,
- 'admin_user': controller_site_privilege.controller.admin_user,
- 'admin_password': controller_site_privilege.controller.admin_password,
- 'ansible_tag':'%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
- 'admin_tenant': controller_site_privilege.controller.admin_tenant,
- 'roles':roles,
- 'tenant':controller_site_privilege.site_privilege.site.login_base}
-
- return user_fields
-
- def map_sync_outputs(self, controller_site_privilege, res):
- # results is an array in which each element corresponds to an
- # "ok" string received per operation. If we get as many oks as
- # the number of operations we issued, that means a grand success.
- # Otherwise, the number of oks tell us which operation failed.
- controller_site_privilege.role_id = res[0]['id']
- controller_site_privilege.save()
-
- def delete_record(self, controller_site_privilege):
- controller_register = json.loads(controller_site_privilege.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
-
- if controller_site_privilege.role_id:
- driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
- user = ControllerUser.objects.get(
- controller=controller_site_privilege.controller,
- user=controller_site_privilege.site_privilege.user
- )
- site = ControllerSite.objects.get(
- controller=controller_site_privilege.controller,
- user=controller_site_privilege.site_privilege.user
- )
- driver.delete_user_role(
- user.kuser_id,
- site.tenant_id,
- controller_site_privilege.site_prvilege.role.role
- )
diff --git a/xos/synchronizers/base/steps/sync_controller_sites.py b/xos/synchronizers/base/steps/sync_controller_sites.py
deleted file mode 100644
index 1b3c2ba..0000000
--- a/xos/synchronizers/base/steps/sync_controller_sites.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.openstack.openstacksyncstep import OpenStackSyncStep
-from core.models.site import *
-from synchronizers.base.syncstep import *
-from synchronizers.base.ansible import *
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSites(OpenStackSyncStep):
- requested_interval=0
- provides=[Site]
- observes=ControllerSite
- playbook = 'sync_controller_sites.yaml'
-
- def fetch_pending(self, deleted=False):
- lobjs = ControllerSite.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(controller__isnull=False))
- return lobjs
-
- def map_sync_inputs(self, controller_site):
- tenant_fields = {'endpoint':controller_site.controller.auth_url,
- 'endpoint_v3': controller_site.controller.auth_url_v3,
- 'domain': controller_site.controller.domain,
- 'admin_user': controller_site.controller.admin_user,
- 'admin_password': controller_site.controller.admin_password,
- 'admin_tenant': controller_site.controller.admin_tenant,
- 'ansible_tag': '%s@%s'%(controller_site.site.login_base,controller_site.controller.name), # name of ansible playbook
- 'tenant': controller_site.site.login_base,
- 'tenant_description': controller_site.site.name}
- return tenant_fields
-
- def map_sync_outputs(self, controller_site, res):
- controller_site.tenant_id = res[0]['id']
- controller_site.backend_status = '1 - OK'
- controller_site.save()
-
- def delete_record(self, controller_site):
- controller_register = json.loads(controller_site.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_site.controller.name)
-
- if controller_site.tenant_id:
- driver = self.driver.admin_driver(controller=controller_site.controller)
- driver.delete_tenant(controller_site.tenant_id)
-
- """
- Ansible does not support tenant deletion yet
-
- import pdb
- pdb.set_trace()
- template = os_template_env.get_template('delete_controller_sites.yaml')
- tenant_fields = {'endpoint':controller_site.controller.auth_url,
- 'admin_user': controller_site.controller.admin_user,
- 'admin_password': controller_site.controller.admin_password,
- 'admin_tenant': 'admin',
- 'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
- 'tenant': controller_site.controller_site.site.login_base,
- 'delete': True}
-
- rendered = template.render(tenant_fields)
- res = run_template('sync_controller_sites.yaml', tenant_fields)
-
- if (len(res)!=1):
- raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
- """
diff --git a/xos/synchronizers/base/steps/sync_controller_sites.yaml b/xos/synchronizers/base/steps/sync_controller_sites.yaml
deleted file mode 100644
index 4129802..0000000
--- a/xos/synchronizers/base/steps/sync_controller_sites.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/xos/synchronizers/base/steps/sync_controller_slice_privileges.py b/xos/synchronizers/base/steps/sync_controller_slice_privileges.py
deleted file mode 100644
index b78e4a0..0000000
--- a/xos/synchronizers/base/steps/sync_controller_slice_privileges.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.slice import Controller, SlicePrivilege
-from core.models.user import User
-from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
-from synchronizers.base.ansible import *
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSlicePrivileges(OpenStackSyncStep):
- provides=[SlicePrivilege]
- requested_interval=0
- observes=ControllerSlicePrivilege
- playbook = 'sync_controller_users.yaml'
-
- def map_inputs(self, controller_slice_privilege):
- if not controller_slice_privilege.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
- return
-
- template = os_template_env.get_template('sync_controller_users.yaml')
- roles = [controller_slice_privilege.slice_privilege.role.role]
- # setup user home slice roles at controller
- if not controller_slice_privilege.slice_privilege.user.site:
- raise Exception('Sliceless user %s'%controller_slice_privilege.slice_privilege.user.email)
- else:
- # look up tenant id for the user's slice at the controller
- #ctrl_slice_deployments = SliceDeployment.objects.filter(
- # slice_deployment__slice=controller_slice_privilege.user.slice,
- # controller=controller_slice_privilege.controller)
-
- #if ctrl_slice_deployments:
- # # need the correct tenant id for slice at the controller
- # tenant_id = ctrl_slice_deployments[0].tenant_id
- # tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
- user_fields = {
- 'endpoint':controller_slice_privilege.controller.auth_url,
- 'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
- 'domain': controller_slice_privilege.controller.domain,
- 'name': controller_slice_privilege.slice_privilege.user.email,
- 'email': controller_slice_privilege.slice_privilege.user.email,
- 'password': controller_slice_privilege.slice_privilege.user.remote_password,
- 'admin_user': controller_slice_privilege.controller.admin_user,
- 'admin_password': controller_slice_privilege.controller.admin_password,
- 'ansible_tag':'%s@%s@%s'%(controller_slice_privilege.slice_privilege.user.email.replace('@','-at-'),controller_slice_privilege.slice_privilege.slice.name,controller_slice_privilege.controller.name),
- 'admin_tenant': controller_slice_privilege.controller.admin_tenant,
- 'roles':roles,
- 'tenant':controller_slice_privilege.slice_privilege.slice.name}
- return user_fields
-
- def map_sync_outputs(self, controller_slice_privilege, res):
- controller_slice_privilege.role_id = res[0]['id']
- controller_slice_privilege.save()
-
- def delete_record(self, controller_slice_privilege):
- controller_register = json.loads(controller_slice_privilege.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_slice_privilege.controller.name)
-
- if controller_slice_privilege.role_id:
- driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
- user = ControllerUser.objects.get(
- controller=controller_slice_privilege.controller,
- user=controller_slice_privilege.slice_privilege.user
- )
- slice = ControllerSlice.objects.get(
- controller=controller_slice_privilege.controller,
- user=controller_slice_privilege.slice_privilege.user
- )
- driver.delete_user_role(
- user.kuser_id,
- slice.tenant_id,
- controller_slice_privilege.slice_prvilege.role.role
- )
diff --git a/xos/synchronizers/base/steps/sync_controller_slices.py b/xos/synchronizers/base/steps/sync_controller_slices.py
deleted file mode 100644
index 2f36441..0000000
--- a/xos/synchronizers/base/steps/sync_controller_slices.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from netaddr import IPAddress, IPNetwork
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import *
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSlices(OpenStackSyncStep):
- provides=[Slice]
- requested_interval=0
- observes=ControllerSlice
- playbook='sync_controller_slices.yaml'
-
- def map_sync_inputs(self, controller_slice):
- logger.info("sync'ing slice controller %s" % controller_slice)
-
- if not controller_slice.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
- return
-
- controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
- controller=controller_slice.controller)
- if not controller_users:
- raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
- else:
- controller_user = controller_users[0]
- roles = ['admin']
-
- max_instances=int(controller_slice.slice.max_instances)
- tenant_fields = {'endpoint':controller_slice.controller.auth_url,
- 'endpoint_v3': controller_slice.controller.auth_url_v3,
- 'domain': controller_slice.controller.domain,
- 'admin_user': controller_slice.controller.admin_user,
- 'admin_password': controller_slice.controller.admin_password,
- 'admin_tenant': 'admin',
- 'tenant': controller_slice.slice.name,
- 'tenant_description': controller_slice.slice.description,
- 'roles':roles,
- 'name':controller_user.user.email,
- 'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
- 'max_instances':max_instances}
-
- return tenant_fields
-
- def map_sync_outputs(self, controller_slice, res):
- tenant_id = res[0]['id']
- if (not controller_slice.tenant_id):
- try:
- driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
- driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_instances))
- except:
- logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
- raise Exception('Could not update quota for %s'%controller_slice.slice.name)
-
- controller_slice.tenant_id = tenant_id
- controller_slice.backend_status = '1 - OK'
- controller_slice.save()
-
-
- def map_delete_inputs(self, controller_slice):
- controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
- controller=controller_slice.controller)
- if not controller_users:
- raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
- else:
- controller_user = controller_users[0]
-
- tenant_fields = {'endpoint':controller_slice.controller.auth_url,
- 'admin_user': controller_slice.controller.admin_user,
- 'admin_password': controller_slice.controller.admin_password,
- 'admin_tenant': 'admin',
- 'tenant': controller_slice.slice.name,
- 'tenant_description': controller_slice.slice.description,
- 'name':controller_user.user.email,
- 'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
- 'delete': True}
- return tenant_fields
diff --git a/xos/synchronizers/base/steps/sync_controller_slices.yaml b/xos/synchronizers/base/steps/sync_controller_slices.yaml
deleted file mode 100644
index 61470ce..0000000
--- a/xos/synchronizers/base/steps/sync_controller_slices.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- {% if delete -%}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
- {% else -%}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
- {% for role in roles %}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
- {% endfor %}
- {% endif %}
diff --git a/xos/synchronizers/base/steps/sync_controller_users.py b/xos/synchronizers/base/steps/sync_controller_users.py
deleted file mode 100644
index 9af48e5..0000000
--- a/xos/synchronizers/base/steps/sync_controller_users.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.site import Controller, SiteDeployment, SiteDeployment
-from core.models.user import User
-from core.models.controlleruser import ControllerUser
-from synchronizers.base.ansible import *
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerUsers(OpenStackSyncStep):
- provides=[User]
- requested_interval=0
- observes=ControllerUser
- playbook='sync_controller_users.yaml'
-
- def map_sync_inputs(self, controller_user):
- if not controller_user.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
- return
-
- # All users will have at least the 'user' role at their home site/tenant.
- # We must also check if the user should have the admin role
- roles = ['user']
- if controller_user.user.is_admin:
- roles.append('admin')
-
- # setup user home site roles at controller
- if not controller_user.user.site:
- raise Exception('Siteless user %s'%controller_user.user.email)
- else:
- # look up tenant id for the user's site at the controller
- #ctrl_site_deployments = SiteDeployment.objects.filter(
- # site_deployment__site=controller_user.user.site,
- # controller=controller_user.controller)
-
- #if ctrl_site_deployments:
- # # need the correct tenant id for site at the controller
- # tenant_id = ctrl_site_deployments[0].tenant_id
- # tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
- user_fields = {
- 'endpoint':controller_user.controller.auth_url,
- 'endpoint_v3': controller_user.controller.auth_url_v3,
- 'domain': controller_user.controller.domain,
- 'name': controller_user.user.email,
- 'email': controller_user.user.email,
- 'password': controller_user.user.remote_password,
- 'admin_user': controller_user.controller.admin_user,
- 'admin_password': controller_user.controller.admin_password,
- 'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
- 'admin_tenant': controller_user.controller.admin_tenant,
- 'roles':roles,
- 'tenant':controller_user.user.site.login_base
- }
- return user_fields
-
- def map_sync_outputs(self, controller_user, res):
- controller_user.kuser_id = res[0]['id']
- controller_user.backend_status = '1 - OK'
- controller_user.save()
-
- def delete_record(self, controller_user):
- if controller_user.kuser_id:
- driver = self.driver.admin_driver(controller=controller_user.controller)
- driver.delete_user(controller_user.kuser_id)
diff --git a/xos/synchronizers/base/steps/sync_controller_users.yaml b/xos/synchronizers/base/steps/sync_controller_users.yaml
deleted file mode 100644
index 4f2db5e..0000000
--- a/xos/synchronizers/base/steps/sync_controller_users.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - keystone_user:
- endpoint={{ endpoint }}
- login_user={{ admin_user }}
- login_password={{ admin_password }}
- login_tenant_name={{ admin_tenant }}
- user="{{ name }}"
- email={{ email }}
- password={{ password }}
- tenant={{ tenant }}
- {% for role in roles %}
- - keystone_user: endpoint={{ endpoint}} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
- {% endfor %}
diff --git a/xos/synchronizers/base/steps/sync_images.py b/xos/synchronizers/base/steps/sync_images.py
deleted file mode 100644
index 8049ac1..0000000
--- a/xos/synchronizers/base/steps/sync_images.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.image import Image
-from xos.logger import observer_logger as logger
-
-class SyncImages(OpenStackSyncStep):
- provides=[Image]
- requested_interval=0
- observes=Image
-
- def fetch_pending(self, deleted):
- # Images come from the back end
- # You can't delete them
- if (deleted):
- logger.info("SyncImages: returning because deleted=True")
- return []
-
- # get list of images on disk
- images_path = Config().observer_images_directory
-
- logger.info("SyncImages: deleted=False, images_path=%s" % images_path)
-
- available_images = {}
- if os.path.exists(images_path):
- for f in os.listdir(images_path):
- filename = os.path.join(images_path, f)
- if os.path.isfile(filename):
- available_images[f] = filename
-
- logger.info("SyncImages: available_images = %s" % str(available_images))
-
- images = Image.objects.all()
- image_names = [image.name for image in images]
-
- for image_name in available_images:
- #remove file extension
- clean_name = ".".join(image_name.split('.')[:-1])
- if clean_name not in image_names:
- logger.info("SyncImages: adding %s" % clean_name)
- image = Image(name=clean_name,
- disk_format='raw',
- container_format='bare',
- path = available_images[image_name])
- image.save()
-
- return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-
- def sync_record(self, image):
- image.save()
diff --git a/xos/synchronizers/base/steps/sync_instances.py b/xos/synchronizers/base/steps/sync_instances.py
deleted file mode 100644
index 2862474..0000000
--- a/xos/synchronizers/base/steps/sync_instances.py
+++ /dev/null
@@ -1,174 +0,0 @@
-import os
-import base64
-import socket
-from django.db.models import F, Q
-from xos.config import Config
-from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.instance import Instance
-from core.models.slice import Slice, SlicePrivilege, ControllerSlice
-from core.models.network import Network, NetworkSlice, ControllerNetwork
-from synchronizers.base.ansible import *
-from synchronizers.base.syncstep import *
-from xos.logger import observer_logger as logger
-
-def escape(s):
- s = s.replace('\n',r'\n').replace('"',r'\"')
- return s
-
-class SyncInstances(OpenStackSyncStep):
- provides=[Instance]
- requested_interval=0
- observes=Instance
- playbook='sync_instances.yaml'
-
- def fetch_pending(self, deletion=False):
- objs = super(SyncInstances, self).fetch_pending(deletion)
- objs = [x for x in objs if x.isolation=="vm"]
- return objs
-
- def get_userdata(self, instance, pubkeys):
- userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
- userdata += 'ssh_authorized_keys:\n'
- for key in pubkeys:
- userdata += ' - %s\n' % key
- return userdata
-
- def map_sync_inputs(self, instance):
- inputs = {}
- metadata_update = {}
- if (instance.numberCores):
- metadata_update["cpu_cores"] = str(instance.numberCores)
-
- for tag in instance.slice.tags.all():
- if tag.name.startswith("sysctl-"):
- metadata_update[tag.name] = tag.value
-
- slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
- pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
- if instance.creator.public_key:
- pubkeys.add(instance.creator.public_key)
-
- if instance.slice.creator.public_key:
- pubkeys.add(instance.slice.creator.public_key)
-
- if instance.slice.service and instance.slice.service.public_key:
- pubkeys.add(instance.slice.service.public_key)
-
- nics = []
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
- controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=instance.node.site_deployment.controller)
-
- for controller_network in controller_networks:
-
- # Lenient exception - causes slow backoff
- if controller_network.network.template.visibility == 'private' and \
- controller_network.network.template.translation == 'none':
- if not controller_network.net_id:
- raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
- nics.append(controller_network.net_id)
-
- # now include network template
- network_templates = [network.template.shared_network_name for network in networks \
- if network.template.shared_network_name]
-
- #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
- driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
- nets = driver.shell.neutron.list_networks()['networks']
- for net in nets:
- if net['name'] in network_templates:
- nics.append(net['id'])
-
- if (not nics):
- for net in nets:
- if net['name']=='public':
- nics.append(net['id'])
-
- image_name = None
- controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
- if controller_images:
- image_name = controller_images[0].image.name
- logger.info("using image from ControllerImage object: " + str(image_name))
-
- if image_name is None:
- controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
- images = controller_driver.shell.glanceclient.images.list()
- for image in images:
- if image.name == instance.image.name or not image_name:
- image_name = image.name
- logger.info("using image from glance: " + str(image_name))
-
- try:
- legacy = Config().observer_legacy
- except:
- legacy = False
-
- if (legacy):
- host_filter = instance.node.name.split('.',1)[0]
- else:
- host_filter = instance.node.name.strip()
-
- availability_zone_filter = 'nova:%s'%host_filter
- instance_name = '%s-%d'%(instance.slice.name,instance.id)
- self.instance_name = instance_name
-
- userData = self.get_userdata(instance, pubkeys)
- if instance.userData:
- userData += instance.userData
-
- controller = instance.node.site_deployment.controller
- fields = {'endpoint':controller.auth_url,
- 'endpoint_v3': controller.auth_url_v3,
- 'domain': controller.domain,
- 'admin_user': instance.creator.email,
- 'admin_password': instance.creator.remote_password,
- 'admin_tenant': instance.slice.name,
- 'tenant': instance.slice.name,
- 'tenant_description': instance.slice.description,
- 'name':instance_name,
- 'ansible_tag':instance_name,
- 'availability_zone': availability_zone_filter,
- 'image_name':image_name,
- 'flavor_name':instance.flavor.name,
- 'nics':nics,
- 'meta':metadata_update,
- 'user_data':r'%s'%escape(userData)}
- return fields
-
-
- def map_sync_outputs(self, instance, res):
- instance_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
- instance_uuid = res[0]['id']
-
- try:
- hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
- ip = socket.gethostbyname(hostname)
- instance.ip = ip
- except:
- pass
-
- instance.instance_id = instance_id
- instance.instance_uuid = instance_uuid
- instance.instance_name = self.instance_name
- instance.save()
-
-
- def map_delete_inputs(self, instance):
- controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
-
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
-
- instance_name = '%s-%d'%(instance.slice.name,instance.id)
- controller = instance.node.site_deployment.controller
- input = {'endpoint':controller.auth_url,
- 'admin_user': instance.creator.email,
- 'admin_password': instance.creator.remote_password,
- 'admin_tenant': instance.slice.name,
- 'tenant': instance.slice.name,
- 'tenant_description': instance.slice.description,
- 'name':instance_name,
- 'ansible_tag':instance_name,
- 'delete': True}
- return input
diff --git a/xos/synchronizers/base/steps/sync_instances.yaml b/xos/synchronizers/base/steps/sync_instances.yaml
deleted file mode 100644
index 70da32d..0000000
--- a/xos/synchronizers/base/steps/sync_instances.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - os_server:
- name: {{ name }}
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ project_name }}
- {% if delete -%}
- state: absent
- {% else -%}
- state: present
- availability_zone: "{{ availability_zone }}"
- image: {{ image_name }}
- flavor: {{ flavor_name }}
- timeout: 200
- userdata: "{{ user_data }}"
- config_drive: yes
- nics:
- {% for nic in nics %}
- - {{ nic.kind }}-id: {{ nic.value }}
- {% endfor %}
-
- {% if meta %}
- meta:
- {% for k,v in meta.items() %}
- {{ k }} : "{{ v }}"
- {% endfor %}
- {% endif %}
- {% endif %}
-
diff --git a/xos/synchronizers/base/steps/sync_ports.py b/xos/synchronizers/base/steps/sync_ports.py
deleted file mode 100644
index 8f2b66d..0000000
--- a/xos/synchronizers/base/steps/sync_ports.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models import Controller
-from core.models.network import *
-from xos.logger import observer_logger as logger
-
-class SyncPorts(OpenStackSyncStep):
- requested_interval = 0 # 3600
- provides=[Port]
- observes=Port
-
- # The way it works is to enumerate the all of the ports that neutron
- # has, and then work backward from each port's network-id to determine
- # which Network is associated from the port.
-
- def call(self, **args):
- logger.info("sync'ing network instances")
-
- ports = Port.objects.all()
- ports_by_id = {}
- ports_by_neutron_port = {}
- for port in ports:
- ports_by_id[port.id] = port
- ports_by_neutron_port[port.port_id] = port
-
- networks = Network.objects.all()
- networks_by_id = {}
- for network in networks:
- for nd in network.controllernetworks.all():
- networks_by_id[nd.net_id] = network
-
- #logger.info("networks_by_id = ")
- #for (network_id, network) in networks_by_id.items():
- # logger.info(" %s: %s" % (network_id, network.name))
-
- instances = Instance.objects.all()
- instances_by_instance_uuid = {}
- for instance in instances:
- instances_by_instance_uuid[instance.instance_uuid] = instance
-
- # Get all ports in all controllers
-
- ports_by_id = {}
- templates_by_id = {}
- for controller in Controller.objects.all():
- if not controller.admin_tenant:
- logger.info("controller %s has no admin_tenant" % controller)
- continue
- try:
- driver = self.driver.admin_driver(controller = controller)
- ports = driver.shell.neutron.list_ports()["ports"]
- except:
- logger.log_exc("failed to get ports from controller %s" % controller)
- continue
-
- for port in ports:
- ports_by_id[port["id"]] = port
-
- # public-nat and public-dedicated networks don't have a net-id anywhere
- # in the data model, so build up a list of which ids map to which network
- # templates.
- try:
- neutron_networks = driver.shell.neutron.list_networks()["networks"]
- except:
- print "failed to get networks from controller %s" % controller
- continue
- for network in neutron_networks:
- for template in NetworkTemplate.objects.all():
- if template.shared_network_name == network["name"]:
- templates_by_id[network["id"]] = template
-
- for port in ports_by_id.values():
- #logger.info("port %s" % str(port))
- if port["id"] in ports_by_neutron_port:
- # we already have it
- #logger.info("already accounted for port %s" % port["id"])
- continue
-
- if port["device_owner"] != "compute:nova":
- # we only want the ports that connect to instances
- #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
- continue
-
- instance = instances_by_instance_uuid.get(port['device_id'], None)
- if not instance:
- logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
- continue
-
- network = networks_by_id.get(port['network_id'], None)
- if not network:
- # maybe it's public-nat or public-dedicated. Search the templates for
- # the id, then see if the instance's slice has some network that uses
- # that template
- template = templates_by_id.get(port['network_id'], None)
- if template and instance.slice:
- for candidate_network in instance.slice.networks.all():
- if candidate_network.template == template:
- network=candidate_network
- if not network:
- logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
-
- # we know it's associated with a instance, but we don't know
- # which network it is part of.
-
- continue
-
- if network.template.shared_network_name:
- # If it's a shared network template, then more than one network
- # object maps to the neutron network. We have to do a whole bunch
- # of extra work to find the right one.
- networks = network.template.network_set.all()
- network = None
- for candidate_network in networks:
- if (candidate_network.owner == instance.slice):
- logger.info("found network %s" % candidate_network)
- network = candidate_network
-
- if not network:
- logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
- continue
-
- if not port["fixed_ips"]:
- logger.info("port %s has no fixed_ips" % port["id"])
- continue
-
- ip=port["fixed_ips"][0]["ip_address"]
- mac=port["mac_address"]
- logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
-
- ns = Port(network=network,
- instance=instance,
- ip=ip,
- mac=mac,
- port_id=port["id"])
-
- try:
- ns.save()
- except:
- logger.log_exc("failed to save port %s" % str(ns))
- continue
-
- # For ports that were created by the user, find that ones
- # that don't have neutron ports, and create them.
- for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
- logger.info("XXX working on port %s" % port)
- controller = port.instance.node.site_deployment.controller
- slice = port.instance.slice
-
- if controller:
- cn=port.network.controllernetworks.filter(controller=controller)
- if not cn:
- logger.log_exc("no controllernetwork for %s" % port)
- continue
- cn=cn[0]
- if cn.lazy_blocked:
- cn.lazy_blocked=False
- cn.save()
- logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
- continue
- if not cn.net_id:
- logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
- continue
- try:
- # We need to use a client driver that specifies the tenant
- # of the destination instance. Nova-compute will not connect
- # ports to instances if the port's tenant does not match
- # the instance's tenant.
-
- # A bunch of stuff to compensate for OpenStackDriver.client_driveR()
- # not being in working condition.
- from openstack_xos.client import OpenStackClient
- from openstack_xos.driver import OpenStackDriver
- caller = port.network.owner.creator
- auth = {'username': caller.email,
- 'password': caller.remote_password,
- 'tenant': slice.name}
- client = OpenStackClient(controller=controller, **auth) # cacert=self.config.nova_ca_ssl_cert,
- driver = OpenStackDriver(client=client)
-
- neutron_port = driver.shell.neutron.create_port({"port": {"network_id": cn.net_id}})["port"]
- port.port_id = neutron_port["id"]
- if neutron_port["fixed_ips"]:
- port.ip = neutron_port["fixed_ips"][0]["ip_address"]
- port.mac = neutron_port["mac_address"]
- except:
- logger.log_exc("failed to create neutron port for %s" % port)
- continue
- port.save()
-
- def delete_record(self, network_instance):
- # Nothing to do, this is an OpenCloud object
- pass
-
diff --git a/xos/synchronizers/base/steps/sync_roles.py b/xos/synchronizers/base/steps/sync_roles.py
deleted file mode 100644
index e859316..0000000
--- a/xos/synchronizers/base/steps/sync_roles.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.role import Role
-from core.models.site import SiteRole, Controller, ControllerRole
-from core.models.slice import SliceRole
-from xos.logger import observer_logger as logger
-
-class SyncRoles(OpenStackSyncStep):
- provides=[Role]
- requested_interval=0
- observes=[SiteRole,SliceRole,ControllerRole]
-
- def sync_record(self, role):
- if not role.enacted:
- controllers = Controller.objects.all()
- for controller in controllers:
- driver = self.driver.admin_driver(controller=controller)
- driver.create_role(role.role)
- role.save()
-
diff --git a/xos/synchronizers/base/steps/teardown_container.yaml b/xos/synchronizers/base/steps/teardown_container.yaml
deleted file mode 100644
index 5cabc78..0000000
--- a/xos/synchronizers/base/steps/teardown_container.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
-
- vars:
- container_name: {{ container_name }}
- docker_image: {{ docker_image }}
- ports:
- {% for port in ports %}
- - device: {{ port.device }}
- xos_network_id: {{ port.xos_network_id }}
- mac: {{ port.mac|default("") }}
- ip: {{ port.ip }}
- snoop_instance_mac: {{ port.snoop_instance_mac }}
- snoop_instance_id: {{ port.snoop_instance_id }}
- parent_mac: {{ port.parent_mac|default("") }}
- s_tag: {{ port.s_tag|default("") }}
- c_tag: {{ port.c_tag|default("") }}
- next_hop: {{ port.next_hop|default("") }}
- bridge: {{ port.bridge }}
- {% endfor %}
- volumes:
- {% for volume in volumes %}
- - {{ volume }}
- {% endfor %}
-
- tasks:
- - name: Make sure container is stopped
- service: name=container-{{ container_name }} state=stopped
-
diff --git a/xos/synchronizers/onboarding/xosbuilder.py b/xos/synchronizers/onboarding/xosbuilder.py
index bbe7294..be3960e 100644
--- a/xos/synchronizers/onboarding/xosbuilder.py
+++ b/xos/synchronizers/onboarding/xosbuilder.py
@@ -280,7 +280,7 @@
for c in ServiceController.objects.all():
#dockerfile = dockerfile + self.get_controller_docker_lines(c, self.SYNC_ALLCONTROLLER_KINDS)
script = script + self.get_controller_script_lines(c, self.SYNC_ALLCONTROLLER_KINDS)
- if controller.service_controller_resources.filter(kind="models").exists():
+ if c.service_controller_resources.filter(kind="models").exists():
app_list.append("services." + c.name)
self.create_xos_app_data(controller.name, script, app_list, None)
diff --git a/xos/synchronizers/openstack/__init__.py b/xos/synchronizers/openstack/__init__.py
deleted file mode 100644
index e56cd39..0000000
--- a/xos/synchronizers/openstack/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from xos.config import Config
-
-try:
- observer_disabled = Config().observer_disabled
-except:
- observer_disabled = False
-
-def EnableObserver(x):
- """ used for manage.py --noobserver """
- global observer_disabled
- observer_disabled = not x
-
-print_once = True
-
-def notify_observer(model=None, delete=False, pk=None, model_dict={}):
- if (observer_disabled):
- global print_once
- if (print_once):
- print "The observer is disabled"
- print_once = False
- return
-
- try:
- from .event_manager import EventSender
- if (model and delete):
- if hasattr(model,"__name__"):
- modelName = model.__name__
- else:
- modelName = model.__class__.__name__
- EventSender().fire(delete_flag = delete, model = modelName, pk = pk, model_dict=model_dict)
- else:
- EventSender().fire()
- except Exception,e:
- print "Exception in Observer. This should not disrupt the front end. %s"%str(e)
-
-
diff --git a/xos/synchronizers/openstack/error_mapper.py b/xos/synchronizers/openstack/error_mapper.py
deleted file mode 100644
index 9eb878d..0000000
--- a/xos/synchronizers/openstack/error_mapper.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from xos.config import Config
-from xos.logger import Logger, logging, logger
-
-class ErrorMapper:
- def __init__(self, error_map_file):
- self.error_map = {}
- try:
- error_map_lines = open(error_map_file).read().splitlines()
- for l in error_map_lines:
- if (not l.startswith('#')):
- splits = l.split('->')
- k,v = map(lambda i:i.rstrip(),splits)
- self.error_map[k]=v
- except:
- logging.info('Could not read error map')
-
-
- def map(self, error):
- return self.error_map[error]
-
-
-
-
-
-
diff --git a/xos/synchronizers/openstack/model_policies/__init__.py b/xos/synchronizers/openstack/model_policies/__init__.py
deleted file mode 100644
index 36c6e25..0000000
--- a/xos/synchronizers/openstack/model_policies/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from .model_policy_Slice import *
-from .model_policy_Instance import *
-from .model_policy_User import *
-from .model_policy_Network import *
-from .model_policy_Site import *
-from .model_policy_SitePrivilege import *
-from .model_policy_SlicePrivilege import *
-from .model_policy_ControllerSlice import *
-from .model_policy_ControllerSite import *
-from .model_policy_ControllerUser import *
-from .model_policy_Controller import *
-from .model_policy_Image import *
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Controller.py b/xos/synchronizers/openstack/model_policies/model_policy_Controller.py
deleted file mode 100644
index c62b612..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Controller.py
+++ /dev/null
@@ -1,62 +0,0 @@
-
-def handle(controller):
- from core.models import Controller, Site, ControllerSite, Slice, ControllerSlice, User, ControllerUser, ControllerImages, ControllerNetwork, Image, Network
- from collections import defaultdict
-
- # relations for all sites
- ctrls_by_site = defaultdict(list)
- ctrl_sites = ControllerSite.objects.all()
- for ctrl_site in ctrl_sites:
- ctrls_by_site[ctrl_site.site].append(ctrl_site.controller)
- sites = Site.objects.all()
- for site in sites:
- if site not in ctrls_by_site or \
- controller not in ctrls_by_site[site]:
- controller_site = ControllerSite(controller=controller, site=site)
- controller_site.save()
- # relations for all slices
- ctrls_by_slice = defaultdict(list)
- ctrl_slices = ControllerSlice.objects.all()
- for ctrl_slice in ctrl_slices:
- ctrls_by_slice[ctrl_slice.slice].append(ctrl_slice.controller)
- slices = Slice.objects.all()
- for slice in slices:
- if slice not in ctrls_by_slice or \
- controller not in ctrls_by_slice[slice]:
- controller_slice = ControllerSlice(controller=controller, slice=slice)
- controller_slice.save()
- # relations for all users
- ctrls_by_user = defaultdict(list)
- ctrl_users = ControllerUser.objects.all()
- for ctrl_user in ctrl_users:
- ctrls_by_user[ctrl_user.user].append(ctrl_user.controller)
- users = User.objects.all()
- for user in users:
- if user not in ctrls_by_user or \
- controller not in ctrls_by_user[user]:
- controller_user = ControllerUser(controller=controller, user=user)
- controller_user.save()
- # relations for all networks
- ctrls_by_network = defaultdict(list)
- ctrl_networks = ControllerNetwork.objects.all()
- for ctrl_network in ctrl_networks:
- ctrls_by_network[ctrl_network.network].append(ctrl_network.controller)
- networks = Network.objects.all()
- for network in networks:
- if network not in ctrls_by_network or \
- controller not in ctrls_by_network[network]:
- controller_network = ControllerNetwork(controller=controller, network=network)
- if network.subnet and network.subnet.strip():
- controller_network.subnet = network.subnet.strip()
- controller_network.save()
- # relations for all images
- ctrls_by_image = defaultdict(list)
- ctrl_images = ControllerImages.objects.all()
- for ctrl_image in ctrl_images:
- ctrls_by_image[ctrl_image.image].append(ctrl_image.controller)
- images = Image.objects.all()
- for image in images:
- if image not in ctrls_by_image or \
- controller not in ctrls_by_image[image]:
- controller_image = ControllerImages(controller=controller, image=image)
- controller_image.save()
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_ControllerSite.py b/xos/synchronizers/openstack/model_policies/model_policy_ControllerSite.py
deleted file mode 100644
index 4b76080..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_ControllerSite.py
+++ /dev/null
@@ -1,16 +0,0 @@
-def handle(controller_site):
- from core.models import ControllerSite, Site
-
- try:
- my_status_code = int(controller_site.backend_status[0])
- try:
- his_status_code = int(controller_site.site.backend_status[0])
- except:
- his_status_code = 0
-
- if (my_status_code not in [0,his_status_code]):
- controller_site.site.backend_status = controller_site.backend_status
- controller_site.site.save(update_fields = ['backend_status'])
- except Exception,e:
- print str(e)
- pass
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_ControllerSlice.py b/xos/synchronizers/openstack/model_policies/model_policy_ControllerSlice.py
deleted file mode 100644
index bfe7995..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_ControllerSlice.py
+++ /dev/null
@@ -1,25 +0,0 @@
-def handle(controller_slice):
- from core.models import ControllerSlice, Slice
-
- try:
- my_status_code = int(controller_slice.backend_status[0])
- try:
- his_status_code = int(controller_slice.slice.backend_status[0])
- except:
- his_status_code = 0
-
- fields = []
- if (my_status_code not in [0,his_status_code]):
- controller_slice.slice.backend_status = controller_slice.backend_status
- fields+=['backend_status']
-
- if (controller_slice.backend_register != controller_slice.slice.backend_register):
- controller_slice.slice.backend_register = controller_slice.backend_register
- fields+=['backend_register']
-
- controller_slice.slice.save(update_fields = fields)
-
-
- except Exception,e:
- print str(e)
- pass
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_ControllerUser.py b/xos/synchronizers/openstack/model_policies/model_policy_ControllerUser.py
deleted file mode 100644
index b69c9b8..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_ControllerUser.py
+++ /dev/null
@@ -1,16 +0,0 @@
-def handle(controller_user):
- from core.models import ControllerUser, User
-
- try:
- my_status_code = int(controller_user.backend_status[0])
- try:
- his_status_code = int(controller_user.user.backend_status[0])
- except:
- his_status_code = 0
-
- if (my_status_code not in [0,his_status_code]):
- controller_user.user.backend_status = controller_user.backend_status
- controller_user.user.save(update_fields = ['backend_status'])
- except Exception,e:
- print str(e)
- pass
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Image.py b/xos/synchronizers/openstack/model_policies/model_policy_Image.py
deleted file mode 100644
index c77d5bb..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Image.py
+++ /dev/null
@@ -1,17 +0,0 @@
-def handle(image):
- from core.models import Controller, ControllerImages, Image
- from collections import defaultdict
-
- if (image.kind == "container"):
- # container images do not get instantiated
- return
-
- controller_images = ControllerImages.objects.filter(image=image)
- existing_controllers = [cs.controller for cs in controller_images]
-
- all_controllers = Controller.objects.all()
- for controller in all_controllers:
- if controller not in existing_controllers:
- sd = ControllerImages(image=image, controller=controller)
- sd.save()
-
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Instance.py b/xos/synchronizers/openstack/model_policies/model_policy_Instance.py
deleted file mode 100644
index dd1a8d5..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Instance.py
+++ /dev/null
@@ -1,58 +0,0 @@
-def handle_container_on_metal(instance):
- from core.models import Instance, Flavor, Port, Image
-
- print "MODEL POLICY: instance", instance, "handle container_on_metal"
-
- if instance.deleted:
- return
-
- if (instance.isolation in ["container"]) and (instance.slice.network not in ["host", "bridged"]):
- # Our current docker-on-metal network strategy requires that there be some
- # VM on the server that connects to the networks, so that
- # the containers can piggyback off of that configuration.
- if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
- flavors = Flavor.objects.filter(name="m1.small")
- if not flavors:
- raise XOSConfigurationError("No m1.small flavor")
-
- images = Image.objects.filter(kind="vm")
-
- companion_instance = Instance(slice = instance.slice,
- node = instance.node,
- image = images[0],
- creator = instance.creator,
- deployment = instance.node.site_deployment.deployment,
- flavor = flavors[0])
- companion_instance.save()
-
- print "MODEL POLICY: instance", instance, "created companion", companion_instance
-
- # Add the ports for the container
- for network in instance.slice.networks.all():
- # hmmm... The NAT ports never become ready, because sync_ports never
- # instantiates them. Need to think about this.
- print "MODEL POLICY: instance", instance, "handling network", network
- if (network.name.endswith("-nat")):
- continue
-
- if not Port.objects.filter(network=network, instance=instance).exists():
- port = Port(network = network, instance=instance)
- port.save()
- print "MODEL POLICY: instance", instance, "created port", port
-
-def handle(instance):
- from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
-
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
- controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=instance.node.site_deployment.controller)
-
- for cn in controller_networks:
- if (cn.lazy_blocked):
- print "MODEL POLICY: instance", instance, "unblocking network", cn.network
- cn.lazy_blocked=False
- cn.backend_register = '{}'
- cn.save()
-
- if (instance.isolation in ["container", "container_vm"]):
- handle_container_on_metal(instance)
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Network.py b/xos/synchronizers/openstack/model_policies/model_policy_Network.py
deleted file mode 100644
index 06347c5..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Network.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from core.models import *
-
-def handle(network):
- from core.models import ControllerSlice,ControllerNetwork, Network
- from collections import defaultdict
-
- print "MODEL POLICY: network", network
-
- # network = Network.get(network_id)
- # network controllers are not visible to users. We must ensure
- # networks are deployed at all deploymets available to their slices.
- slice_controllers = ControllerSlice.objects.all()
- slice_deploy_lookup = defaultdict(list)
- for slice_controller in slice_controllers:
- slice_deploy_lookup[slice_controller.slice].append(slice_controller.controller)
-
- network_controllers = ControllerNetwork.objects.all()
- network_deploy_lookup = defaultdict(list)
- for network_controller in network_controllers:
- network_deploy_lookup[network_controller.network].append(network_controller.controller)
-
- expected_controllers = slice_deploy_lookup[network.owner]
- for expected_controller in expected_controllers:
- if network not in network_deploy_lookup or \
- expected_controller not in network_deploy_lookup[network]:
- lazy_blocked=True
-
- # check and see if some instance already exists
- for networkslice in network.networkslices.all():
- if networkslice.slice.instances.filter(node__site_deployment__controller=expected_controller).exists():
- print "MODEL_POLICY: network, setting lazy_blocked to false because instance on controller already exists"
- lazy_blocked=False
-
- nd = ControllerNetwork(network=network, controller=expected_controller, lazy_blocked=lazy_blocked)
- print "MODEL POLICY: network", network, "create ControllerNetwork", nd, "lazy_blocked", lazy_blocked
- if network.subnet:
- # XXX: Possibly unpredictable behavior if there is
- # more than one ControllerNetwork and the subnet
- # is specified.
- nd.subnet = network.subnet
- nd.save()
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Site.py b/xos/synchronizers/openstack/model_policies/model_policy_Site.py
deleted file mode 100644
index 23010a2..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Site.py
+++ /dev/null
@@ -1,14 +0,0 @@
-
-def handle(site):
- from core.models import Controller, ControllerSite, Site
-
- # site = Site.get(site_id)
- # make sure site has a ControllerSite record for each controller
- ctrl_sites = ControllerSite.objects.filter(site=site)
- existing_controllers = [cs.controller for cs in ctrl_sites]
-
- all_controllers = Controller.objects.all()
- for ctrl in all_controllers:
- if ctrl not in existing_controllers:
- ctrl_site = ControllerSite(controller=ctrl, site=site)
- ctrl_site.save()
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_SitePrivilege.py b/xos/synchronizers/openstack/model_policies/model_policy_SitePrivilege.py
deleted file mode 100644
index d9c6a1e..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_SitePrivilege.py
+++ /dev/null
@@ -1,15 +0,0 @@
-def handle(site_privilege):
- from core.models import Controller, SitePrivilege, ControllerSitePrivilege
-
- # site_privilege = SitePrivilege.get(site_privilege_id)
- # apply site privilage at all controllers
- controller_site_privileges = ControllerSitePrivilege.objects.filter(
- site_privilege = site_privilege,
- )
- existing_controllers = [sp.controller for sp in controller_site_privileges]
- all_controllers = Controller.objects.all()
- for controller in all_controllers:
- if controller not in existing_controllers:
- ctrl_site_priv = ControllerSitePrivilege(controller=controller, site_privilege=site_privilege)
- ctrl_site_priv.save()
-
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Slice.py b/xos/synchronizers/openstack/model_policies/model_policy_Slice.py
deleted file mode 100644
index 088d583..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Slice.py
+++ /dev/null
@@ -1,102 +0,0 @@
-from xos.config import Config
-
-def handle_delete(slice):
- from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
- from collections import defaultdict
-
- public_nets = []
- private_net = None
- networks = Network.objects.filter(owner=slice)
-
- for n in networks:
- n.delete()
-
- # Note that sliceprivileges and slicecontrollers are autodeleted, through the dependency graph
-
-def handle(slice):
- from core.models import Controller, ControllerSlice, SiteDeployment, Network, NetworkSlice,NetworkTemplate, Slice
- from collections import defaultdict
-
- # only create nat_net if not using VTN
- support_nat_net = not getattr(Config(), "networking_use_vtn", False)
-
- print "MODEL POLICY: slice", slice
-
- # slice = Slice.get(slice_id)
-
- controller_slices = ControllerSlice.objects.filter(slice=slice)
- existing_controllers = [cs.controller for cs in controller_slices]
-
- print "MODEL POLICY: slice existing_controllers=", existing_controllers
-
- all_controllers = Controller.objects.all()
- for controller in all_controllers:
- if controller not in existing_controllers:
- print "MODEL POLICY: slice adding controller", controller
- sd = ControllerSlice(slice=slice, controller=controller)
- sd.save()
-
- if slice.network in ["host", "bridged"]:
- # Host and Bridged docker containers need no networks and they will
- # only get in the way.
- print "MODEL POLICY: Skipping network creation"
- elif slice.network in ["noauto"]:
- # do nothing
- pass
- else:
- # make sure slice has at least 1 public and 1 private networkd
- public_nets = []
- private_nets = []
- networks = Network.objects.filter(owner=slice)
- for network in networks:
- if not network.autoconnect:
- continue
- if network.template.name == 'Public dedicated IPv4':
- public_nets.append(network)
- elif network.template.name == 'Public shared IPv4':
- public_nets.append(network)
- elif network.template.name == 'Private':
- private_nets.append(network)
- if support_nat_net and (not public_nets):
- # ensure there is at least one public network, and default it to dedicated
- nat_net = Network(
- name = slice.name+'-nat',
- template = NetworkTemplate.objects.get(name='Public shared IPv4'),
- owner = slice
- )
- if slice.exposed_ports:
- nat_net.ports = slice.exposed_ports
- nat_net.save()
- public_nets.append(nat_net)
- print "MODEL POLICY: slice", slice, "made nat-net"
-
- if not private_nets:
- private_net = Network(
- name = slice.name+'-private',
- template = NetworkTemplate.objects.get(name='Private'),
- owner = slice
- )
- private_net.save()
- print "MODEL POLICY: slice", slice, "made private net"
- private_nets = [private_net]
- # create slice networks
- public_net_slice = None
- private_net_slice = None
- net_slices = NetworkSlice.objects.filter(slice=slice, network__in=private_nets+public_nets)
- for net_slice in net_slices:
- if net_slice.network in public_nets:
- public_net_slice = net_slice
- elif net_slice.network in private_nets:
- private_net_slice = net_slice
- if support_nat_net and (not public_net_slice):
- public_net_slice = NetworkSlice(slice=slice, network=public_nets[0])
- public_net_slice.save()
- print "MODEL POLICY: slice", slice, "made public_net_slice"
- if not private_net_slice:
- private_net_slice = NetworkSlice(slice=slice, network=private_nets[0])
- private_net_slice.save()
- print "MODEL POLICY: slice", slice, "made private_net_slice"
-
- print "MODEL POLICY: slice", slice, "DONE"
-
-
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_SlicePrivilege.py b/xos/synchronizers/openstack/model_policies/model_policy_SlicePrivilege.py
deleted file mode 100644
index bca7f22..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_SlicePrivilege.py
+++ /dev/null
@@ -1,15 +0,0 @@
-def handle(slice_privilege):
- from core.models import Controller, SlicePrivilege, ControllerSlicePrivilege
-
- # slice_privilege = SlicePrivilege.get(slice_privilege_id)
- # apply slice privilage at all controllers
- controller_slice_privileges = ControllerSlicePrivilege.objects.filter(
- slice_privilege = slice_privilege,
- )
- existing_controllers = [sp.controller for sp in controller_slice_privileges]
- all_controllers = Controller.objects.all()
- for controller in all_controllers:
- if controller not in existing_controllers:
- ctrl_slice_priv = ControllerSlicePrivilege(controller=controller, slice_privilege=slice_privilege)
- ctrl_slice_priv.save()
-
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_Sliver.py b/xos/synchronizers/openstack/model_policies/model_policy_Sliver.py
deleted file mode 100644
index a13428d..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_Sliver.py
+++ /dev/null
@@ -1,13 +0,0 @@
-
-def handle(instance):
- from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
-
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
- controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=instance.node.site_deployment.controller)
-
- for cn in controller_networks:
- if (cn.lazy_blocked):
- cn.lazy_blocked=False
- cn.backend_register = '{}'
- cn.save()
diff --git a/xos/synchronizers/openstack/model_policies/model_policy_User.py b/xos/synchronizers/openstack/model_policies/model_policy_User.py
deleted file mode 100644
index 8d14244..0000000
--- a/xos/synchronizers/openstack/model_policies/model_policy_User.py
+++ /dev/null
@@ -1,14 +0,0 @@
-def handle(user):
- from core.models import Controller, ControllerSite, ControllerUser, User
- from collections import defaultdict
-
- # user = User.get(user_id)
-
- controller_users = ControllerUser.objects.filter(user=user)
- existing_controllers = [cu.controller for cu in controller_users]
- all_controllers = Controller.objects.all()
- for controller in all_controllers:
- if controller not in existing_controllers:
- ctrl_user = ControllerUser(controller=controller, user=user)
- ctrl_user.save()
-
diff --git a/xos/synchronizers/openstack/openstacksyncstep.py b/xos/synchronizers/openstack/openstacksyncstep.py
deleted file mode 100644
index 46056cf..0000000
--- a/xos/synchronizers/openstack/openstacksyncstep.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import os
-import base64
-from synchronizers.base.syncstep import SyncStep
-
-class OpenStackSyncStep(SyncStep):
- """ XOS Sync step for copying data to OpenStack
- """
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- return
-
- def __call__(self, **args):
- return self.call(**args)
diff --git a/xos/synchronizers/openstack/steps/__init__.py b/xos/synchronizers/openstack/steps/__init__.py
deleted file mode 100644
index c70b0c0..0000000
--- a/xos/synchronizers/openstack/steps/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-#from .sync_controller_sites import SyncControllerSites
-#from .sync_controller_slices import SyncControllerSlices
-#from .sync_controller_users import SyncControllerUsers
-#from .sync_controller_site_privileges import SyncControllerSitePrivileges
-#from .sync_controller_slice_privileges import SyncControllerSlicePrivileges
-#from .sync_controller_networks import SyncControllerNetworks
diff --git a/xos/synchronizers/openstack/steps/delete_slivers.yaml b/xos/synchronizers/openstack/steps/delete_slivers.yaml
deleted file mode 100644
index fa6b879..0000000
--- a/xos/synchronizers/openstack/steps/delete_slivers.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
-
- - nova_compute:
- state: absent
- name: {{ name }}
diff --git a/xos/synchronizers/openstack/steps/purge_disabled_users.py b/xos/synchronizers/openstack/steps/purge_disabled_users.py
deleted file mode 100644
index 6b1dac3..0000000
--- a/xos/synchronizers/openstack/steps/purge_disabled_users.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import os
-import base64
-import datetime
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.user import User
-from xos.logger import observer_logger as logger
-
-#class SyncRoles(OpenStackSyncStep):
-# provides=[User]
-# requested_interval=0
-# observes=User
-#
-# def fetch_pending(self, deleted):
-# if (deleted):
-# # users marked as deleted
-# return User.deleted_objects.all()
-# else:
-# # disabled users that haven't been updated in over a week
-# one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
-# return User.objects.filter(is_active=False, updated__gt=one_week_ago)
-#
-# def sync_record(self, user):
-# user.delete()
diff --git a/xos/synchronizers/openstack/steps/sliver.yaml b/xos/synchronizers/openstack/steps/sliver.yaml
deleted file mode 100644
index e630415..0000000
--- a/xos/synchronizers/openstack/steps/sliver.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - nova_compute:
- state: present
- auth_url: http://172.31.38.128:5000/v2.0/
- login_username: admin
- login_password: 6a789bf69dd647e2
- login_tenant_name: admin
- name: gloopy
- image_id: 3ee851df-b35a-41c5-8551-f681e7209095
- key_name: boo
- wait_for: 200
- flavor_id: 3
- nics:
- - net-id: d1de537b-80dc-4c1b-aa5f-4a197b33b5f6
diff --git a/xos/synchronizers/openstack/steps/sync_container.py b/xos/synchronizers/openstack/steps/sync_container.py
deleted file mode 100644
index 41e1305..0000000
--- a/xos/synchronizers/openstack/steps/sync_container.py
+++ /dev/null
@@ -1,162 +0,0 @@
-import hashlib
-import os
-import socket
-import sys
-import base64
-import time
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from synchronizers.base.ansible import run_template_ssh
-from core.models import Service, Slice, Instance
-from xos.logger import Logger, logging
-
-# hpclibrary will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-logger = Logger(level=logging.INFO)
-
-class SyncContainer(SyncInstanceUsingAnsible):
- provides=[Instance]
- observes=Instance
- requested_interval=0
- template_name = "sync_container.yaml"
-
- def __init__(self, *args, **kwargs):
- super(SyncContainer, self).__init__(*args, **kwargs)
-
- def fetch_pending(self, deletion=False):
- objs = super(SyncContainer, self).fetch_pending(deletion)
- objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
- return objs
-
- def get_instance_port(self, container_port):
- for p in container_port.network.links.all():
- if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
- return p
- return None
-
- def get_parent_port_mac(self, instance, port):
- if not instance.parent:
- raise Exception("instance has no parent")
- for parent_port in instance.parent.ports.all():
- if parent_port.network == port.network:
- if not parent_port.mac:
- raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
- return parent_port.mac
- raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
-
- def get_ports(self, o):
- i=0
- ports = []
- if (o.slice.network in ["host", "bridged"]):
- pass # no ports in host or bridged mode
- else:
- for port in o.ports.all():
- if (not port.ip):
- # 'unmanaged' ports may have an ip, but no mac
- # XXX: are there any ports that have a mac but no ip?
- raise DeferredException("Port on network %s is not yet ready" % port.network.name)
-
- pd={}
- pd["mac"] = port.mac or ""
- pd["ip"] = port.ip or ""
- pd["xos_network_id"] = port.network.id
-
- if port.network.name == "wan_network":
- if port.ip:
- (a, b, c, d) = port.ip.split('.')
- pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
-
-
- if o.isolation == "container":
- # container on bare metal
- instance_port = self.get_instance_port(port)
- if not instance_port:
- raise DeferredException("No instance on slice for port on network %s" % port.network.name)
-
- pd["snoop_instance_mac"] = instance_port.mac
- pd["snoop_instance_id"] = instance_port.instance.instance_id
- pd["src_device"] = ""
- pd["bridge"] = "br-int"
- else:
- # container in VM
- pd["snoop_instance_mac"] = ""
- pd["snoop_instance_id"] = ""
- pd["parent_mac"] = self.get_parent_port_mac(o, port)
- pd["bridge"] = ""
-
- for (k,v) in port.get_parameters().items():
- pd[k] = v
-
- ports.append(pd)
-
- # for any ports that don't have a device, assign one
- used_ports = [x["device"] for x in ports if ("device" in x)]
- avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
- for port in ports:
- if not port.get("device",None):
- port["device"] = avail_ports.pop(0)
-
- return ports
-
- def get_extra_attributes(self, o):
- fields={}
- fields["ansible_tag"] = "container-%s" % str(o.id)
- if o.image.tag:
- fields["docker_image"] = o.image.path + ":" + o.image.tag
- else:
- fields["docker_image"] = o.image.path
- fields["ports"] = self.get_ports(o)
- if o.volumes:
- fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
- else:
- fields["volumes"] = ""
- fields["network_method"] = o.slice.network or "default"
- return fields
-
- def sync_record(self, o):
- logger.info("sync'ing object %s" % str(o),extra=o.tologdict())
-
- fields = self.get_ansible_fields(o)
-
- # If 'o' defines a 'sync_attributes' list, then we'll copy those
- # attributes into the Ansible recipe's field list automatically.
- if hasattr(o, "sync_attributes"):
- for attribute_name in o.sync_attributes:
- fields[attribute_name] = getattr(o, attribute_name)
-
- fields.update(self.get_extra_attributes(o))
-
- self.run_playbook(o, fields)
-
- o.instance_id = fields["container_name"]
- o.instance_name = fields["container_name"]
-
- o.save()
-
- def delete_record(self, o):
- logger.info("delete'ing object %s" % str(o),extra=o.tologdict())
-
- fields = self.get_ansible_fields(o)
-
- # If 'o' defines a 'sync_attributes' list, then we'll copy those
- # attributes into the Ansible recipe's field list automatically.
- if hasattr(o, "sync_attributes"):
- for attribute_name in o.sync_attributes:
- fields[attribute_name] = getattr(o, attribute_name)
-
- fields.update(self.get_extra_attributes(o))
-
- self.run_playbook(o, fields, "teardown_container.yaml")
-
- def run_playbook(self, o, fields, template_name=None):
- if not template_name:
- template_name = self.template_name
- tStart = time.time()
- run_template_ssh(template_name, fields, path="container")
- logger.info("playbook execution time %d" % int(time.time()-tStart),extra=o.tologdict())
-
-
diff --git a/xos/synchronizers/openstack/steps/sync_container.yaml b/xos/synchronizers/openstack/steps/sync_container.yaml
deleted file mode 100644
index 4ae4eb2..0000000
--- a/xos/synchronizers/openstack/steps/sync_container.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
-
- vars:
- container_name: {{ container_name }}
- docker_image: {{ docker_image }}
- network_method: {{ network_method }}
- ports:
- {% for port in ports %}
- - device: {{ port.device }}
- xos_network_id: {{ port.xos_network_id }}
- mac: {{ port.mac|default("") }}
- ip: {{ port.ip }}
- snoop_instance_mac: {{ port.snoop_instance_mac }}
- snoop_instance_id: {{ port.snoop_instance_id }}
- parent_mac: {{ port.parent_mac|default("") }}
- s_tag: {{ port.s_tag|default("") }}
- c_tag: {{ port.c_tag|default("") }}
- next_hop: {{ port.next_hop|default("") }}
- bridge: {{ port.bridge }}
- {% endfor %}
- volumes:
- {% for volume in volumes %}
- - {{ volume }}
- {% endfor %}
-
- tasks:
-
-# - name: Fix /etc/hosts
-# lineinfile:
-# dest=/etc/hosts
-# regexp="127.0.0.1 localhost"
-# line="127.0.0.1 localhost {{ instance_hostname }}"
-
- - name: Add repo key
- apt_key:
- keyserver=hkp://pgp.mit.edu:80
- id=58118E89F3A912897C070ADBF76221572C52609D
-
- - name: Install Docker repo
- apt_repository:
- repo="deb https://apt.dockerproject.org/repo ubuntu-trusty main"
- state=present
-
- - name: Install Docker
- apt:
- name={{ '{{' }} item {{ '}}' }}
- state=latest
- update_cache=yes
- with_items:
-# XXX docker 1.10 is not working on cloudlab
-# - docker-engine
- - python-pip
- - python-httplib2
-
- - name: Install Docker 1.9.1
- apt:
- name={{ '{{' }} item {{ '}}' }}
- update_cache=yes
- with_items:
- - docker-engine=1.9.1-0~trusty
-
- # Something is installing a requests library that is incompative with pip, and
- # will cause this recipe to fail next time it tries to run pip. Only the one
- # in /usr/local/lib is bad. There's still a good one in /usr/lib
- - name: check if bad requests library installed
- stat: path=/usr/local/lib/python2.7/dist-packages/requests
- register: bad_requests
-
- - name: remove bad request library
- shell: mv /usr/local/lib/python2.7/dist-packages/requests /usr/local/lib/python2.7/dist-packages/requests-bad
- when: bad_requests.stat.exists == True
-
- - name: Install docker-py
- pip:
- name=docker-py
- state=latest
-
- - name: install Pipework
- get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
- dest=/usr/local/bin/pipework
- mode=0755
-
-# - name: Start Container
-# docker:
-# docker_api_version: "1.18"
-# name: {{ container_name }}
-# # was: reloaded
-# state: running
-# image: {{ docker_image }}
-
- - name: check if systemd is installed
- stat: path=/usr/bin/systemctl
- register: systemctl
-
- - name: container upstart
- template: src=/opt/xos/synchronizers/openstack/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
-
- - name: container systemd
- template: src=/opt/xos/synchronizers/openstack/templates/container.service.j2 dest=/lib/systemd/system/container-{{ container_name }}.service
-
- - name: container startup script
- template: src=/opt/xos/synchronizers/openstack/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
-
- - name: container teardown script
- template: src=/opt/xos/synchronizers/openstack/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
-
- - name: restart systemd
- shell: systemctl daemon-reload
- when: systemctl.stat.exists == True
-
-{% if ports %}
- - name: make sure bridges are setup
- shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
- with_items: "ports"
-{% endif %}
-
- - name: Make sure container is running
- service: name=container-{{ container_name }} state=started
-
diff --git a/xos/synchronizers/openstack/steps/sync_controller_images.py b/xos/synchronizers/openstack/steps/sync_controller_images.py
deleted file mode 100644
index c1e5136..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_images.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import Controller
-from core.models import Image, ControllerImages
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-import json
-
-class SyncControllerImages(OpenStackSyncStep):
- provides=[ControllerImages]
- observes = ControllerImages
- requested_interval=0
- playbook='sync_controller_images.yaml'
-
- def fetch_pending(self, deleted):
- if (deleted):
- return []
-
- # now we return all images that need to be enacted
- return ControllerImages.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-
- def map_sync_inputs(self, controller_image):
- image_fields = {'endpoint':controller_image.controller.auth_url,
- 'endpoint_v3': controller_image.controller.auth_url_v3,
- 'admin_user':controller_image.controller.admin_user,
- 'admin_password':controller_image.controller.admin_password,
- 'domain': controller_image.controller.domain,
- 'name':controller_image.image.name,
- 'filepath':controller_image.image.path,
- 'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
- }
-
- return image_fields
-
- def map_sync_outputs(self, controller_image, res):
- image_id = res[0]['id']
- controller_image.glance_image_id = image_id
- controller_image.backend_status = '1 - OK'
- controller_image.save()
diff --git a/xos/synchronizers/openstack/steps/sync_controller_images.yaml b/xos/synchronizers/openstack/steps/sync_controller_images.yaml
deleted file mode 100644
index 6247a30..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_images.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - glance_image:
- auth_url={{ endpoint }}
- login_username="{{ admin_user }}"
- login_tenant_name="admin"
- login_password="{{ admin_password }}"
- name="{{ name }}"
- file="{{ filepath }}"
- disk_format='raw'
- is_public=true
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.py b/xos/synchronizers/openstack/steps/sync_controller_networks.py
deleted file mode 100644
index b61ef7b..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.py
+++ /dev/null
@@ -1,163 +0,0 @@
-import os
-import base64
-import struct
-import socket
-from collections import defaultdict
-from netaddr import IPAddress, IPNetwork
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.network import *
-from core.models.slice import *
-from core.models.instance import Instance
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.config import Config
-import json
-
-import pdb
-
-class SyncControllerNetworks(OpenStackSyncStep):
- requested_interval = 0
- provides=[Network]
- observes=ControllerNetwork
- playbook='sync_controller_networks.yaml'
-
- def alloc_subnet(self, uuid):
- # 16 bits only
- uuid_masked = uuid & 0xffff
- a = 10
- b = uuid_masked >> 8
- c = uuid_masked & 0xff
- d = 0
-
- cidr = '%d.%d.%d.%d/24'%(a,b,c,d)
- return cidr
-
- def alloc_gateway(self, subnet):
- # given a CIDR, allocate a default gateway using the .1 address within
- # the subnet.
- # 10.123.0.0/24 --> 10.123.0.1
- # 207.141.192.128/28 --> 207.141.192.129
- (network, bits) = subnet.split("/")
- network=network.strip()
- bits=int(bits.strip())
- netmask = (~(pow(2,32-bits)-1) & 0xFFFFFFFF)
- ip = struct.unpack("!L", socket.inet_aton(network))[0]
- ip = ip & netmask | 1
- return socket.inet_ntoa(struct.pack("!L", ip))
-
- def save_controller_network(self, controller_network):
- network_name = controller_network.network.name
- subnet_name = '%s-%d'%(network_name,controller_network.pk)
- if controller_network.subnet and controller_network.subnet.strip():
- # If a subnet is already specified (pass in by the creator), then
- # use that rather than auto-generating one.
- cidr = controller_network.subnet.strip()
- print "CIDR_MS", cidr
- else:
- cidr = self.alloc_subnet(controller_network.pk)
- print "CIDR_AMS", cidr
-
- if controller_network.network.start_ip and controller_network.network.start_ip.strip():
- start_ip = controller_network.network.start_ip.strip()
- else:
- start_ip = None
-
- if controller_network.network.end_ip and controller_network.network.end_ip.strip():
- end_ip = controller_network.network.end_ip.strip()
- else:
- end_ip = None
-
- self.cidr=cidr
- slice = controller_network.network.owner
-
- network_fields = {'endpoint':controller_network.controller.auth_url,
- 'endpoint_v3': controller_network.controller.auth_url_v3,
- 'admin_user':slice.creator.email,
- 'admin_password':slice.creator.remote_password,
- 'admin_project':slice.name,
- 'domain': controller_network.controller.domain,
- 'name':network_name,
- 'subnet_name':subnet_name,
- 'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
- 'cidr':cidr,
- 'gateway':self.alloc_gateway(cidr),
- 'start_ip':start_ip,
- 'end_ip':end_ip,
- 'use_vtn':getattr(Config(), "networking_use_vtn", False),
- 'delete':False
- }
- return network_fields
-
- def map_sync_outputs(self, controller_network,res):
- network_id = res[0]['network']['id']
- subnet_id = res[1]['subnet']['id']
- controller_network.net_id = network_id
- controller_network.subnet = self.cidr
- controller_network.subnet_id = subnet_id
- controller_network.backend_status = '1 - OK'
- controller_network.save()
-
-
- def map_sync_inputs(self, controller_network):
- # XXX This check should really be made from booleans, rather than using hardcoded network names
- #if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct', 'management_template'):
- # logger.info("skipping network controller %s because it is not private" % controller_network)
- # # We only sync private networks
- # return SyncStep.SYNC_WITHOUT_RUNNING
-
- # hopefully a better approach than above
- if (controller_network.network.template.shared_network_name or controller_network.network.template.shared_network_id):
- return SyncStep.SYNC_WITHOUT_RUNNING
-
- if not controller_network.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
- return
-
- if controller_network.network.owner and controller_network.network.owner.creator:
- return self.save_controller_network(controller_network)
- else:
- raise Exception('Could not save network controller %s'%controller_network)
-
- def map_delete_inputs(self, controller_network):
- # XXX This check should really be made from booleans, rather than using hardcoded network names
- if (controller_network.network.template.name not in ['Private', 'Private-Indirect', 'Private-Direct']):
- # We only sync private networks
- return
- try:
- slice = controller_network.network.owner # XXX: FIXME!!
- except:
- raise Exception('Could not get slice for Network %s'%controller_network.network.name)
-
- network_name = controller_network.network.name
- subnet_name = '%s-%d'%(network_name,controller_network.pk)
- cidr = controller_network.subnet
- network_fields = {'endpoint':controller_network.controller.auth_url,
- 'admin_user':slice.creator.email, # XXX: FIXME
- 'tenant_name':slice.name, # XXX: FIXME
- 'admin_password':slice.creator.remote_password,
- 'name':network_name,
- 'subnet_name':subnet_name,
- 'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
- 'cidr':cidr,
- 'delete':True
- }
-
- return network_fields
-
- """
- driver = OpenStackDriver().client_driver(caller=controller_network.network.owner.creator,
- tenant=controller_network.network.owner.name,
- controller=controller_network.controller.name)
- if (controller_network.router_id) and (controller_network.subnet_id):
- driver.delete_router_interface(controller_network.router_id, controller_network.subnet_id)
- if controller_network.subnet_id:
- driver.delete_subnet(controller_network.subnet_id)
- if controller_network.router_id:
- driver.delete_router(controller_network.router_id)
- if controller_network.net_id:
- driver.delete_network(controller_network.net_id)
- """
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml b/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
deleted file mode 100644
index 7b6075c..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - os_network:
- name: {{ name }}
- shared: true
- {% if not delete -%}
- state: present
- {% else -%}
- state: absent
- {% endif -%}
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ admin_project }}
-
-{% if not delete %}
- - os_subnet:
- name: {{ subnet_name }}
- network_name: {{ name }}
- {% if not delete -%}
- state: present
- cidr: {{ cidr }}
- dns_nameservers: 8.8.8.8
- {% if use_vtn -%}
- gateway_ip: {{ gateway }}
- {% endif -%}
- {% if start_ip -%}
- allocation_pool_start: {{ start_ip }}
- {% endif -%}
- {% if end_ip -%}
- allocation_pool_end: {{ end_ip }}
- {% endif -%}
- {% else -%}
- state: absent
- {% endif -%}
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ admin_project }}
-
-{% if not use_vtn -%}
-# until we get 'no-gateway-ip' arg to os_subnet, in Ansible 2.2
-# https://github.com/ansible/ansible-modules-core/pull/3736
- - command:
- neutron \
- --os-auth-url {{ endpoint }} \
- --os-username {{ admin_user }} \
- --os-password {{ admin_password }} \
- --os-tenant-name {{ admin_project }} \
- subnet-update --no-gateway {{ subnet_name }}
-{% endif -%}
-
-{% endif %}
-
diff --git a/xos/synchronizers/openstack/steps/sync_controller_site_privileges.py b/xos/synchronizers/openstack/steps/sync_controller_site_privileges.py
deleted file mode 100644
index 59919fe..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_site_privileges.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.site import Controller, SitePrivilege
-from core.models.user import User
-from core.models.controlleruser import ControllerUser, ControllerSitePrivilege
-from xos.logger import observer_logger as logger
-from synchronizers.base.ansible import *
-import json
-
-class SyncControllerSitePrivileges(OpenStackSyncStep):
- provides=[SitePrivilege]
- requested_interval=0
- observes=ControllerSitePrivilege
- playbook='sync_controller_users.yaml'
-
- def map_sync_inputs(self, controller_site_privilege):
- controller_register = json.loads(controller_site_privilege.controller.backend_register)
- if not controller_site_privilege.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
- return
-
- roles = [controller_site_privilege.site_privilege.role.role]
- # setup user home site roles at controller
- if not controller_site_privilege.site_privilege.user.site:
- raise Exception('Siteless user %s'%controller_site_privilege.site_privilege.user.email)
- else:
- # look up tenant id for the user's site at the controller
- #ctrl_site_deployments = SiteDeployment.objects.filter(
- # site_deployment__site=controller_site_privilege.user.site,
- # controller=controller_site_privilege.controller)
-
- #if ctrl_site_deployments:
- # # need the correct tenant id for site at the controller
- # tenant_id = ctrl_site_deployments[0].tenant_id
- # tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
- user_fields = {
- 'endpoint':controller_site_privilege.controller.auth_url,
- 'endpoint_v3': controller_site_privilege.controller.auth_url_v3,
- 'domain': controller_site_privilege.controller.domain,
- 'name': controller_site_privilege.site_privilege.user.email,
- 'email': controller_site_privilege.site_privilege.user.email,
- 'password': controller_site_privilege.site_privilege.user.remote_password,
- 'admin_user': controller_site_privilege.controller.admin_user,
- 'admin_password': controller_site_privilege.controller.admin_password,
- 'ansible_tag':'%s@%s'%(controller_site_privilege.site_privilege.user.email.replace('@','-at-'),controller_site_privilege.controller.name),
- 'admin_tenant': controller_site_privilege.controller.admin_tenant,
- 'roles':roles,
- 'tenant':controller_site_privilege.site_privilege.site.login_base}
-
- return user_fields
-
- def map_sync_outputs(self, controller_site_privilege, res):
- # results is an array in which each element corresponds to an
- # "ok" string received per operation. If we get as many oks as
- # the number of operations we issued, that means a grand success.
- # Otherwise, the number of oks tell us which operation failed.
- controller_site_privilege.role_id = res[0]['id']
- controller_site_privilege.save()
-
- def delete_record(self, controller_site_privilege):
- controller_register = json.loads(controller_site_privilege.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_site_privilege.controller.name)
-
- if controller_site_privilege.role_id:
- driver = self.driver.admin_driver(controller=controller_site_privilege.controller)
- user = ControllerUser.objects.get(
- controller=controller_site_privilege.controller,
- user=controller_site_privilege.site_privilege.user
- )
- site = ControllerSite.objects.get(
- controller=controller_site_privilege.controller,
- user=controller_site_privilege.site_privilege.user
- )
- driver.delete_user_role(
- user.kuser_id,
- site.tenant_id,
- controller_site_privilege.site_prvilege.role.role
- )
diff --git a/xos/synchronizers/openstack/steps/sync_controller_sites.py b/xos/synchronizers/openstack/steps/sync_controller_sites.py
deleted file mode 100644
index 1b3c2ba..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_sites.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.openstack.openstacksyncstep import OpenStackSyncStep
-from core.models.site import *
-from synchronizers.base.syncstep import *
-from synchronizers.base.ansible import *
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSites(OpenStackSyncStep):
- requested_interval=0
- provides=[Site]
- observes=ControllerSite
- playbook = 'sync_controller_sites.yaml'
-
- def fetch_pending(self, deleted=False):
- lobjs = ControllerSite.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(controller__isnull=False))
- return lobjs
-
- def map_sync_inputs(self, controller_site):
- tenant_fields = {'endpoint':controller_site.controller.auth_url,
- 'endpoint_v3': controller_site.controller.auth_url_v3,
- 'domain': controller_site.controller.domain,
- 'admin_user': controller_site.controller.admin_user,
- 'admin_password': controller_site.controller.admin_password,
- 'admin_tenant': controller_site.controller.admin_tenant,
- 'ansible_tag': '%s@%s'%(controller_site.site.login_base,controller_site.controller.name), # name of ansible playbook
- 'tenant': controller_site.site.login_base,
- 'tenant_description': controller_site.site.name}
- return tenant_fields
-
- def map_sync_outputs(self, controller_site, res):
- controller_site.tenant_id = res[0]['id']
- controller_site.backend_status = '1 - OK'
- controller_site.save()
-
- def delete_record(self, controller_site):
- controller_register = json.loads(controller_site.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_site.controller.name)
-
- if controller_site.tenant_id:
- driver = self.driver.admin_driver(controller=controller_site.controller)
- driver.delete_tenant(controller_site.tenant_id)
-
- """
- Ansible does not support tenant deletion yet
-
- import pdb
- pdb.set_trace()
- template = os_template_env.get_template('delete_controller_sites.yaml')
- tenant_fields = {'endpoint':controller_site.controller.auth_url,
- 'admin_user': controller_site.controller.admin_user,
- 'admin_password': controller_site.controller.admin_password,
- 'admin_tenant': 'admin',
- 'ansible_tag': 'controller_sites/%s@%s'%(controller_site.controller_site.site.login_base,controller_site.controller_site.deployment.name), # name of ansible playbook
- 'tenant': controller_site.controller_site.site.login_base,
- 'delete': True}
-
- rendered = template.render(tenant_fields)
- res = run_template('sync_controller_sites.yaml', tenant_fields)
-
- if (len(res)!=1):
- raise Exception('Could not assign roles for user %s'%tenant_fields['tenant'])
- """
diff --git a/xos/synchronizers/openstack/steps/sync_controller_sites.yaml b/xos/synchronizers/openstack/steps/sync_controller_sites.yaml
deleted file mode 100644
index 4129802..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_sites.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
diff --git a/xos/synchronizers/openstack/steps/sync_controller_slice_privileges.py b/xos/synchronizers/openstack/steps/sync_controller_slice_privileges.py
deleted file mode 100644
index e5513b0..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_slice_privileges.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.slice import Controller, SlicePrivilege
-from core.models.user import User
-from core.models.controlleruser import ControllerUser, ControllerSlicePrivilege
-from synchronizers.base.ansible import *
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSlicePrivileges(OpenStackSyncStep):
- provides=[SlicePrivilege]
- requested_interval=0
- observes=ControllerSlicePrivilege
- playbook = 'sync_controller_users.yaml'
-
- def map_sync_inputs(self, controller_slice_privilege):
- if not controller_slice_privilege.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
- return
-
- template = os_template_env.get_template('sync_controller_users.yaml')
- roles = [controller_slice_privilege.slice_privilege.role.role]
- # setup user home slice roles at controller
- if not controller_slice_privilege.slice_privilege.user.site:
- raise Exception('Sliceless user %s'%controller_slice_privilege.slice_privilege.user.email)
- else:
- # look up tenant id for the user's slice at the controller
- #ctrl_slice_deployments = SliceDeployment.objects.filter(
- # slice_deployment__slice=controller_slice_privilege.user.slice,
- # controller=controller_slice_privilege.controller)
-
- #if ctrl_slice_deployments:
- # # need the correct tenant id for slice at the controller
- # tenant_id = ctrl_slice_deployments[0].tenant_id
- # tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
- user_fields = {
- 'endpoint':controller_slice_privilege.controller.auth_url,
- 'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
- 'domain': controller_slice_privilege.controller.domain,
- 'name': controller_slice_privilege.slice_privilege.user.email,
- 'email': controller_slice_privilege.slice_privilege.user.email,
- 'password': controller_slice_privilege.slice_privilege.user.remote_password,
- 'admin_user': controller_slice_privilege.controller.admin_user,
- 'admin_password': controller_slice_privilege.controller.admin_password,
- 'ansible_tag':'%s@%s@%s'%(controller_slice_privilege.slice_privilege.user.email.replace('@','-at-'),controller_slice_privilege.slice_privilege.slice.name,controller_slice_privilege.controller.name),
- 'admin_tenant': controller_slice_privilege.controller.admin_tenant,
- 'roles':roles,
- 'tenant':controller_slice_privilege.slice_privilege.slice.name}
- return user_fields
-
- def map_sync_outputs(self, controller_slice_privilege, res):
- controller_slice_privilege.role_id = res[0]['id']
- controller_slice_privilege.save()
-
- def delete_record(self, controller_slice_privilege):
- controller_register = json.loads(controller_slice_privilege.controller.backend_register)
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%controller_slice_privilege.controller.name)
-
- if controller_slice_privilege.role_id:
- driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
- user = ControllerUser.objects.get(
- controller=controller_slice_privilege.controller,
- user=controller_slice_privilege.slice_privilege.user
- )
- slice = ControllerSlice.objects.get(
- controller=controller_slice_privilege.controller,
- user=controller_slice_privilege.slice_privilege.user
- )
- driver.delete_user_role(
- user.kuser_id,
- slice.tenant_id,
- controller_slice_privilege.slice_prvilege.role.role
- )
diff --git a/xos/synchronizers/openstack/steps/sync_controller_slices.py b/xos/synchronizers/openstack/steps/sync_controller_slices.py
deleted file mode 100644
index 0666230..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_slices.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from netaddr import IPAddress, IPNetwork
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import *
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerSlices(OpenStackSyncStep):
- provides=[Slice]
- requested_interval=0
- observes=ControllerSlice
- playbook='sync_controller_slices.yaml'
-
- def map_sync_inputs(self, controller_slice):
- logger.info("sync'ing slice controller %s" % controller_slice)
-
- if not controller_slice.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
- return
-
- controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
- controller=controller_slice.controller)
- if not controller_users:
- raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
- else:
- controller_user = controller_users[0]
- driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
- roles = [driver.get_admin_role().name]
-
- max_instances=int(controller_slice.slice.max_instances)
- tenant_fields = {'endpoint':controller_slice.controller.auth_url,
- 'endpoint_v3': controller_slice.controller.auth_url_v3,
- 'domain': controller_slice.controller.domain,
- 'admin_user': controller_slice.controller.admin_user,
- 'admin_password': controller_slice.controller.admin_password,
- 'admin_tenant': 'admin',
- 'tenant': controller_slice.slice.name,
- 'tenant_description': controller_slice.slice.description,
- 'roles':roles,
- 'name':controller_user.user.email,
- 'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
- 'max_instances':max_instances}
-
- return tenant_fields
-
- def map_sync_outputs(self, controller_slice, res):
- tenant_id = res[0]['id']
- if (not controller_slice.tenant_id):
- try:
- driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
- driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_instances))
- except:
- logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
- raise Exception('Could not update quota for %s'%controller_slice.slice.name)
-
- controller_slice.tenant_id = tenant_id
- controller_slice.backend_status = '1 - OK'
- controller_slice.save()
-
-
- def map_delete_inputs(self, controller_slice):
- controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
- controller=controller_slice.controller)
- if not controller_users:
- raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
- else:
- controller_user = controller_users[0]
-
- tenant_fields = {'endpoint':controller_slice.controller.auth_url,
- 'admin_user': controller_slice.controller.admin_user,
- 'admin_password': controller_slice.controller.admin_password,
- 'admin_tenant': 'admin',
- 'tenant': controller_slice.slice.name,
- 'tenant_description': controller_slice.slice.description,
- 'name':controller_user.user.email,
- 'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
- 'delete': True}
- return tenant_fields
diff --git a/xos/synchronizers/openstack/steps/sync_controller_slices.yaml b/xos/synchronizers/openstack/steps/sync_controller_slices.yaml
deleted file mode 100644
index 61470ce..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_slices.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- {% if delete -%}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}" state=absent
- {% else -%}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} tenant={{ tenant }} tenant_description="{{ tenant_description }}"
- {% for role in roles %}
- - keystone_user: endpoint={{ endpoint }} login_user={{ admin_user }} login_password={{ admin_password }} login_tenant_name={{ admin_tenant }} user="{{ name }}" role={{ role }} tenant={{ tenant }}
- {% endfor %}
- {% endif %}
diff --git a/xos/synchronizers/openstack/steps/sync_controller_users.py b/xos/synchronizers/openstack/steps/sync_controller_users.py
deleted file mode 100644
index c9de142..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_users.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models.site import Controller, SiteDeployment, SiteDeployment
-from core.models.user import User
-from core.models.controlleruser import ControllerUser
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.logger import observer_logger as logger
-import json
-
-class SyncControllerUsers(OpenStackSyncStep):
- provides=[User]
- requested_interval=0
- observes=ControllerUser
- playbook='sync_controller_users.yaml'
-
- def map_sync_inputs(self, controller_user):
- if not controller_user.controller.admin_user:
- logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
- return
-
- # All users will have at least the 'user' role at their home site/tenant.
- # We must also check if the user should have the admin role
-
- roles = ['user']
- if controller_user.user.is_admin:
- driver = OpenStackDriver().admin_driver(controller=controller_user.controller)
- roles.append(driver.get_admin_role().name)
-
- # setup user home site roles at controller
- if not controller_user.user.site:
- raise Exception('Siteless user %s'%controller_user.user.email)
- else:
- # look up tenant id for the user's site at the controller
- #ctrl_site_deployments = SiteDeployment.objects.filter(
- # site_deployment__site=controller_user.user.site,
- # controller=controller_user.controller)
-
- #if ctrl_site_deployments:
- # # need the correct tenant id for site at the controller
- # tenant_id = ctrl_site_deployments[0].tenant_id
- # tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
- user_fields = {
- 'endpoint':controller_user.controller.auth_url,
- 'endpoint_v3': controller_user.controller.auth_url_v3,
- 'domain': controller_user.controller.domain,
- 'name': controller_user.user.email,
- 'email': controller_user.user.email,
- 'password': controller_user.user.remote_password,
- 'admin_user': controller_user.controller.admin_user,
- 'admin_password': controller_user.controller.admin_password,
- 'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
- 'admin_project': controller_user.controller.admin_tenant,
- 'roles':roles,
- 'project':controller_user.user.site.login_base
- }
- return user_fields
-
- def map_sync_outputs(self, controller_user, res):
- controller_user.kuser_id = res[0]['user']['id']
- controller_user.backend_status = '1 - OK'
- controller_user.save()
-
- def delete_record(self, controller_user):
- if controller_user.kuser_id:
- driver = self.driver.admin_driver(controller=controller_user.controller)
- driver.delete_user(controller_user.kuser_id)
diff --git a/xos/synchronizers/openstack/steps/sync_controller_users.yaml b/xos/synchronizers/openstack/steps/sync_controller_users.yaml
deleted file mode 100644
index 5cb3cc9..0000000
--- a/xos/synchronizers/openstack/steps/sync_controller_users.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
-
- - name: Create user account for "{{ name }}"
- os_user:
- name: "{{ name }}"
- email: "{{ email }}"
- password: "{{ password }}"
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ admin_project }}
-
- - name: Create project for "{{ project }}"
- os_project:
- name: "{{ project }}"
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ admin_project }}
-
-{% for role in roles %}
- - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
- keystone_user:
- user: "{{ name }}"
- role: "{{ role }}"
- tenant: "{{ project }}"
- endpoint: {{ endpoint }}
- login_user: {{ admin_user }}
- login_password: {{ admin_password }}
- login_tenant_name: {{ admin_project }}
-{% endfor %}
-
-# FIXME: the below should work in Ansible 2.1, once we get the Admin/admin and
-# Member/user role name issues straightened out.
-#
-# - name: Creating role "{{ role }}" for "{{ name }}" on "{{ project }}"
-# os_user_role:
-# user: "{{ name }}"
-# role: "{{ role }}"
-# project: "{{ project }}"
-# auth:
-# auth_url: {{ endpoint }}
-# username: {{ admin_user }}
-# password: {{ admin_password }}
-# project_name: {{ admin_project }}
diff --git a/xos/synchronizers/openstack/steps/sync_images.py b/xos/synchronizers/openstack/steps/sync_images.py
deleted file mode 100644
index 1638fd0..0000000
--- a/xos/synchronizers/openstack/steps/sync_images.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.image import Image
-from xos.logger import observer_logger as logger
-
-class SyncImages(OpenStackSyncStep):
- provides=[Image]
- requested_interval=0
- observes=Image
-
- def fetch_pending(self, deleted):
- # Images come from the back end
- # You can't delete them
- if (deleted):
- logger.info("SyncImages: returning because deleted=True")
- return []
-
- # get list of images on disk
- images_path = Config().observer_images_directory
-
- logger.info("SyncImages: deleted=False, images_path=%s" % images_path)
-
- available_images = {}
- if os.path.exists(images_path):
- for f in os.listdir(images_path):
- filename = os.path.join(images_path, f)
- if os.path.isfile(filename) and filename.endswith(".img"):
- available_images[f] = filename
-
- logger.info("SyncImages: available_images = %s" % str(available_images))
-
- images = Image.objects.all()
- image_names = [image.name for image in images]
-
- for image_name in available_images:
- #remove file extension
- clean_name = ".".join(image_name.split('.')[:-1])
- if clean_name not in image_names:
- logger.info("SyncImages: adding %s" % clean_name)
- image = Image(name=clean_name,
- disk_format='raw',
- container_format='bare',
- path = available_images[image_name])
- image.save()
-
- return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-
- def sync_record(self, image):
- image.save()
diff --git a/xos/synchronizers/openstack/steps/sync_instances.py b/xos/synchronizers/openstack/steps/sync_instances.py
deleted file mode 100644
index 5cec50d..0000000
--- a/xos/synchronizers/openstack/steps/sync_instances.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import os
-import base64
-import socket
-from django.db.models import F, Q
-from xos.config import Config
-from xos.settings import RESTAPI_HOSTNAME, RESTAPI_PORT
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.instance import Instance
-from core.models.slice import Slice, SlicePrivilege, ControllerSlice
-from core.models.network import Network, NetworkSlice, ControllerNetwork
-from synchronizers.base.ansible import *
-from synchronizers.base.syncstep import *
-from xos.logger import observer_logger as logger
-
-def escape(s):
- s = s.replace('\n',r'\n').replace('"',r'\"')
- return s
-
-class SyncInstances(OpenStackSyncStep):
- provides=[Instance]
- requested_interval=0
- observes=Instance
- playbook='sync_instances.yaml'
-
- def fetch_pending(self, deletion=False):
- objs = super(SyncInstances, self).fetch_pending(deletion)
- objs = [x for x in objs if x.isolation=="vm"]
- return objs
-
- def get_userdata(self, instance, pubkeys):
- userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
- userdata += 'ssh_authorized_keys:\n'
- for key in pubkeys:
- userdata += ' - %s\n' % key
- return userdata
-
- def sort_nics(self, nics):
- result = []
-
- # Enforce VTN's network order requirement. The access network must be
- # inserted into the first slot. The management network must be inserted
- # into the second slot.
-
- # move the private and/or access network to the first spot
- for nic in nics[:]:
- network=nic.get("network", None)
- if network:
- tem = network.template
- if (tem.visibility == "private") and (tem.translation=="none") and ("management" not in tem.name):
- result.append(nic)
- nics.remove(nic)
-
- # move the management network to the second spot
- for net in nics[:]:
- network=nic.get("network", None)
- if network:
- tem = network.template
- if (tem.visibility == "private") and (tem.translation=="none") and ("management" in tem.name):
-#MCORD
-# if len(result)!=1:
-# raise Exception("Management network needs to be inserted in slot 1, but there are %d private nics" % len(result))
- result.append(nic)
- nics.remove(nic)
-
- # add everything else. For VTN there probably shouldn't be any more.
- result.extend(nics)
-
- return result
-
- def map_sync_inputs(self, instance):
- inputs = {}
- metadata_update = {}
- if (instance.numberCores):
- metadata_update["cpu_cores"] = str(instance.numberCores)
-
- for tag in instance.slice.tags.all():
- if tag.name.startswith("sysctl-"):
- metadata_update[tag.name] = tag.value
-
- slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
- pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
- if instance.creator.public_key:
- pubkeys.add(instance.creator.public_key)
-
- if instance.slice.creator.public_key:
- pubkeys.add(instance.slice.creator.public_key)
-
- if instance.slice.service and instance.slice.service.public_key:
- pubkeys.add(instance.slice.service.public_key)
-
- nics=[]
-
- # handle ports the were created by the user
- port_ids=[]
- for port in Port.objects.filter(instance=instance):
- if not port.port_id:
- raise DeferredException("Instance %s waiting on port %s" % (instance, port))
- nics.append({"kind": "port", "value": port.port_id, "network": port.network})
-
- # we want to exclude from 'nics' any network that already has a Port
- existing_port_networks = [port.network for port in Port.objects.filter(instance=instance)]
-
- networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice) if ns.network not in existing_port_networks]
- controller_networks = ControllerNetwork.objects.filter(network__in=networks,
- controller=instance.node.site_deployment.controller)
-
- #controller_networks = self.sort_controller_networks(controller_networks)
- for controller_network in controller_networks:
- # Lenient exception - causes slow backoff
- if controller_network.network.template.visibility == 'private' and \
- controller_network.network.template.translation == 'none':
- if not controller_network.net_id:
- raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
- nics.append({"kind": "net", "value": controller_network.net_id, "network": controller_network.network})
-
- # now include network template
- network_templates = [network.template.shared_network_name for network in networks \
- if network.template.shared_network_name]
-
- #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
- driver = self.driver.admin_driver(tenant='admin', controller=instance.node.site_deployment.controller)
- nets = driver.shell.neutron.list_networks()['networks']
- for net in nets:
- if net['name'] in network_templates:
- nics.append({"kind": "net", "value": net['id'], "network": None})
-
- if (not nics):
- for net in nets:
- if net['name']=='public':
- nics.append({"kind": "net", "value": net['id'], "network": None})
-
- nics = self.sort_nics(nics)
-
- image_name = None
- controller_images = instance.image.controllerimages.filter(controller=instance.node.site_deployment.controller)
- if controller_images:
- image_name = controller_images[0].image.name
- logger.info("using image from ControllerImage object: " + str(image_name))
-
- if image_name is None:
- controller_driver = self.driver.admin_driver(controller=instance.node.site_deployment.controller)
- images = controller_driver.shell.glanceclient.images.list()
- for image in images:
- if image.name == instance.image.name or not image_name:
- image_name = image.name
- logger.info("using image from glance: " + str(image_name))
-
- try:
- legacy = Config().observer_legacy
- except:
- legacy = False
-
- if (legacy):
- host_filter = instance.node.name.split('.',1)[0]
- else:
- host_filter = instance.node.name.strip()
-
- availability_zone_filter = 'nova:%s'%host_filter
- instance_name = '%s-%d'%(instance.slice.name,instance.id)
- self.instance_name = instance_name
-
- userData = self.get_userdata(instance, pubkeys)
- if instance.userData:
- userData += instance.userData
-
- controller = instance.node.site_deployment.controller
- fields = {'endpoint':controller.auth_url,
- 'endpoint_v3': controller.auth_url_v3,
- 'domain': controller.domain,
- 'admin_user': instance.creator.email,
- 'admin_password': instance.creator.remote_password,
- 'project_name': instance.slice.name,
- 'tenant': instance.slice.name,
- 'tenant_description': instance.slice.description,
- 'name':instance_name,
- 'ansible_tag':instance_name,
- 'availability_zone': availability_zone_filter,
- 'image_name':image_name,
- 'flavor_name':instance.flavor.name,
- 'nics':nics,
- 'meta':metadata_update,
- 'user_data':r'%s'%escape(userData)}
- return fields
-
-
- def map_sync_outputs(self, instance, res):
- instance_id = res[0]['openstack']['OS-EXT-SRV-ATTR:instance_name']
- instance_uuid = res[0]['id']
-
- try:
- hostname = res[0]['openstack']['OS-EXT-SRV-ATTR:hypervisor_hostname']
- ip = socket.gethostbyname(hostname)
- instance.ip = ip
- except:
- pass
-
- instance.instance_id = instance_id
- instance.instance_uuid = instance_uuid
- instance.instance_name = self.instance_name
- instance.save()
-
-
- def map_delete_inputs(self, instance):
- controller_register = json.loads(instance.node.site_deployment.controller.backend_register)
-
- if (controller_register.get('disabled',False)):
- raise InnocuousException('Controller %s is disabled'%instance.node.site_deployment.controller.name)
-
- instance_name = '%s-%d'%(instance.slice.name,instance.id)
- controller = instance.node.site_deployment.controller
- input = {'endpoint':controller.auth_url,
- 'admin_user': instance.creator.email,
- 'admin_password': instance.creator.remote_password,
- 'admin_tenant': instance.slice.name,
- 'tenant': instance.slice.name,
- 'tenant_description': instance.slice.description,
- 'name':instance_name,
- 'ansible_tag':instance_name,
- 'delete': True}
- return input
diff --git a/xos/synchronizers/openstack/steps/sync_instances.yaml b/xos/synchronizers/openstack/steps/sync_instances.yaml
deleted file mode 100644
index 476890f..0000000
--- a/xos/synchronizers/openstack/steps/sync_instances.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- hosts: 127.0.0.1
- connection: local
- tasks:
- - os_server:
- name: {{ name }}
- auth:
- auth_url: {{ endpoint }}
- username: {{ admin_user }}
- password: {{ admin_password }}
- project_name: {{ project_name }}
- {% if delete -%}
- state: absent
- {% else -%}
- state: present
- availability_zone: "{{ availability_zone }}"
- image: {{ image_name }}
- flavor: {{ flavor_name }}
- timeout: 200
- userdata: "{{ user_data }}"
- config_drive: yes
- auto_ip: no
- nics:
- {% for nic in nics %}
- - {{ nic.kind }}-id: {{ nic.value }}
- {% endfor %}
-
- {% if meta %}
- meta:
- {% for k,v in meta.items() %}
- {{ k }} : "{{ v }}"
- {% endfor %}
- {% endif %}
- {% endif %}
-
diff --git a/xos/synchronizers/openstack/steps/sync_object.py b/xos/synchronizers/openstack/steps/sync_object.py
deleted file mode 100644
index aaf2f25..0000000
--- a/xos/synchronizers/openstack/steps/sync_object.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import os
-import base64
-from collections import defaultdict
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from synchronizers.base.syncstep import *
-from core.models import *
-from synchronizers.base.ansible import *
-from openstack_xos.driver import OpenStackDriver
-from xos.logger import observer_logger as logger
-import json
-
-class SyncObject(OpenStackSyncStep):
- provides=[] # Caller fills this in
- requested_interval=0
- observes=[] # Caller fills this in
-
- def sync_record(self, r):
- raise DeferredException('Waiting for Service dependency: %r'%r)
diff --git a/xos/synchronizers/openstack/steps/sync_ports.py b/xos/synchronizers/openstack/steps/sync_ports.py
deleted file mode 100644
index 5e0ff04..0000000
--- a/xos/synchronizers/openstack/steps/sync_ports.py
+++ /dev/null
@@ -1,230 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models import Controller
-from core.models.network import *
-from xos.logger import observer_logger as logger
-
-class SyncPorts(OpenStackSyncStep):
- requested_interval = 0 # 3600
- provides=[Port]
- observes=Port
-
- # The way it works is to enumerate the all of the ports that neutron
- # has, and then work backward from each port's network-id to determine
- # which Network is associated from the port.
-
- def call(self, failed=[], deletion=False):
- if deletion:
- self.delete_ports()
- else:
- self.sync_ports()
-
- def get_driver(self, port):
- # We need to use a client driver that specifies the tenant
- # of the destination instance. Nova-compute will not connect
- # ports to instances if the port's tenant does not match
- # the instance's tenant.
-
- # A bunch of stuff to compensate for OpenStackDriver.client_driver()
- # not being in working condition.
- from openstack_xos.client import OpenStackClient
- from openstack_xos.driver import OpenStackDriver
- controller = port.instance.node.site_deployment.controller
- slice = port.instance.slice
- caller = port.network.owner.creator
- auth = {'username': caller.email,
- 'password': caller.remote_password,
- 'tenant': slice.name}
- client = OpenStackClient(controller=controller, **auth)
- driver = OpenStackDriver(client=client)
-
- return driver
-
- def sync_ports(self):
- logger.info("sync'ing Ports [delete=False]")
-
- ports = Port.objects.all()
- ports_by_id = {}
- ports_by_neutron_port = {}
- for port in ports:
- ports_by_id[port.id] = port
- ports_by_neutron_port[port.port_id] = port
-
- networks = Network.objects.all()
- networks_by_id = {}
- for network in networks:
- for nd in network.controllernetworks.all():
- networks_by_id[nd.net_id] = network
-
- #logger.info("networks_by_id = ")
- #for (network_id, network) in networks_by_id.items():
- # logger.info(" %s: %s" % (network_id, network.name))
-
- instances = Instance.objects.all()
- instances_by_instance_uuid = {}
- for instance in instances:
- instances_by_instance_uuid[instance.instance_uuid] = instance
-
- # Get all ports in all controllers
-
- ports_by_id = {}
- templates_by_id = {}
- for controller in Controller.objects.all():
- if not controller.admin_tenant:
- logger.info("controller %s has no admin_tenant" % controller)
- continue
- try:
- driver = self.driver.admin_driver(controller = controller)
- ports = driver.shell.neutron.list_ports()["ports"]
- except:
- logger.log_exc("failed to get ports from controller %s" % controller)
- continue
-
- for port in ports:
- ports_by_id[port["id"]] = port
-
- # public-nat and public-dedicated networks don't have a net-id anywhere
- # in the data model, so build up a list of which ids map to which network
- # templates.
- try:
- neutron_networks = driver.shell.neutron.list_networks()["networks"]
- except:
- print "failed to get networks from controller %s" % controller
- continue
- for network in neutron_networks:
- for template in NetworkTemplate.objects.all():
- if template.shared_network_name == network["name"]:
- templates_by_id[network["id"]] = template
-
- for port in ports_by_id.values():
- #logger.info("port %s" % str(port))
- if port["id"] in ports_by_neutron_port:
- # we already have it
- #logger.info("already accounted for port %s" % port["id"])
- continue
-
- if port["device_owner"] != "compute:nova":
- # we only want the ports that connect to instances
- #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
- continue
-
- instance = instances_by_instance_uuid.get(port['device_id'], None)
- if not instance:
- logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
- continue
-
- network = networks_by_id.get(port['network_id'], None)
- if not network:
- # maybe it's public-nat or public-dedicated. Search the templates for
- # the id, then see if the instance's slice has some network that uses
- # that template
- template = templates_by_id.get(port['network_id'], None)
- if template and instance.slice:
- for candidate_network in instance.slice.networks.all():
- if candidate_network.template == template:
- network=candidate_network
- if not network:
- logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
-
- # we know it's associated with a instance, but we don't know
- # which network it is part of.
-
- continue
-
- if network.template.shared_network_name:
- # If it's a shared network template, then more than one network
- # object maps to the neutron network. We have to do a whole bunch
- # of extra work to find the right one.
- networks = network.template.network_set.all()
- network = None
- for candidate_network in networks:
- if (candidate_network.owner == instance.slice):
- logger.info("found network %s" % candidate_network)
- network = candidate_network
-
- if not network:
- logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
- continue
-
- if not port["fixed_ips"]:
- logger.info("port %s has no fixed_ips" % port["id"])
- continue
-
- ip=port["fixed_ips"][0]["ip_address"]
- mac=port["mac_address"]
- logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
-
- ns = Port(network=network,
- instance=instance,
- ip=ip,
- mac=mac,
- port_id=port["id"])
-
- try:
- ns.save()
- except:
- logger.log_exc("failed to save port %s" % str(ns))
- continue
-
- # For ports that were created by the user, find that ones
- # that don't have neutron ports, and create them.
- for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
- logger.info("XXX working on port %s" % port)
- controller = port.instance.node.site_deployment.controller
- slice = port.instance.slice
-
- if controller:
- cn=port.network.controllernetworks.filter(controller=controller)
- if not cn:
- logger.log_exc("no controllernetwork for %s" % port)
- continue
- cn=cn[0]
- if cn.lazy_blocked:
- cn.lazy_blocked=False
- cn.save()
- logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
- continue
- if not cn.net_id:
- logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
- continue
- try:
- driver = self.get_driver(port)
-
- args = {"network_id": cn.net_id}
- neutron_port_name = port.get_parameters().get("neutron_port_name", None)
- if neutron_port_name:
- args["name"] = neutron_port_name
-
- neutron_port = driver.shell.neutron.create_port({"port": args})["port"]
- port.port_id = neutron_port["id"]
- if neutron_port["fixed_ips"]:
- port.ip = neutron_port["fixed_ips"][0]["ip_address"]
- port.mac = neutron_port["mac_address"]
- port.xos_created = True
- logger.info("created neutron port %s for %s" % (port.port_id, port))
- except:
- logger.log_exc("failed to create neutron port for %s" % port)
- continue
- port.save()
-
- def delete_ports(self):
- logger.info("sync'ing Ports [delete=True]")
- for port in Port.deleted_objects.all():
- self.delete_record(port)
-
- def delete_record(self, port):
- if port.xos_created and port.port_id:
- logger.info("calling openstack to destroy port %s" % port.port_id)
- try:
- driver = self.get_driver(port)
- driver.shell.neutron.delete_port(port.port_id)
- except:
- logger.log_exc("failed to delete port %s from neutron" % port.port_id)
- return
-
- logger.info("Purging port %s" % port)
- port.delete(purge=True)
-
diff --git a/xos/synchronizers/openstack/steps/sync_roles.py b/xos/synchronizers/openstack/steps/sync_roles.py
deleted file mode 100644
index e859316..0000000
--- a/xos/synchronizers/openstack/steps/sync_roles.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import os
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.openstacksyncstep import OpenStackSyncStep
-from core.models.role import Role
-from core.models.site import SiteRole, Controller, ControllerRole
-from core.models.slice import SliceRole
-from xos.logger import observer_logger as logger
-
-class SyncRoles(OpenStackSyncStep):
- provides=[Role]
- requested_interval=0
- observes=[SiteRole,SliceRole,ControllerRole]
-
- def sync_record(self, role):
- if not role.enacted:
- controllers = Controller.objects.all()
- for controller in controllers:
- driver = self.driver.admin_driver(controller=controller)
- driver.create_role(role.role)
- role.save()
-
diff --git a/xos/synchronizers/openstack/steps/teardown_container.yaml b/xos/synchronizers/openstack/steps/teardown_container.yaml
deleted file mode 100644
index 5cabc78..0000000
--- a/xos/synchronizers/openstack/steps/teardown_container.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- hosts: {{ instance_name }}
- gather_facts: False
- connection: ssh
- user: {{ username }}
- sudo: yes
-
- vars:
- container_name: {{ container_name }}
- docker_image: {{ docker_image }}
- ports:
- {% for port in ports %}
- - device: {{ port.device }}
- xos_network_id: {{ port.xos_network_id }}
- mac: {{ port.mac|default("") }}
- ip: {{ port.ip }}
- snoop_instance_mac: {{ port.snoop_instance_mac }}
- snoop_instance_id: {{ port.snoop_instance_id }}
- parent_mac: {{ port.parent_mac|default("") }}
- s_tag: {{ port.s_tag|default("") }}
- c_tag: {{ port.c_tag|default("") }}
- next_hop: {{ port.next_hop|default("") }}
- bridge: {{ port.bridge }}
- {% endfor %}
- volumes:
- {% for volume in volumes %}
- - {{ volume }}
- {% endfor %}
-
- tasks:
- - name: Make sure container is stopped
- service: name=container-{{ container_name }} state=stopped
-
diff --git a/xos/synchronizers/openstack/templates/container.conf.j2 b/xos/synchronizers/openstack/templates/container.conf.j2
deleted file mode 100644
index 7cbb880..0000000
--- a/xos/synchronizers/openstack/templates/container.conf.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-# Upstart script for container
-description "container"
-author "smbaker@gmail.com"
-start on filesystem and started docker
-stop on runlevel [!2345]
-respawn
-
-script
- /usr/local/sbin/start-container-{{ container_name }}.sh ATTACH
-end script
-
-post-stop script
- /usr/local/sbin/stop-container-{{ container_name }}.sh
-end script
\ No newline at end of file
diff --git a/xos/synchronizers/openstack/templates/container.service.j2 b/xos/synchronizers/openstack/templates/container.service.j2
deleted file mode 100644
index 817d6d7..0000000
--- a/xos/synchronizers/openstack/templates/container.service.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[Unit]
-Description={{ container_name }}
-After=docker.service
-
-[Service]
-ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh ATTACH"
-ExecStop=/bin/bash -c "/usr/local/sbin/stop-container-{{ container_name }}.sh"
-SuccessExitStatus=0 137
-
-[Install]
-WantedBy=multi-user.target
diff --git a/xos/synchronizers/openstack/templates/start-container.sh.j2 b/xos/synchronizers/openstack/templates/start-container.sh.j2
deleted file mode 100644
index 2fbf478..0000000
--- a/xos/synchronizers/openstack/templates/start-container.sh.j2
+++ /dev/null
@@ -1,136 +0,0 @@
-#!/bin/bash
-
-iptables -L > /dev/null
-ip6tables -L > /dev/null
-
-CONTAINER={{ container_name }}
-IMAGE={{ docker_image }}
-
-function mac_to_iface {
- PARENT_MAC=$1
- ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
-}
-
-function encapsulate_stag {
- LAN_IFACE=$1
- STAG=$2
- ifconfig $LAN_IFACE >> /dev/null
- if [ "$?" == 0 ]; then
- STAG_IFACE=$LAN_IFACE.$STAG
- ifconfig $LAN_IFACE up
- ifconfig $STAG_IFACE
- if [ "$?" == 0 ]; then
- echo $STAG_IFACE is already created
- else
- ifconfig $STAG_IFACE >> /dev/null || ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
- fi
- ifconfig $STAG_IFACE up
- else
- echo There is no $LAN_IFACE. Aborting.
- exit -1
- fi
-}
-
-
-{% if volumes %}
-{% for volume in volumes %}
-DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
-mkdir -p $DEST_DIR
-VOLUME_ARGS="$VOLUME_ARGS -v $DEST_DIR:{{ volume }}"
-{% endfor %}
-{% endif %}
-
-docker inspect $CONTAINER > /dev/null 2>&1
-if [ "$?" == 1 ]
-then
- docker pull $IMAGE
-{% if network_method=="host" %}
- docker run -d --name=$CONTAINER --privileged=true --net=host $VOLUME_ARGS $IMAGE
-{% elif network_method=="bridged" %}
- docker run -d --name=$CONTAINER --privileged=true --net=bridge $VOLUME_ARGS $IMAGE
-{% else %}
- docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
-{% endif %}
-else
- docker start $CONTAINER
-fi
-
-{% if ports %}
-{% for port in ports %}
-
-{% if port.next_hop %}
-NEXTHOP_ARG="@{{ port.next_hop }}"
-{% else %}
-NEXTHOP_ARG=""
-{% endif %}
-
-{% if port.c_tag %}
-CTAG_ARG="@{{ port.c_tag }}"
-{% else %}
-CTAG_ARG=""
-{% endif %}
-
-{% if port.parent_mac %}
-# container-in-VM
-SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
-CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
-echo $CMD
-eval $CMD
-
-{% else %}
-# container-on-metal
-IP="{{ port.ip }}"
-{% if port.mac %}
-MAC="{{ port.mac }}"
-{% else %}
-MAC=""
-{% endif %}
-
-DEVICE="{{ port.device }}"
-BRIDGE="{{ port.bridge }}"
-{% if port.s_tag %}
-# This is intended for lan_network. Assume that BRIDGE is set to br_lan. We
-# create a device that strips off the S-TAG.
-STAG="{{ port.s_tag }}"
-encapsulate_stag $BRIDGE $STAG
-SRC_DEV=$STAG_IFACE
-{% else %}
-# This is for a standard neutron private network. We use a donor VM to setup
-# openvswitch for us, and we snoop at its devices and create a tap using the
-# same settings.
-XOS_NETWORK_ID="{{ port.xos_network_id }}"
-INSTANCE_MAC="{{ port.snoop_instance_mac }}"
-INSTANCE_ID="{{ port.snoop_instance_id }}"
-INSTANCE_TAP=`virsh domiflist $INSTANCE_ID | grep -i $INSTANCE_MAC | awk '{print $1}'`
-INSTANCE_TAP=${INSTANCE_TAP:3}
-VLAN_ID=`ovs-vsctl show | grep -i -A 1 port.*$INSTANCE_TAP | grep -i tag | awk '{print $2}'`
-# One tap for all containers per XOS/neutron network. Included the VLAN_ID in the
-# hash, to cover the case where XOS is reinstalled and the XOS network ids
-# get reused.
-TAP="con`echo ${XOS_NETWORK_ID}_$VLAN_ID|md5sum|awk '{print $1}'`"
-TAP=${TAP:0:10}
-echo im=$INSTANCE_MAC ii=$INSTANCE_ID it=$INSTANCE_TAP vlan=$VLAN_ID tap=$TAP con=$CONTAINER dev=$DEVICE mac=$MAC
-ovs-vsctl show | grep -i $TAP
-if [[ $? == 1 ]]; then
- echo creating tap
- ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
-else
- echo tap exists
-fi
-SRC_DEV=$TAP
-{% endif %}
-
-CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
-echo $CMD
-eval $CMD
-{% endif %}
-{% endfor %}
-{% endif %}
-
-# Attach to container
-# (this is only done when using upstart, since upstart expects to be attached
-# to a running service)
-if [[ "$1" == "ATTACH" ]]; then
- docker start -a $CONTAINER
-fi
-
diff --git a/xos/synchronizers/openstack/templates/stop-container.sh.j2 b/xos/synchronizers/openstack/templates/stop-container.sh.j2
deleted file mode 100644
index 9cabb00..0000000
--- a/xos/synchronizers/openstack/templates/stop-container.sh.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-CONTAINER={{ container_name }}
-
-docker stop $CONTAINER
-docker rm $CONTAINER
diff --git a/xos/synchronizers/openstack/xos-synchronizer.py b/xos/synchronizers/openstack/xos-synchronizer.py
deleted file mode 100644
index 852ee0e..0000000
--- a/xos/synchronizers/openstack/xos-synchronizer.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env python
-import os
-import argparse
-import sys
-
-sys.path.append('/opt/xos')
-
-os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
-from synchronizers.base.backend import Backend
-from synchronizers.base.event_loop import set_driver
-from xos.config import Config, DEFAULT_CONFIG_FN
-from core.models import Instance,NetworkTemplate
-from xos.logger import Logger, logging, logger
-from django.db import ProgrammingError
-import time
-
-try:
- from django import setup as django_setup # django 1.7
-except:
- django_setup = False
-
-config = Config()
-
-# set the driver.
-from openstack_xos.driver import OpenStackDriver
-set_driver(OpenStackDriver())
-
-# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
-def daemon():
- """Daemonize the current process."""
- if os.fork() != 0: os._exit(0)
- os.setsid()
- if os.fork() != 0: os._exit(0)
- os.umask(0)
- devnull = os.open(os.devnull, os.O_RDWR)
- os.dup2(devnull, 0)
- # xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
- logdir=os.path.dirname(config.observer_logfile)
- # when installed in standalone we might not have httpd installed
- if not os.path.isdir(logdir): os.mkdir(logdir)
- crashlog = os.open('%s'%config.observer_logfile, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
- os.dup2(crashlog, 1)
- os.dup2(crashlog, 2)
-
- if hasattr(config, "observer_pidfile"):
- pidfile = config.get("observer_pidfile")
- else:
- pidfile = "/var/run/xosobserver.pid"
- try:
- file(pidfile,"w").write(str(os.getpid()))
- except:
- print "failed to create pidfile %s" % pidfile
-
-def main():
- # Generate command line parser
- parser = argparse.ArgumentParser(usage='%(prog)s [options]')
- parser.add_argument('-d', '--daemon', dest='daemon', action='store_true', default=False,
- help='Run as daemon.')
- # smbaker: util/config.py parses sys.argv[] directly to get config file name; include the option here to avoid
- # throwing unrecognized argument exceptions
- parser.add_argument('-C', '--config', dest='config_file', action='store', default=DEFAULT_CONFIG_FN,
- help='Name of config file.')
- args = parser.parse_args()
-
- if args.daemon: daemon()
-
- if django_setup: # 1.7
- django_setup()
-
- models_active = False
- wait = False
- while not models_active:
- try:
- _ = Instance.objects.first()
- _ = NetworkTemplate.objects.first()
- models_active = True
- except Exception,e:
- logger.info(str(e))
- logger.info('Waiting for data model to come up before starting...')
- time.sleep(10)
- wait = True
-
- if (wait):
- time.sleep(60) # Safety factor, seeing that we stumbled waiting for the data model to come up.
- backend = Backend()
- backend.run()
-
-if __name__ == '__main__':
-
- main()
diff --git a/xos/tools/xos-manage b/xos/tools/xos-manage
index c06a12b..2045f19 100755
--- a/xos/tools/xos-manage
+++ b/xos/tools/xos-manage
@@ -82,7 +82,7 @@
python $XOS_DIR/manage.py syncdb --noinput
if [[ $DJANGO_17 ]]; then
echo "Loading initial data from fixture..."
- python $XOS_DIR/manage.py --noobserver --nomodelpolicy loaddata $XOS_DIR/core/fixtures/core_initial_data.json
+ python $XOS_DIR/manage.py --noobserver loaddata $XOS_DIR/core/fixtures/core_initial_data.json
fi
}
function evolvedb {