CORD-1130 remove onboarding synchronizer
Change-Id: Ia2a4fc7f32e66a03bbfca6821d97ae38dce1ed98
diff --git a/xos/synchronizers/onboarding/files/__init__.py b/xos/synchronizers/onboarding/files/__init__.py
deleted file mode 100644
index 8b13789..0000000
--- a/xos/synchronizers/onboarding/files/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/xos/synchronizers/onboarding/model-deps b/xos/synchronizers/onboarding/model-deps
deleted file mode 100644
index 4bf692a..0000000
--- a/xos/synchronizers/onboarding/model-deps
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "XOS": [
- "ServiceController",
- "Library"
- ],
- "ServiceController": [
- "LoadableModuleResource"
- ],
- "Library": [
- "LoadableModuleResource"
- ]
-}
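Reviewer note, not part of the change: model-deps was the dependency graph that the shared synchronizer framework loaded (via dependency_graph in onboarding_synchronizer_config below) to order its sync steps; each key appears to list the models that must be synced before it, e.g. the XOS build waits on ServiceController and Library. A minimal, hypothetical sketch of reading the format, shown here only for context:

    import json

    # Illustration only: parse the removed model-deps dependency graph.
    with open("model-deps") as f:
        deps = json.load(f)

    # e.g. "XOS" -> ["ServiceController", "Library"]
    for model, prerequisites in deps.items():
        print("%s is synced after: %s" % (model, ", ".join(prerequisites)))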
diff --git a/xos/synchronizers/onboarding/onboarding-synchronizer.py b/xos/synchronizers/onboarding/onboarding-synchronizer.py
deleted file mode 100755
index 84bec4f..0000000
--- a/xos/synchronizers/onboarding/onboarding-synchronizer.py
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env python
-
-# This imports and runs ../../xos-observer.py
-
-import importlib
-import os
-import sys
-observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
-sys.path.append(observer_path)
-mod = importlib.import_module("xos-synchronizer")
-mod.main()
diff --git a/xos/synchronizers/onboarding/onboarding_synchronizer_config b/xos/synchronizers/onboarding/onboarding_synchronizer_config
deleted file mode 100644
index fb33044..0000000
--- a/xos/synchronizers/onboarding/onboarding_synchronizer_config
+++ /dev/null
@@ -1,35 +0,0 @@
-[plc]
-name=plc
-deployment=plc
-
-[db]
-name=xos
-user=postgres
-password=password
-host=xos_db
-port=5432
-
-[api]
-host=localhost
-port=8000
-ssl_key=None
-ssl_cert=None
-ca_ssl_cert=None
-ratelimit_enabled=0
-omf_enabled=0
-mail_support_address=support@localhost
-nova_enabled=True
-
-[observer]
-name=onboarding
-dependency_graph=/opt/xos/synchronizers/onboarding/model-deps
-steps_dir=/opt/xos/synchronizers/onboarding/steps
-sys_dir=/opt/xos/synchronizers/onboarding/sys
-deleters_dir=/opt/xos/synchronizers/onboarding/deleters
-log_file=console
-driver=None
-backoff_disabled=True
-pretend=False
-save_ansible_output=True
-node_key=/opt/cord_profile/node_key
-
diff --git a/xos/synchronizers/onboarding/run.sh b/xos/synchronizers/onboarding/run.sh
deleted file mode 100755
index d52d39c..0000000
--- a/xos/synchronizers/onboarding/run.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-export XOS_DIR=/opt/xos
-python onboarding-synchronizer.py -C $XOS_DIR/synchronizers/onboarding/onboarding_synchronizer_config
diff --git a/xos/synchronizers/onboarding/steps/sync_component.py b/xos/synchronizers/onboarding/steps/sync_component.py
deleted file mode 100644
index da556e0..0000000
--- a/xos/synchronizers/onboarding/steps/sync_component.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from core.models import XOS, XOSComponent
-from xos.logger import Logger, logging
-from synchronizers.base.ansible_helper import run_template
-
-# xosbuilder will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from xosbuilder import XOSBuilder
-
-logger = Logger(level=logging.INFO)
-
-class SyncXOSComponent(SyncStep, XOSBuilder):
- provides=[XOSComponent]
- observes=XOSComponent
- requested_interval=0
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- XOSBuilder.__init__(self)
-
- def sync_record(self, sc):
- logger.info("Sync'ing XOSComponent %s" % sc)
-
- if sc.xos and (not sc.xos.enable_build):
- raise DeferredException("XOS build is currently disabled")
-
- unready = self.check_controller_unready(sc)
- if unready:
- raise Exception("Controller %s has unready resources: %s" % (str(sc), ",".join([str(x) for x in unready])))
-
- # There's nothing to actually do, since there's no synchronizer
- # container for libraries.
-
- def delete_record(self, m):
- pass
-
- def fetch_pending(self, deleted=False):
- pend = super(SyncXOSComponent, self).fetch_pending(deleted)
- return pend
-
diff --git a/xos/synchronizers/onboarding/steps/sync_library.py b/xos/synchronizers/onboarding/steps/sync_library.py
deleted file mode 100644
index 2e9ddd2..0000000
--- a/xos/synchronizers/onboarding/steps/sync_library.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from core.models import XOS, Library
-from xos.logger import Logger, logging
-from synchronizers.base.ansible_helper import run_template
-
-# xosbuilder will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from xosbuilder import XOSBuilder
-
-logger = Logger(level=logging.INFO)
-
-class SyncLibrary(SyncStep, XOSBuilder):
- provides=[Library]
- observes=Library
- requested_interval=0
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- XOSBuilder.__init__(self)
-
- def sync_record(self, sc):
- logger.info("Sync'ing Library %s" % sc)
-
- if sc.xos and (not sc.xos.enable_build):
- raise DeferredException("XOS build is currently disabled")
-
- unready = self.check_controller_unready(sc)
- if unready:
- raise Exception("Controller %s has unready resources: %s" % (str(sc), ",".join([str(x) for x in unready])))
-
- # There's nothing to actually do, since there's no synchronizer
- # container for libraries.
-
- def delete_record(self, m):
- pass
-
- def fetch_pending(self, deleted=False):
- pend = super(SyncLibrary, self).fetch_pending(deleted)
- return pend
-
diff --git a/xos/synchronizers/onboarding/steps/sync_loadablemoduleresource.py b/xos/synchronizers/onboarding/steps/sync_loadablemoduleresource.py
deleted file mode 100644
index 46734a9..0000000
--- a/xos/synchronizers/onboarding/steps/sync_loadablemoduleresource.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep
-from core.models import Service, ServiceController, LoadableModuleResource
-from xos.logger import Logger, logging
-
-# xosbuilder will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from xosbuilder import XOSBuilder
-
-logger = Logger(level=logging.INFO)
-
-class SyncLoadableModuleResource(SyncStep, XOSBuilder):
- provides=[LoadableModuleResource]
- observes=LoadableModuleResource
- requested_interval=0
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- XOSBuilder.__init__(self)
-
- def sync_record(self, scr):
- logger.info("Sync'ing LoadableModuleResource %s" % scr)
- self.download_resource(scr)
-
- # TODO: The following should be redone with watchers
-
- if scr.loadable_module and scr.loadable_module.xos:
- # Make sure the xos UI is resynced
- xos = scr.loadable_module.xos
- xos.save(update_fields=["updated"], always_update_timestamp=True)
-
- if (scr.kind=="models") and scr.loadable_module and (scr.loadable_module.name != "openstack"):
- # Make sure the openstack controller is restarted. This is necessary
- # as the OpenStack controller is the only one that handles model
- # policies.
- #
- # Note: Expect to resolve this dependence as part of XOS refactoring.
- #
- os_scr = ServiceController.objects.filter(name="openstack")
- if os_scr:
- os_scr = os_scr[0]
- os_scr.save(update_fields=["updated"], always_update_timestamp=True)
-
- def delete_record(self, m):
- pass
-
- def fetch_pending(self, deleted=False):
- pend = super(SyncLoadableModuleResource, self).fetch_pending(deleted)
- return pend
-
diff --git a/xos/synchronizers/onboarding/steps/sync_servicecontroller.py b/xos/synchronizers/onboarding/steps/sync_servicecontroller.py
deleted file mode 100644
index 31e3b30..0000000
--- a/xos/synchronizers/onboarding/steps/sync_servicecontroller.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from core.models import XOS, ServiceController
-from xos.logger import Logger, logging
-from synchronizers.base.ansible_helper import run_template
-
-# xosbuilder will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from xosbuilder import XOSBuilder
-
-logger = Logger(level=logging.INFO)
-
-class SyncServiceController(SyncStep, XOSBuilder):
- provides=[ServiceController]
- observes=ServiceController
- requested_interval=0
- playbook = "sync_servicecontroller.yaml"
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- XOSBuilder.__init__(self)
-
- def sync_record(self, sc):
- logger.info("Sync'ing ServiceController %s" % sc)
-
- if sc.xos and (not sc.xos.enable_build):
- raise DeferredException("XOS build is currently disabled")
-
- unready = self.check_controller_unready(sc)
- if unready:
- raise Exception("Controller %s has unready resources: %s" % (str(sc), ",".join([str(x) for x in unready])))
-
- if sc.no_build:
- # nothing to do for a no-build ServiceController
- return
-
- dockerfile = self.create_synchronizer_dockerfile(sc)
- if dockerfile:
- dockerfiles=[dockerfile]
- else:
- dockerfiles=[]
-
- tenant_fields = {"dockerfiles": dockerfiles,
- "build_dir": self.build_dir,
- "ansible_tag": sc.__class__.__name__ + "_" + str(sc.id)}
-
- path="servicecontroller"
- res = run_template(self.playbook, tenant_fields, path=path, object=sc)
-
- def delete_record(self, m):
- pass
-
- def fetch_pending(self, deleted=False):
- pend = super(SyncServiceController, self).fetch_pending(deleted)
- return pend
-
diff --git a/xos/synchronizers/onboarding/steps/sync_servicecontroller.yaml b/xos/synchronizers/onboarding/steps/sync_servicecontroller.yaml
deleted file mode 100644
index b81cde1..0000000
--- a/xos/synchronizers/onboarding/steps/sync_servicecontroller.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- hosts: localhost
-
- vars:
- dockerfiles:
- {% for dockerfile in dockerfiles %}
- - docker_image_name: {{ dockerfile.docker_image_name }}
- dockerfile_fn: {{ dockerfile.dockerfile_fn }}
- {% endfor %}
-
- tasks:
- {% for dockerfile in dockerfiles %}
- - name: build_docker_{{ dockerfile.docker_image_name }}
- shell: chdir={{ build_dir }} docker build -f {{ dockerfile.dockerfile_fn }} --rm -t {{ dockerfile.docker_image_name }} .
- {% endfor %}
-
-# - build_dockers:
-# shell: docker build -f {{ '{{' }} item.dockerfile_fn {{ '}}' }} --rm -t {{ '{{' }} item.docker_image_name {{ '}}' }} .
-# with items: "dockerfiles"
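Reviewer note, not part of the change: the step playbooks removed here are themselves Jinja2 templates; run_template() (imported in the sync steps above) renders them with the tenant_fields dict before handing them to Ansible, which is why raw {% for %} loops appear at task level and why the commented-out with_items variant escapes its braces as {{ '{{' }}. A hypothetical stand-alone render, assuming the removed playbook is saved locally and using made-up sample values:

    import jinja2

    # Illustration only: mimic the pre-render step that run_template() performed,
    # using sample values shaped like the tenant_fields built in
    # sync_servicecontroller.py above.
    playbook_src = open("sync_servicecontroller.yaml").read()
    fields = {"build_dir": "/opt/xos/BUILD/",
              "dockerfiles": [{"docker_image_name": "xosproject/xos-synchronizer-example",
                               "dockerfile_fn": "Dockerfile.example"}]}
    print(jinja2.Template(playbook_src).render(**fields))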
diff --git a/xos/synchronizers/onboarding/steps/sync_xos.py b/xos/synchronizers/onboarding/steps/sync_xos.py
deleted file mode 100644
index b1ecf98..0000000
--- a/xos/synchronizers/onboarding/steps/sync_xos.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import os
-import sys
-import base64
-from django.db.models import F, Q
-from xos.config import Config
-from synchronizers.base.syncstep import SyncStep, DeferredException
-from core.models import XOS
-from xos.logger import Logger, logging
-from synchronizers.base.ansible_helper import run_template
-
-# xosbuilder will be in steps/..
-parentdir = os.path.join(os.path.dirname(__file__),"..")
-sys.path.insert(0,parentdir)
-
-from xosbuilder import XOSBuilder
-
-logger = Logger(level=logging.INFO)
-
-class SyncXOS(SyncStep, XOSBuilder):
- provides=[XOS]
- observes=XOS
- requested_interval=0
- playbook = "sync_xos.yaml"
-
- def __init__(self, **args):
- SyncStep.__init__(self, **args)
- XOSBuilder.__init__(self)
-
- def sync_record(self, xos):
- logger.info("Sync'ing XOS %s" % xos)
-
- if not xos.docker_project_name:
- raise Exception("xos.docker_project_name is not set")
-
- if not xos.db_container_name:
- raise Exception("xos.db_container_name is not set")
-
- if (not xos.enable_build):
- raise DeferredException("XOS build is currently disabled")
-
- # We've seen the XOS object get synced before the ServiceController object
- # is synced. This results in the XOS UI container getting built with files
- # from that controller missing. So let's try to defer.
- #
- # It could be argued that we should continue to defer if the ServiceController
- # is in error state, but it is important that a single broken service does
- # not takedown the entirety of XOS.
-
- for scr in xos.loadable_modules.all():
- if (scr.backend_status is not None) and (scr.backend_status.startswith("0")):
- raise DeferredException("Detected unsynced loadable module. Deferring.")
-
- self.create_docker_compose()
-
- if xos.no_build:
- dockerfiles = []
- else:
- dockerfiles = [self.create_ui_dockerfile()]
-
- tenant_fields = {"dockerfiles": dockerfiles,
- "build_dir": self.build_dir,
- "docker_project_name": xos.docker_project_name,
- "ansible_tag": xos.__class__.__name__ + "_" + str(xos.id)}
-
- path="XOS"
- res = run_template(self.playbook, tenant_fields, path=path, object=xos)
-
- def delete_record(self, m):
- pass
-
- def fetch_pending(self, deleted=False):
- pend = super(SyncXOS, self).fetch_pending(deleted)
- return pend
-
diff --git a/xos/synchronizers/onboarding/steps/sync_xos.yaml b/xos/synchronizers/onboarding/steps/sync_xos.yaml
deleted file mode 100644
index 49d86fe..0000000
--- a/xos/synchronizers/onboarding/steps/sync_xos.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- hosts: localhost
-
- vars:
- dockerfiles:
- {% for dockerfile in dockerfiles %}
- - docker_image_name: {{ dockerfile.docker_image_name }}
- dockerfile_fn: {{ dockerfile.dockerfile_fn }}
- {% endfor %}
-
- tasks:
- {% for dockerfile in dockerfiles %}
- - name: build_docker_{{ dockerfile.docker_image_name }}
- shell: chdir={{ build_dir }} docker build -f {{ dockerfile.dockerfile_fn }} --rm -t {{ dockerfile.docker_image_name }} .
- {% endfor %}
-
- - name: run docker-compose
- shell: docker-compose -p {{ docker_project_name }} -f /opt/xos/synchronizers/onboarding/docker-compose/docker-compose.yml up -d
-
diff --git a/xos/synchronizers/onboarding/templates/docker-compose.yml.j2 b/xos/synchronizers/onboarding/templates/docker-compose.yml.j2
deleted file mode 100644
index 55252d1..0000000
--- a/xos/synchronizers/onboarding/templates/docker-compose.yml.j2
+++ /dev/null
@@ -1,80 +0,0 @@
-version: '2'
-
-{% if networks %}
-networks:
-{% for network in networks %}
- {{ network }}:
- external: true
-{% endfor %}
-{% endif %}
-
-services:
-{% for container_name, container in containers.iteritems() %}
- {{ container_name }}:
-# container_name: {{ container.container_base_name }}_{{ container_name }}_1
- image: {{ container.image }}
-{%- if container.networks %}
- networks:
-{%- for network in container.networks %}
- - {{ network }}
-{%- endfor %}
-{%- endif %}
-{%- if container.command %}
- command: {{ container.command }}
-{%- endif %}
-{%- if container.ports %}
- ports:
-{%- for src,dest in container.ports.iteritems() %}
- - "{{ src }}:{{ dest }}"
-{%- endfor %}
-{%- endif %}
-{%- if container.links %}
- links:
-{%- for link in container.links %}
- - {{ link }}
-{%- endfor %}
-{%- endif %}
-{%- if container.external_links %}
- external_links:
-{%- for link in container.external_links %}
- - {{ link }}
-{%- endfor %}
-{%- endif %}
-{%- if container.volumes %}
- volumes:
-{%- for volume in container.volumes %}
-{%- if volume.read_only %}
- - {{ volume.host_path }}:{{ volume.container_path }}:ro
-{%- elif volume.host_path == "" %}
- - {{ volume.container_path }}
-{%- else %}
- - {{ volume.host_path }}:{{ volume.container_path }}
-{%- endif %}
-{%- endfor %}
-{%- endif %}
-{%- if container.volumes_from %}
- volumes_from:
-{%- for volume in container.volumes_from %}
- - {{ volume }}
-{%- endfor %}
-{%- endif %}
-{%- if container.expose %}
- expose:
-{%- for expose in container.expose %}
- - "{{ expose }}"
-{%- endfor %}
-{%- endif %}
-{%- if container.extra_hosts %}
- extra_hosts:
-{%- for host in container.extra_hosts %}
- - "{{ host }}"
-{%- endfor %}
-{%- endif %}
- logging:
- driver: "json-file"
- options:
- max-size: "1000k"
- max-file: "5"
-
-{%- endfor %}
-
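Reviewer note, not part of the change: this template was consumed by create_docker_compose() in xosbuilder.py (below), which rendered it with a {"containers": ..., "networks": ...} dict and wrote the result to docker-compose/docker-compose.yml. A minimal, hypothetical render for context, assuming Python 2 (the template's iteritems() calls are Python 2-only) and the template saved locally:

    import jinja2

    # Illustration only: reproduce the render step from create_docker_compose()
    # with made-up sample data for a single container entry.
    template_src = open("docker-compose.yml.j2").read()
    sample = {"networks": ["xos"],
              "containers": {"xos_core": {"image": "xosproject/xos-ui",
                                          "command": "sleep 864000",
                                          "networks": ["xos"],
                                          "ports": {"50051": "50051"}}}}
    print(jinja2.Template(template_src).render(**sample))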
diff --git a/xos/synchronizers/onboarding/xosbuilder.py b/xos/synchronizers/onboarding/xosbuilder.py
deleted file mode 100644
index b1acdb2..0000000
--- a/xos/synchronizers/onboarding/xosbuilder.py
+++ /dev/null
@@ -1,507 +0,0 @@
-import os
-import base64
-import jinja2
-import string
-import sys
-import urllib2
-import urlparse
-import xmlrpclib
-
-from xos.config import Config
-from core.models import Service, ServiceController, LoadableModule, LoadableModuleResource, XOS, XOSComponent
-from xos.logger import Logger, logging
-
-from django.utils import timezone
-
-logger = Logger(level=logging.INFO)
-
-
-def add_unique(list, item):
- if not item in list:
- list.append(item)
-
-
-class XOSBuilder(object):
- UI_KINDS = ["models", "xproto", "admin", "admin_template", "django_library", "rest_service", "rest_tenant", "tosca_custom_types", "tosca_resource","public_key","vendor_js"]
- SYNC_CONTROLLER_KINDS = ["synchronizer", "private_key", "public_key"]
- SYNC_ALLCONTROLLER_KINDS = ["models", "xproto", "django_library"]
-
- def __init__(self):
- self.build_dir = "/opt/xos/BUILD/"
- self.build_tainted = False
-
- # stuff that has to do with downloading
-
- def get_base_dest_dir(self, scr):
- xos_base = "opt/xos"
- service_name = scr.loadable_module.name
- base_dirs = {"models": "%s/services/%s/" % (xos_base, service_name),
- "xproto": "%s/services/%s/xproto/" % (xos_base, service_name),
- "admin": "%s/services/%s/" % (xos_base, service_name),
- "admin_template": "%s/services/%s/templates/" % (xos_base, service_name),
- "django_library": "%s/services/%s/" % (xos_base, service_name),
- "synchronizer": "%s/synchronizers/%s/" % (xos_base, service_name),
- "tosca_custom_types": "%s/tosca/custom_types/" % (xos_base),
- "tosca_resource": "%s/tosca/resources/" % (xos_base),
- "rest_service": "%s/api/service/" % (xos_base),
- "rest_tenant": "%s/api/tenant/" % (xos_base),
- "private_key": "%s/services/%s/keys/" % (xos_base, service_name),
- "public_key": "%s/services/%s/keys/" % (xos_base, service_name),
- "vendor_js": "%s/core/xoslib/static/vendor/" % (xos_base)}
- dest_dir = base_dirs[scr.kind]
-
- return dest_dir
-
- def get_dest_dir(self, scr):
- dest_dir = self.get_base_dest_dir(scr)
-
- if scr.subdirectory:
- dest_dir = os.path.join(dest_dir, scr.subdirectory)
-
- return dest_dir
-
- def get_build_fn(self, scr):
- dest_dir = self.get_dest_dir(scr)
- dest_fn = os.path.split(urlparse.urlsplit(scr.full_url).path)[-1]
- return os.path.join(dest_dir, dest_fn)
-
- def get_download_fn(self, scr):
- dest_fn = self.get_build_fn(scr)
- return os.path.join(self.build_dir, dest_fn)
-
- def read_manifest(self, scr, fn):
- manifest = []
- manifest_lines = file(fn).readlines()
- manifest_lines = [x.strip() for x in manifest_lines]
- manifest_lines = [x for x in manifest_lines if x]
- for line in manifest_lines:
- url_parts = urlparse.urlsplit(scr.full_url)
- new_path = os.path.join(os.path.join(*os.path.split(url_parts.path)[:-1]), line)
- url = urlparse.urlunsplit((url_parts.scheme, url_parts.netloc, new_path, url_parts.query, url_parts.fragment))
-
- build_fn = os.path.join(self.get_dest_dir(scr), line)
- download_fn = os.path.join(self.build_dir, build_fn)
-
- manifest.append((url, download_fn, build_fn))
- return manifest
-
- def download_file(self, url, dest_fn):
- logger.info("Download %s to %s" % (url, dest_fn))
- if not os.path.exists(os.path.dirname(dest_fn)):
- os.makedirs(os.path.dirname(dest_fn))
- obj = urllib2.urlopen(url)
- file(dest_fn, "w").write(obj.read())
-
- # make python files executable
- if dest_fn.endswith(".py"): # and contents.startswith("#!"):
- os.chmod(dest_fn, 0755)
-
- def download_resource(self, scr):
- if scr.format == "manifest":
- manifest_fn = self.get_download_fn(scr)
- self.download_file(scr.full_url, manifest_fn)
- manifest = self.read_manifest(scr, manifest_fn)
- for (url, download_fn, build_fn) in manifest:
- self.download_file(url, download_fn)
- else:
- self.download_file(scr.full_url, self.get_download_fn(scr))
-
-# XXX docker creates a new container and commits it for every single COPY
-# line in the dockerfile. This causes services with many files (for example,
-# vsg) to take ~ 10-15 minutes to build the docker file. So instead we'll copy
-# the whole build directory, and then run a script that copies the files
-# we want.
-
-# def get_docker_lines(self, scr):
-# if scr.format == "manifest":
-# manifest_fn = self.get_download_fn(scr)
-# manifest = self.read_manifest(scr, manifest_fn)
-# lines = []
-# for (url, download_fn, build_fn) in manifest:
-# script.append("mkdir -p
-# #lines.append("COPY %s /%s" % (build_fn, build_fn))
-# return lines
-# else:
-# build_fn = self.get_build_fn(scr)
-# #return ["COPY %s /%s" % (build_fn, build_fn)]
-
-# def get_controller_docker_lines(self, controller, kinds):
-# need_service_init_py = False
-# dockerfile=[]
-# for scr in controller.loadable_module_resources.all():
-# if scr.kind in kinds:
-# lines = self.get_docker_lines(scr)
-# dockerfile = dockerfile + lines
-# if scr.kind in ["admin", "models"]:
-# need_service_init_py = True
-#
-# if need_service_init_py:
-# file(os.path.join(self.build_dir, "opt/xos/empty__init__.py"),"w").write("")
-# dockerfile.append("COPY opt/xos/empty__init__.py /opt/xos/services/%s/__init__.py" % controller.name)
-#
-# return dockerfile
-
- def get_script_lines(self, scr):
- if scr.format == "manifest":
- manifest_fn = self.get_download_fn(scr)
- manifest = self.read_manifest(scr, manifest_fn)
- lines = []
- for (url, download_fn, build_fn) in manifest:
- lines.append("mkdir -p /%s" % os.path.dirname(build_fn))
- lines.append("cp /build/%s /%s" % (build_fn, build_fn))
- return lines
- else:
- build_fn = self.get_build_fn(scr)
- return ["mkdir -p /%s" % os.path.dirname(build_fn),
- "cp /build/%s /%s" % (build_fn, build_fn)]
-
- def get_controller_script_lines(self, controller, kinds):
- need_service_init_py = False
- script = []
- inits = []
- for scr in list(controller.loadable_module_resources.all()):
- if not (scr.kind in kinds):
- continue
-
- # Check and see if the resource we're trying to install has
- # disappeared. This may happen if the onboarding synchronizer
- # container has been destroyed and restarted. In this case, flag
- # the resource for re-download, and set the build_tainted bit
- # so we can throw an exception after we've evaluated all
- # resources.
-
- download_fn = self.get_download_fn(scr)
- if not os.path.exists(download_fn):
- logger.info("File %s is missing; dirtying the resource" % download_fn)
- scr.backend_status = "2 - download_fn is missing"
- scr.updated = timezone.now()
- scr.save(update_fields=['backend_status', 'updated'])
- self.build_tainted = True
- continue
-
- lines = self.get_script_lines(scr)
- script = script + lines
-
- # compute the set of __init__.py files that we will need
- if scr.kind in ["admin", "models", "rest_service", "rest_tenant"]:
- dir = self.get_base_dest_dir(scr)
- add_unique(inits, dir)
-
- if scr.subdirectory:
- for part in scr.subdirectory.split("/"):
- dir = os.path.join(dir, part)
- add_unique(inits, dir)
-
- for init in inits:
- script.append("echo > %s" % os.path.join("/", init, "__init__.py"))
-
- return script
-
- def check_controller_unready(self, controller):
- unready_resources = []
- for scr in controller.loadable_module_resources.all():
- if (not scr.backend_status) or (not scr.backend_status.startswith("1")):
- unready_resources.append(scr)
-
- return unready_resources
-
- # stuff that has to do with building
-
- def create_xos_app_data(self, name, script, app_list, migration_list):
- if not os.path.exists(os.path.join(self.build_dir, "opt/xos/xos")):
- os.makedirs(os.path.join(self.build_dir, "opt/xos/xos"))
-
- if app_list:
- script.append("mkdir -p /opt/xos/xos")
- script.append("cp /build/opt/xos/xos/%s_xosbuilder_app_list /opt/xos/xos/xosbuilder_app_list" % name)
- # dockerfile.append("COPY opt/xos/xos/%s_xosbuilder_app_list /opt/xos/xos/xosbuilder_app_list" % name)
- file(os.path.join(self.build_dir, "opt/xos/xos/%s_xosbuilder_app_list") % name, "w").write("\n".join(app_list)+"\n")
-
- if migration_list:
- script.append("mkdir -p /opt/xos/xos")
- script.append("cp /build/opt/xos/xos/%s_xosbuilder_migration_list /opt/xos/xos/xosbuilder_migration_list" % name)
- # dockerfile.append("COPY opt/xos/xos/%s_xosbuilder_migration_list /opt/xos/xos/xosbuilder_migration_list" % name)
- file(os.path.join(self.build_dir, "opt/xos/xos/%s_xosbuilder_migration_list") % name, "w").write("\n".join(migration_list)+"\n")
-
- def create_ui_dockerfile(self):
- self.build_tainted = False
- xos = XOS.objects.all()[0]
- dockerfile_fn = "Dockerfile.UI"
-
- app_list = []
- migration_list = []
-
- dockerfile = ["FROM %s" % xos.source_ui_image]
- script = []
- for controller in LoadableModule.objects.all():
- if self.check_controller_unready(controller):
- logger.warning("Loadable Module %s has unready resources" % str(controller))
- continue
-
- # dockerfile = dockerfile + self.get_controller_docker_lines(controller, self.UI_KINDS)
- script = script + self.get_controller_script_lines(controller, self.UI_KINDS)
- if controller.loadable_module_resources.filter(kind="models").exists():
- app_list.append("services." + controller.name)
- migration_list.append(controller.name)
-
- self.create_xos_app_data("ui", script, app_list, migration_list)
-
- file(os.path.join(self.build_dir, "install-xos.sh"), "w").write("\n".join(script)+"\n")
- dockerfile.append("COPY . /build/")
- dockerfile.append("RUN bash /build/install-xos.sh")
-
- file(os.path.join(self.build_dir, dockerfile_fn), "w").write("\n".join(dockerfile)+"\n")
-
- if self.build_tainted:
- raise Exception("Build was tainted due to errors")
-
- return {"dockerfile_fn": dockerfile_fn,
- "docker_image_name": "xosproject/xos-ui"}
-
- def create_synchronizer_dockerfile(self, controller):
- self.build_tainted = False
-
- if not controller.loadable_module_resources.filter(kind="synchronizer").exists():
- # it doesn't have a synchronizer, therefore it doesn't need a dockerfile
- return None
-
- # bake in the synchronizer from this controller
- sync_lines = self.get_controller_script_lines(controller, self.SYNC_CONTROLLER_KINDS)
-
- if self.build_tainted:
- raise Exception("Build was tainted due to errors")
-
- # If there's no sync_lines for this ServiceController, then it must not
- # have a synchronizer.
- if not sync_lines:
- return None
-
- dockerfile_fn = "Dockerfile.%s" % controller.name
- dockerfile = ["FROM %s" % controller.xos.source_ui_image]
- script = []
-
- # Now bake in models from this controller as well as the others
- # It's important to bake all services in, because some services'
- # synchronizers may depend on models from another service.
- app_list = []
- for c in LoadableModule.objects.all():
- script = script + self.get_controller_script_lines(c, self.SYNC_ALLCONTROLLER_KINDS)
- if c.loadable_module_resources.filter(kind="models").exists():
- app_list.append("services." + c.name)
-
- self.create_xos_app_data(controller.name, script, app_list, None)
-
- script = script + sync_lines
-
- file(os.path.join(self.build_dir, "install-%s.sh" % controller.name), "w").write("\n".join(script)+"\n")
- dockerfile.append("COPY . /build/")
- dockerfile.append("RUN bash /build/install-%s.sh" % controller.name)
-
- file(os.path.join(self.build_dir, dockerfile_fn), "w").write("\n".join(dockerfile)+"\n")
-
- if self.build_tainted:
- raise Exception("Build was tainted due to errors")
-
- return {"dockerfile_fn": dockerfile_fn,
- "docker_image_name": "xosproject/xos-synchronizer-%s" % controller.name}
-
- def create_docker_compose(self):
- xos = XOS.objects.all()[0]
-
- volume_list = []
- for volume in xos.volumes.all():
- volume_list.append({"host_path": volume.host_path,
- "container_path": volume.container_path,
- "read_only": volume.read_only})
-
- # Force all legacy UI and Synchronizer containers to include
- # the certificate, so that it does not have to be built at build time.
-
- volume_list.append({"host_path": "/opt/cord_profile/im_cert_chain.pem",
- "container_path": "/usr/local/share/ca-certificates/local_certs.crt",
- "read_only": True})
-
- if xos.extra_hosts:
- extra_hosts = [x.strip() for x in xos.extra_hosts.split(",")]
- else:
- extra_hosts = []
-
- networks = [ "xos" ] # docker networks used by XOS, parameterize in the future
-
- containers = {}
-
- external_links = []
- if xos.db_container_name:
- external_links.append("%s:%s" % (xos.db_container_name, "xos_db"))
- if xos.redis_container_name:
- external_links.append("%s:%s" % (xos.redis_container_name, "redis"))
-
- # eventually xos_ui will go away, and only xos_core shall remain.
-
- containers["xos_ui"] = {
- "image": xos.dest_ui_image,
- "command": "python /opt/xos/manage.py runserver 0.0.0.0:%d --insecure --makemigrations" % xos.ui_port,
- "networks": networks,
- "ports": {"%d" % xos.ui_port: "%d" % xos.ui_port},
- "external_links": external_links,
- "extra_hosts": extra_hosts,
- "volumes": volume_list}
-
- # The core needs access to docker so it can restart Chameleon
- core_volume_list = volume_list + [{"host_path": "/var/run/docker.sock", "container_path": "/var/run/docker.sock", "read_only": False}]
-
- containers["xos_core"] = {
- "image": xos.dest_ui_image,
- "command": 'bash -c "cd coreapi; bash ./start_coreapi.sh"',
- "networks": networks,
- "ports": {"50055": "50055", "50051" : "50051"},
- "external_links": external_links,
- "extra_hosts": extra_hosts,
- "volumes": core_volume_list}
-
- if xos.no_start:
- containers["xos_ui"]["command"] = "sleep 864000"
- containers["xos_core"]["command"] = "sleep 864000"
-
- # creating Component containers
- for c in XOSComponent.objects.all():
-
- # create internal and external links list
- component_links = []
- component_external_links = []
- for l in c.links.all():
- if l.kind == 'internal':
- component_links.append("%s:%s" % (l.container, l.alias))
- elif l.kind == 'external':
- component_external_links.append("%s:%s" % (l.container, l.alias))
-
- # creating volumes list
- component_volume_list = []
- for volume in c.volumes.all():
- component_volume_list.append({"host_path": volume.host_path,
- "container_path": volume.container_path,
- "read_only": volume.read_only})
-
- # creating containervolumes list
- component_containervolume_list = []
- for volume in c.volumecontainers.all():
- component_containervolume_list.append(volume.container)
-
- if c.ports:
- port = c.ports.split(":")
- ports = {
- port[0]: port[1]
- }
- else:
- ports = {}
-
- containers[c.name] = {
- "image": c.image,
- "command": c.command,
- "networks": networks,
- "ports": ports,
- "links": component_links,
- "external_links": component_external_links,
- "volumes": component_volume_list,
- "volumes_from": component_containervolume_list,
- }
-
- if c.no_start:
- containers[c.name]["command"] = "sleep 864000"
-
- if not xos.frontend_only:
- for c in ServiceController.objects.all():
- if self.check_controller_unready(c):
- logger.warning("Controller %s has unready resources" % str(c))
- continue
-
- if c.no_deploy:
- logger.info("Controller %s has no_deploy set" % str(c))
-
- if c.no_build and c.image:
- # refactored synchronizer containers
-
- nb_volume_list=[{"host_path": "/opt/cord_profile/node_key",
- "container_path": "/opt/cord_profile/node_key",
- "read_only": True},
- {"host_path": "/opt/cord/build/platform-install/credentials/xosadmin@opencord.org",
- "container_path": "/opt/xos/services/%s/credentials/xosadmin@opencord.org" % c.name,
- "read_only": True},
- {"host_path": "/opt/cord_profile/im_cert_chain.pem",
- "container_path": "/usr/local/share/ca-certificates/local_certs.crt",
- "read_only": True}]
-
- # keys inside onboarding sync are relative to /opt/xos/key_import
- # keys outside onboarding sync are relative to /opt/cord_profile
- if os.path.exists("/opt/xos/key_import/%s_rsa" % c.name):
- nb_volume_list.append({"host_path": "/opt/cord_profile/key_import/%s_rsa" % c.name, # Note: not all services have keys
- "container_path": "/opt/xos/services/%s/keys/%s_rsa" % (c.name, c.name),
- "read_only": True})
-
- if c.name.lower() == "vtr":
- # VTR is special -- it has the vSG's private key
- # TODO: If docker-compose autogenerate remains a feature,
- # then replace this special-purposing with a general-
- # purpose mechanism for sharing private keys with
- # trusted services.
- nb_volume_list.append({"host_path": "/opt/cord_profile/key_import/vsg_rsa",
- "container_path": "/opt/xos/services/%s/keys/vsg_rsa" % c.name,
- "read_only": True})
-
- if c.name.lower() == "openstack":
- # OpenStack is special -- it needs to onboard images to glance
- # TODO: If docker-compose autogenerate remains a feature,
- # then replace this special-purposing with a general-
- # purpose mechanism allowing volume mounts to specific
- # containers.
- nb_volume_list.append({"host_path": "/opt/cord_profile/images",
- "container_path": "/opt/xos/images",
- "read_only": True})
-
-
- nb_external_links = []
- if xos.redis_container_name:
- nb_external_links.append("%s:%s" % (xos.redis_container_name, "redis"))
-
- containers["%s-synchronizer" % c.name] = {
- "image": c.image,
- "volumes": nb_volume_list,
- "external_links": nb_external_links,
- "networks": networks}
-
- elif c.loadable_module_resources.filter(kind="synchronizer").exists():
- # old-style synchronizer containers
- if c.synchronizer_run and c.synchronizer_config:
- command = 'bash -c "sleep 120; update-ca-certificates; cd /opt/xos/synchronizers/%s; python ./%s -C %s"' % (c.name, c.synchronizer_run, c.synchronizer_config)
- else:
- command = 'bash -c "sleep 120; update-ca-certificates; cd /opt/xos/synchronizers/%s; bash ./run.sh"' % c.name
-
- containers["xos_synchronizer_%s" % c.name] = {
- "image": "xosproject/xos-synchronizer-%s" % c.name,
- "command": command,
- "networks": networks,
- "external_links": external_links,
- "extra_hosts": extra_hosts,
- "volumes": volume_list
- }
-
- if c.no_start:
- containers["xos_synchronizer_%s" % c.name]["command"] = "sleep 864000"
-
- vars = {"containers": containers, "networks": networks }
-
- template_loader = jinja2.FileSystemLoader("/opt/xos/synchronizers/onboarding/templates/")
- template_env = jinja2.Environment(loader=template_loader)
- template = template_env.get_template("docker-compose.yml.j2")
- buffer = template.render(vars)
-
- if not os.path.exists("/opt/xos/synchronizers/onboarding/docker-compose"):
- os.makedirs("/opt/xos/synchronizers/onboarding/docker-compose")
- file("/opt/xos/synchronizers/onboarding/docker-compose/docker-compose.yml", "w").write(buffer)
-
-# def build_xos(self):
-# dockerfiles=[]
-# dockerfiles.append(self.create_ui_dockerfile())
-#
-# for controller in ServiceController.objects.all():
-# dockerfiles.append(self.create_synchronizer_dockerfile(controller))