Merge branch 'feature/bootstrap' into develop
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index b69b7fd..0000000
--- a/Dockerfile
+++ /dev/null
@@ -1,144 +0,0 @@
-FROM ubuntu:14.04.3
-MAINTAINER Andy Bavier <acb@cs.princeton.edu>
-
-# XXX Workaround for docker bug:
-# https://github.com/docker/docker/issues/6345
-# Kernel 3.15 breaks docker, uss the line below as a workaround
-# until there is a fix
-RUN ln -s -f /bin/true /usr/bin/chfn
-# XXX End workaround
-
-# Install.
-RUN apt-get update && apt-get install -y \
- git \
- postgresql \
- python-psycopg2 \
- graphviz \
- graphviz-dev \
- libxslt1.1 \
- libxslt1-dev \
- python-pip \
- tar \
- gcc \
- python-httplib2 \
- geoip-database \
- libgeoip1 \
- wget \
- curl \
- python-dev \
- libyaml-dev \
- pkg-config \
- python-pycurl
-
-RUN pip install django==1.7
-RUN pip install djangorestframework==2.4.4
-RUN pip install markdown # Markdown support for the browseable API.
-RUN pip install pyyaml # YAML content-type support.
-RUN pip install django-filter # Filtering support
-RUN pip install lxml # XML manipulation library
-RUN pip install netaddr # IP Addr library
-RUN pip install pytz
-RUN pip install django-timezones
-RUN pip install requests
-RUN pip install django-crispy-forms
-RUN pip install django-geoposition
-RUN pip install django-extensions
-RUN pip install django-suit
-RUN pip install django-bitfield
-RUN pip install django-ipware
-RUN pip install django-encrypted-fields
-RUN pip install python-keyczar
-RUN pip install pygraphviz
-RUN pip install dnslib
-
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-keystoneclient
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-novaclient
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-neutronclient
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-glanceclient
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-ceilometerclient
-
-RUN pip install django_rest_swagger
-
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-setuptools
-RUN easy_install django_evolution
-RUN easy_install python_gflags
-RUN easy_install --upgrade httplib2
-RUN easy_install google_api_python_client
-RUN easy_install httplib2.ca_certs_locater
-
-# Install custom Ansible
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-crypto
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-yaml
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y openssh-client
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-paramiko
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-jinja2
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-httplib2
-RUN git clone -b release1.8.2 git://github.com/ansible/ansible.git /opt/ansible
-RUN git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/lib/ansible/modules/extras
-RUN git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/v2/ansible/modules/extras
-RUN git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/lib/ansible/modules/core
-RUN git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/v2/ansible/modules/core
-ADD ansible-hosts /etc/ansible/hosts
-
-ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
-
-# For Observer
-RUN git clone git://git.planet-lab.org/fofum.git /tmp/fofum
-RUN cd /tmp/fofum; python setup.py install
-RUN rm -rf /tmp/fofum
-
-RUN mkdir -p /usr/local/share /bin
-ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
-RUN tar jxvf /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2 -C /usr/local/share/
-RUN rm -f /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2
-RUN ln -s /usr/local/share/phantomjs-1.7.0-linux-x86_64 /usr/local/share/phantomjs
-RUN ln -s /usr/local/share/phantomjs/bin/phantomjs /bin/phantomjs
-
-# Supervisor
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y supervisor
-ADD observer.conf /etc/supervisor/conf.d/
-
-# Get XOS
-ADD xos /opt/xos
-
-# Initscript is broken in Ubuntu
-#ADD observer-initscript /etc/init.d/xosobserver
-
-RUN chmod +x /opt/xos/scripts/opencloud
-RUN /opt/xos/scripts/opencloud genkeys
-
-# Workaround for AUFS issue
-# https://github.com/docker/docker/issues/783#issuecomment-56013588
-RUN mkdir /etc/ssl/private-copy; mv /etc/ssl/private/* /etc/ssl/private-copy/; rm -r /etc/ssl/private; mv /etc/ssl/private-copy /etc/ssl/private; chmod -R 0700 /etc/ssl/private; chown -R postgres /etc/ssl/private
-
-# Set postgres password to match default value in settings.py
-RUN service postgresql start; sudo -u postgres psql -c "alter user postgres with password 'password';"
-
-# Turn DEBUG on so that devel server will serve static files
-# (not necessary if --insecure is passed to 'manage.py runserver')
-# RUN sed -i 's/DEBUG = False/DEBUG = True/' /opt/xos/xos/settings.py
-
-# Cruft to workaround problems with migrations, should go away...
-RUN /opt/xos/scripts/opencloud remigrate
-
-# git clone uses cached copy, doesn't pick up latest
-RUN git -C /opt/ansible pull
-RUN git -C /opt/ansible/lib/ansible/modules/core pull
-RUN git -C /opt/ansible/v2/ansible/modules/core pull
-
-# install Tosca engine
-RUN apt-get install -y m4
-RUN pip install python-dateutil
-RUN bash /opt/xos/tosca/install_tosca.sh
-
-EXPOSE 8000
-
-# Set environment variables.
-ENV HOME /root
-
-# Define working directory.
-WORKDIR /root
-
-# Define default command.
-#CMD ["/bin/bash"]
-CMD /opt/xos/scripts/docker_start_xos
diff --git a/Dockerfile.cord b/Dockerfile.cord
deleted file mode 100644
index ee0879d..0000000
--- a/Dockerfile.cord
+++ /dev/null
@@ -1,7 +0,0 @@
-FROM xos:latest
-MAINTAINER Andy Bavier <acb@cs.princeton.edu>
-
-ADD xos/observers/vcpe/supervisor/vcpe-observer.conf /etc/supervisor/conf.d/
-RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/vcpe/vcpe_observer_config
-ADD xos/observers/monitoring_channel/supervisor/monitoring_channel_observer.conf /etc/supervisor/conf.d/
-RUN sed -i 's/proxy_ssh=True/proxy_ssh=False/' /opt/xos/observers/monitoring_channel/monitoring_channel_observer_config
diff --git a/README.Docker b/README.Docker
deleted file mode 100644
index 0076bca..0000000
--- a/README.Docker
+++ /dev/null
@@ -1,49 +0,0 @@
-The Dockerfile in this directory will build a Docker image for running
-XOS using the Django development server. It copies whatever files are
-in the local repository into the image. Here's how to do it:
-
-1. A minimal initial_data.json is provided. The login credentials
- for this initial_data.json are username=padmin@vicci.org,
- password=letmein.
-
- This initial_data.json doesn't contain any nodes and is suitable
- for fresh installations. To obtain an initial_data.json (for demo
- purposes) that contains an interesting set of Nodes and Slices,
- a dump can be made on portal.opencloud.us:
-
- 1) log in to portal, and run:
- $ sudo /opt/xos/scripts/opencloud dumpdata
-
- 2) replace the initial_data.json file with the dumpdata
- file produced above.
-
-2. $ docker build -t xos .
-
-3. $ docker run -t -i -p 8000:8000 xos
-
-4. Now you will have a bash prompt as root inside the XOS container.
- Start up XOS:
-
- # /opt/xos/scripts/opencloud runserver
-
-You can access the XOS login at http:<server>:8000, where <server> is
-the name of the server running Docker.
-
-5. From another terminal window, you can run following command to find
-the running container id
-
- $ docker ps
- CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
- a3b668454d21 xos:latest "/bin/bash" 3 hours ago Up 3 hours 0.0.0.0:8000->8000/tcp romantic_bohr
-
-and then you can have another bash prompt (in a different TTY) as root inside the XOS container.
-
- $ docker exec -it a3b668454d21 bash
-
-and start observer
-
- # python /opt/xos/xos-observer.py
-
-STILL TO DO
------------
-* Test Observer
diff --git a/cloudlab-init.sh b/cloudlab-init.sh
deleted file mode 100755
index 9a2c94a..0000000
--- a/cloudlab-init.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/bash
-set -x
-
-# This script assumes that it is being run on the ctl node of the OpenStack
-# profile on CloudLab.
-
-XOS="http://ctl:9999/"
-AUTH="padmin@vicci.org:letmein"
-CORD=0
-IMAGE="xos"
-
-# Create public key if none present
-[ -e ~/.ssh/id_rsa ] || cat /dev/zero | ssh-keygen -q -N ""
-
-# Install Docker
-which docker > /dev/null || wget -qO- https://get.docker.com/ | sh
-sudo usermod -aG docker $(whoami)
-
-sudo apt-get -y install httpie
-
-if [ "$CORD" -ne 0 ]
-then
- cp ~/.ssh/id_rsa.pub xos/observers/vcpe/vcpe_public_key
- cp ~/.ssh/id_rsa xos/observers/vcpe/vcpe_private_key
- cp ~/.ssh/id_rsa.pub xos/observers/monitoring_channel/monitoring_channel_public_key
- cp ~/.ssh/id_rsa xos/observers/monitoring_channel/monitoring_channel_private_key
-fi
-
-sudo docker build -t xos .
-
-if [ "$CORD" -ne 0 ]
-then
- sudo docker build -t cord -f Dockerfile.cord .
- IMAGE="cord"
-fi
-
-# OpenStack is using port 8000...
-MYIP=$( hostname -i )
-MYFLATLANIF=$( sudo bash -c "netstat -i" |grep flat|awk '{print $1}' )
-MYFLATLANIP=$( ifconfig $MYFLATLANIF | grep "inet addr" | awk -F: '{print $2}' | awk '{print $1}' )
-sudo docker run -d --add-host="ctl:$MYIP" -p 9999:8000 $IMAGE
-
-echo "Waiting for XOS to come up"
-until http $XOS &> /dev/null
-do
- sleep 1
-done
-
-# Copy public key
-# BUG: Shouldn't have to set the 'enacted' field...
-PUBKEY=$( cat ~/.ssh/id_rsa.pub )
-http --auth $AUTH PATCH $XOS/xos/users/1/ public_key="$PUBKEY" enacted=$( date "+%Y-%m-%dT%T")
-
-# Set up controller
-sudo cp /root/setup/admin-openrc.sh /tmp
-sudo chmod a+r /tmp/admin-openrc.sh
-#sudo sed -i 's/:5000/:35357/' /tmp/admin-openrc.sh
-source /tmp/admin-openrc.sh
-
-if [ "$CORD" -ne 1 ]
-then
- http --auth $AUTH POST $XOS/xos/controllers/ name=CloudLab deployment=$XOS/xos/deployments/1/ backend_type=OpenStack version=Kilo auth_url=$OS_AUTH_URL admin_user=$OS_USERNAME admin_tenant=$OS_TENANT_NAME admin_password=$OS_PASSWORD domain=Default
-else
- sudo cp /root/setup/settings /tmp
- sudo chmod a+r /tmp/settings
- source /tmp/settings
- source /tmp/admin-openrc.sh
- http --auth $AUTH POST $XOS/xos/controllers/ name=CloudLab deployment=$XOS/xos/deployments/1/ backend_type=OpenStack version=Kilo auth_url=$OS_AUTH_URL admin_user=$OS_USERNAME admin_tenant=$OS_TENANT_NAME admin_password=$OS_PASSWORD domain=Default rabbit_host=$MYFLATLANIP rabbit_user=$RABBIT_USER rabbit_password=$RABBIT_PASS
-fi
-
-# Add controller to site
-http --auth $AUTH PATCH $XOS/xos/sitedeployments/1/ controller=$XOS/xos/controllers/1/
-
-# Add image
-http --auth $AUTH POST $XOS/xos/images/ name=trusty-server-multi-nic disk_format=QCOW2 container_format=BARE
-
-# Activate image
-http --auth $AUTH POST $XOS/xos/imagedeploymentses/ deployment=$XOS/xos/deployments/1/ image=$XOS/xos/images/1/
-
-# Add node
-NODES=$( sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}' )
-for NODE in $NODES
-do
- http --auth $AUTH POST $XOS/xos/nodes/ name=$NODE site_deployment=$XOS/xos/sitedeployments/1/
-done
-
-# Modify networktemplate/2
-# BUG: Shouldn't have to set the controller_kind field, it's invalid in the initial fixture
-FLATNET=$( sudo bash -c "source /root/setup/admin-openrc.sh ; neutron net-list" |grep flat|awk '{print $4}' )
-http --auth $AUTH PATCH $XOS/xos/networktemplates/2/ shared_network_name=$FLATNET controller_kind=""
-
-if [ "$CORD" -ne 0 ]
-then
- DOCKER=$( sudo docker ps|grep $IMAGE|awk '{print $NF}' )
- sudo docker exec $DOCKER bash -c "cd /opt/xos/tosca; python run.py padmin@vicci.org samples/cord-cloudlab.yaml; python run.py padmin@vicci.org samples/ceilometer.yaml"
-fi
diff --git a/containers/README b/containers/README
new file mode 100644
index 0000000..9891efe
--- /dev/null
+++ b/containers/README
@@ -0,0 +1,51 @@
+
+1. Introduction
+
+   XOS is composed of three core services:
+
+ * A database backend (postgres)
+ * A webserver front end (django)
+   * A synchronizer daemon that interacts with the OpenStack backend.
+
+   We have created a separate Dockerfile for each of these services, making it easier to
+   build the images independently and to deploy and run them in isolated environments.
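+
+   As a rough sketch, the layout under this directory (assuming the per-service
+   directories referenced in the sections below) looks like:
+
+     containers/
+       postgres/       # database backend image
+       xos/            # django webserver image
+       synchronizer/   # synchronizer daemon image (built on top of the xos image)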
+
+2. Database Container
+
+ To build and run the database container:
+
+ $ cd postgres; make build && make run;
+
+3. XOS container
+
+ To build and run the xos webserver container:
+
+ $ cd xos; make build && make run;
+
+  You should now be able to access the login page by visiting http://localhost:80 and
+  log in using the default padmin account. It may be helpful to bootstrap xos with
+  some sample data: deployments, controllers, sites, slices, etc. You can get started by
+  loading the TOSCA configuration for the opencloud demo dataset:
+
+ $ cd xos; make runtosca;
+
+  Or you can create your own TOSCA configuration file and customize the dataset however
+  you want. You can also load your own TOSCA configuration by setting the TOSCA_CONFIG_PATH
+  environment variable before executing the make command:
+
+ $ cd xos; TOSCA_CONFIG_PATH=/path/to/tosca/config.yaml make runtosca
+
+4. Synchronizer container
+
+  The synchronizer shares many of the same dependencies as the xos container. The synchronizer
+  container takes advantage of this by building itself on top of the xos image. This means
+  you must build the xos image before building the synchronizer image. The XOS and
+  synchronizer containers can run on separate hosts, but you must build the xos image
+  on the host where you plan to run the synchronizer container. Assuming you have already
+  built the xos image, executing the following will build and run the synchronizer container:
+
+ $ cd synchronizer; make build && make run
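+
+  For example, on a single host the full sequence (a sketch, assuming the xos and
+  synchronizer directories are siblings under containers/) would be:
+
+  $ cd xos; make build
+  $ cd ../synchronizer; make build && make run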
+
+
+
+
diff --git a/containers/observer/Makefile b/containers/observer/Makefile
deleted file mode 100644
index e7fedf5..0000000
--- a/containers/observer/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-CONTAINER_NAME:=observer-server
-
-.PHONY: build
-build: ; docker build --rm -t observer .
-
-.PHONY: run
-run: ; docker run -d --name ${CONTAINER_NAME} observer
-
-.PHONY: stop
-stop: ; docker stop ${CONTAINER_NAME}
-
-.PHONY: rm
-rm: ; docker rm ${CONTAINER_NAME}
diff --git a/containers/observer/Dockerfile b/containers/synchronizer/Dockerfile
similarity index 87%
rename from containers/observer/Dockerfile
rename to containers/synchronizer/Dockerfile
index 7ec6592..44b058e 100644
--- a/containers/observer/Dockerfile
+++ b/containers/synchronizer/Dockerfile
@@ -25,7 +25,7 @@
# For Observer
RUN mkdir -p /usr/local/share /bin /etc/ansible
-RUN cp /tmp/xos/containers/observer/conf/ansible-hosts /etc/ansible/hosts
+RUN cp /tmp/xos/containers/synchronizer/conf/ansible-hosts /etc/ansible/hosts
ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
@@ -39,6 +39,6 @@
# Supervisor
-RUN cp /tmp/xos/containers/observer/conf/observer.conf /etc/supervisor/conf.d/
+RUN cp /tmp/xos/containers/synchronizer/conf/synchronizer.conf /etc/supervisor/conf.d/
-CMD /usr/bin/supervisord -c /etc/supervisor/conf.d/observer.conf
+CMD /usr/bin/supervisord -c /etc/supervisor/conf.d/synchronizer.conf
diff --git a/containers/synchronizer/Makefile b/containers/synchronizer/Makefile
new file mode 100644
index 0000000..14520d9
--- /dev/null
+++ b/containers/synchronizer/Makefile
@@ -0,0 +1,13 @@
+CONTAINER_NAME:=synchronizer-server
+
+.PHONY: build
+build: ; docker build --rm -t synchronizer .
+
+.PHONY: run
+run: ; docker run -d --name ${CONTAINER_NAME} synchronizer
+
+.PHONY: stop
+stop: ; docker stop ${CONTAINER_NAME}
+
+.PHONY: rm
+rm: ; docker rm ${CONTAINER_NAME}
diff --git a/containers/observer/conf/ansible-hosts b/containers/synchronizer/conf/ansible-hosts
similarity index 100%
rename from containers/observer/conf/ansible-hosts
rename to containers/synchronizer/conf/ansible-hosts
diff --git a/containers/observer/conf/observer.conf b/containers/synchronizer/conf/synchronizer.conf
similarity index 62%
rename from containers/observer/conf/observer.conf
rename to containers/synchronizer/conf/synchronizer.conf
index 48f61dd..cda6716 100644
--- a/containers/observer/conf/observer.conf
+++ b/containers/synchronizer/conf/synchronizer.conf
@@ -3,7 +3,7 @@
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
nodaemon=true
-[program:observer]
+[program:synchronizer]
command=python /opt/xos/xos-observer.py
-stderr_logfile=/var/log/supervisor/observer.err.log
-stdout_logfile=/var/log/supervisor/observer.out.log
+stderr_logfile=/var/log/supervisor/synchronizer.err.log
+stdout_logfile=/var/log/supervisor/synchronizer.out.log
diff --git a/xos/ceilometer/admin.py b/xos/ceilometer/admin.py
index 987877c..3ed70b2 100644
--- a/xos/ceilometer/admin.py
+++ b/xos/ceilometer/admin.py
@@ -81,6 +81,18 @@
form = MonitoringChannelForm
suit_form_tabs = (('general','Details'),)
+    actions = ['delete_selected_objects']
+
+ def get_actions(self, request):
+ actions = super(MonitoringChannelAdmin, self).get_actions(request)
+ if 'delete_selected' in actions:
+ del actions['delete_selected']
+ return actions
+
+ def delete_selected_objects(self, request, queryset):
+ for obj in queryset:
+ obj.delete()
+ delete_selected_objects.short_description = "Delete Selected MonitoringChannel Objects"
def queryset(self, request):
return MonitoringChannel.get_tenant_objects_by_user(request.user)
diff --git a/xos/ceilometer/models.py b/xos/ceilometer/models.py
index a838c4e..e149eb5 100644
--- a/xos/ceilometer/models.py
+++ b/xos/ceilometer/models.py
@@ -29,7 +29,7 @@
sync_attributes = ("private_ip", "private_mac",
"ceilometer_ip", "ceilometer_mac",
- "nat_ip", "nat_mac",)
+ "nat_ip", "nat_mac", "ceilometer_port",)
default_attributes = {}
def __init__(self, *args, **kwargs):
@@ -37,6 +37,11 @@
if ceilometer_services:
self._meta.get_field("provider_service").default = ceilometer_services[0].id
super(MonitoringChannel, self).__init__(*args, **kwargs)
+ self.set_attribute("use_same_instance_for_multiple_tenants", True)
+
+ def can_update(self, user):
+        #Allow non-admin users to create instances of this model as well
+ return True
def save(self, *args, **kwargs):
if not self.creator:
@@ -62,7 +67,7 @@
@property
def addresses(self):
- if not self.instance:
+ if (not self.id) or (not self.instance):
return {}
addresses = {}
@@ -121,6 +126,12 @@
for cs in slice.controllerslices.all():
if cs.tenant_id:
tenant_ids.add(cs.tenant_id)
+ if self.creator.is_admin:
+            #TODO: Ceilometer publishes the SDN meters without associating them with any tenant ID.
+            #For now, the ceilometer code is changed to publish all such meters with the tenant
+            #id "default_admin_tenant". Here we add that default tenant as an authorized tenant_id
+            #for all admin users.
+ tenant_ids.add("default_admin_tenant")
return tenant_ids
@property
@@ -132,10 +143,17 @@
return ", ".join(self.tenant_list)
@property
+ def ceilometer_port(self):
+        # TODO: Find better logic to choose a unique ceilometer port number for each instance
+ if not self.id:
+ return None
+ return 8888+self.id
+
+ @property
def ceilometer_url(self):
if not self.ceilometer_ip:
return None
- return "http://" + self.private_ip + ":8888/"
+ return "http://" + self.private_ip + ":" + str(self.ceilometer_port) + "/"
def model_policy_monitoring_channel(pk):
# TODO: this should be made in to a real model_policy
diff --git a/xos/configurations/bash/Makefile.inside b/xos/configurations/bash/Makefile.inside
index 30bf6d6..176ef47 100644
--- a/xos/configurations/bash/Makefile.inside
+++ b/xos/configurations/bash/Makefile.inside
@@ -2,5 +2,6 @@
setup_xos:
bash /opt/xos/scripts/docker_setup_xos
+ python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab.yaml
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-nodes.yaml
diff --git a/xos/configurations/common/Dockerfile.common b/xos/configurations/common/Dockerfile.common
index 03b479a..d78585a 100644
--- a/xos/configurations/common/Dockerfile.common
+++ b/xos/configurations/common/Dockerfile.common
@@ -43,7 +43,7 @@
RUN pip install django-crispy-forms
RUN pip install django-geoposition
RUN pip install django-extensions
-RUN pip install django-suit
+RUN pip install django-suit==0.3a1
RUN pip install django-bitfield
RUN pip install django-ipware
RUN pip install django-encrypted-fields
@@ -60,7 +60,6 @@
RUN pip install django_rest_swagger
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y python-setuptools
-RUN easy_install django_evolution
RUN easy_install python_gflags
RUN easy_install --upgrade httplib2
RUN easy_install google_api_python_client
diff --git a/xos/configurations/common/Makefile.cloudlab b/xos/configurations/common/Makefile.cloudlab
index 43e7497..6e609d7 100644
--- a/xos/configurations/common/Makefile.cloudlab
+++ b/xos/configurations/common/Makefile.cloudlab
@@ -13,6 +13,7 @@
flat_name:
sudo bash -c "source /root/setup/admin-openrc.sh ; neutron net-list" |grep flat|awk '{printf "%s",$$4}' > flat_net_name
+ [ -s flat_net_name ] # throw error if flat_net_name is empty
nodes_yaml:
bash ./make-cloudlab-nodes-yaml.sh
diff --git a/xos/configurations/common/fixtures.yaml b/xos/configurations/common/fixtures.yaml
new file mode 100644
index 0000000..c5e9dd1
--- /dev/null
+++ b/xos/configurations/common/fixtures.yaml
@@ -0,0 +1,23 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Some basic fixtures
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ s_tag:
+ type: tosca.nodes.NetworkParameterType
+
+ c_tag:
+ type: tosca.nodes.NetworkParameterType
+
+ next_hop:
+ type: tosca.nodes.NetworkParameterType
+
+ device:
+ type: tosca.nodes.NetworkParameterType
+
+ bridge:
+ type: tosca.nodes.NetworkParameterType
diff --git a/xos/configurations/cord/Makefile b/xos/configurations/cord/Makefile
index 4c2e423..86b4ca1 100644
--- a/xos/configurations/cord/Makefile
+++ b/xos/configurations/cord/Makefile
@@ -2,7 +2,7 @@
RUNNING_CONTAINER:=$(shell sudo docker ps|grep "xos"|awk '{print $$NF}')
LAST_CONTAINER=$(shell sudo docker ps -l -q)
-test: common_cloudlab ceilometer_dashboard
+cord: common_cloudlab ceilometer_dashboard
echo "# Autogenerated -- do not edit" > Dockerfile
cat ../common/Dockerfile.common Dockerfile.cord >> Dockerfile
cd ../../..; sudo docker build -t xos -f xos/configurations/cord/Dockerfile .
diff --git a/xos/configurations/cord/Makefile.inside b/xos/configurations/cord/Makefile.inside
index c412f25..b8e23f8 100644
--- a/xos/configurations/cord/Makefile.inside
+++ b/xos/configurations/cord/Makefile.inside
@@ -2,6 +2,7 @@
setup_xos:
bash /opt/xos/scripts/docker_setup_xos
+ python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab.yaml
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/cloudlab-nodes.yaml
python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord/cord.yaml
diff --git a/xos/configurations/cord/README.md b/xos/configurations/cord/README.md
index 4d64953..e1b2d2e 100644
--- a/xos/configurations/cord/README.md
+++ b/xos/configurations/cord/README.md
@@ -43,13 +43,17 @@
To get started on CloudLab:
* Create an experiment using the *OpenStack-CORD* profile. (You can also use the *OpenStack* profile, but choose *Kilo*
-and disable security groups.)
+with two compute nodes and disable security groups.)
* Wait until you get an email from CloudLab with title "OpenStack Instance Finished Setting Up".
* Login to the *ctl* node of your experiment and run:
```
-$ git clone https://github.com/open-cloud/xos.git
-$ cd xos/xos/configurations/cord/
-$ make
+ctl:~$ git clone https://github.com/open-cloud/xos.git
+ctl:~$ cd xos/xos/configurations/cord/
+```
+Edit `cord.yaml` in this directory. Change the hostnames `cp-1.devel.xos-pg0.clemson.cloudlab.us` and
+`cp-2.devel.xos-pg0.clemson.cloudlab.us` to the names of the compute nodes in your experiment. Now run:
+```
+ctl:~/xos/xos/configurations/cord$ make
```
Running `make` in this directory creates the XOS Docker container and runs the TOSCA engine with `cord.yaml` to
@@ -66,12 +70,67 @@
ONOS app for the vCPE. To verify that it has received an IP address mapping, look at the **Routeable subnet:** field in
the appropriate *Vbng tenant* object in XOS. It should contain an IP address in the 10.254.0.0/24 subnet.
+After launching the ONOS apps, it is necessary to configure software switches along the dataplane so that ONOS can control
+them. To do this, from the `cord` configuration directory:
+```
+ctl:~/xos/xos/configurations/cord$ cd dataplane/
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-inventory.sh > hosts
+ctl:~/xos/xos/configurations/cord/dataplane$ ansible-playbook -i hosts dataplane.yaml
+```
+
+To set up the dataplane for containers on bare metal, perform these steps in addition to the above (note: be sure to use sudo when running the playbook):
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./generate-bm.sh > hosts-bm
+ctl:~/xos/xos/configurations/cord/dataplane$ sudo ansible-playbook -i hosts-bm dataplane-bm.yaml
+```
+
+Check that the vCPE container has started by going into the XOS UI, selecting 'Services', 'service_vcpe', 'Administration', 'Vcpe Tenants', and making sure there is a green icon next to the vCPE.
+
+If the vCPE Tenant is still red, the Instance may be in exponential backoff due to errors that occurred while trying to sync before dataplane.yaml was run. You can reset the backoff by tracking down the vCPE Instance (Slices->mysite_vcpe->Instances; find the Instance associated with the vCPE Tenant) and hitting the save button.
+
+Currently the vOLT switch does not forward ARP, so it is necessary to set up ARP mappings between the client
+and the vCPE. Log into the client and add an ARP entry for the vCPE:
+```
+client:$ sudo arp -s 192.168.0.1 <mac-of-eth1-in-vCPE-container>
+```
+Inside the vCPE container add a similar entry for the client:
+```
+vcpe:$ arp -s 192.168.0.2 <mac-of-br-sub-on-client>
+```
+
+Now SSH into ONOS running the OLT app (see below) and activate the subscriber:
+```
+onos> add-subscriber-access of:0000000000000001 1 432
+```
+
+At this point you should be able to ping 192.168.0.1 from the client. The final step is to set the
+vCPE as the gateway on the client:
+```
+client:$ sudo route del default gw 10.11.10.5
+client:$ sudo route add default gw 192.168.0.1
+```
+The client should now be able to surf the Internet through the dataplane.
+
+## Setting up /etc/hosts
+
+To make it easy to log into the various VMs that make up the dataplane, add entries for them into `/etc/hosts` on the
+*ctl* node. As root, run:
+```
+ctl:~/xos/xos/configurations/cord/dataplane$ ./gen-etc-hosts.sh >> /etc/hosts
+```
+For example, to log into the client:
+```
+ctl:~$ ssh ubuntu@client
+```
+
## How to log into ONOS
-The ONOS Docker container runs in the VMs belonging to the *mysite_onos* slice. All ports exposed by the ONOS container are forwarded to the outside, and can be accessed from the *ctl* node using the `flat-lan-1-net` address of the hosting VM. For example, if the IP addresss of the VM is 10.11.10.30, then it is possible to SSH to ONOS as follows (password is *karaf*):
+ONOS apps are run inside Docker containers hosted in VMs. All ports exposed by the ONOS container are forwarded to the
+outside, and can be accessed from the *ctl* node over the `flat-lan-1-net` network. Assuming that `/etc/hosts`
+has been configured as described above, it is possible to SSH to the ONOS instance running the `virtualbng` app as follows (the password is *karaf*):
```
-$ ssh -p 8101 karaf@10.11.10.30
+$ ssh -p 8101 karaf@onos_vbng
Password authentication
Password:
Welcome to Open Network Operating System (ONOS)!
@@ -95,3 +154,9 @@
Private IP - Public IP
10.0.1.3 - 10.254.0.129
```
+
+## Troubleshooting
+
+#### Problem: No external connectivity from vCPE container
+1. Make sure the hosts listed in `virtualbng.json` are the actual compute nodes used in your experiment.
+2. Try restarting the ONOS container running the `virtualbng` app: `$ ssh ubuntu@onos_vbng "sudo docker restart ONOS"`
diff --git a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz b/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
deleted file mode 100644
index dcc6765..0000000
--- a/xos/configurations/cord/ceilometer_vcpe_notification_agent.tar.gz
+++ /dev/null
Binary files differ
diff --git a/xos/configurations/cord/cord.yaml b/xos/configurations/cord/cord.yaml
index e9a6291..9929a84 100644
--- a/xos/configurations/cord/cord.yaml
+++ b/xos/configurations/cord/cord.yaml
@@ -7,7 +7,6 @@
topology_template:
node_templates:
-
# CORD Services
service_volt:
type: tosca.nodes.Service
@@ -15,6 +14,12 @@
- vcpe_tenant:
node: service_vcpe
relationship: tosca.relationships.TenantOfService
+ - lan_network:
+ node: lan_network
+ relationship: tosca.relationships.UsesNetwork
+ - wan_network:
+ node: wan_network
+ relationship: tosca.relationships.UsesNetwork
properties:
view_url: /admin/cord/voltservice/$id$/
kind: vOLT
@@ -29,6 +34,7 @@
view_url: /admin/cord/vcpeservice/$id$/
backend_network_label: hpc_client
public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+ private_key_fn: /opt/xos/observers/vcpe/vcpe_private_key
artifacts:
pubkey: /opt/xos/observers/vcpe/vcpe_public_key
@@ -58,7 +64,7 @@
# - (Synchronizer should copy the files to ONOS container immediately)
# - Log into service_ONOS_vBNG VM and restart ONOS Docker container
# (Should roll this step into a Synchronizer)
#
vBNG_ONOS_app:
type: tosca.nodes.ONOSvBNGApp
requirements:
@@ -116,6 +122,7 @@
artifacts:
pubkey: /opt/xos/observers/onos/onos_key.pub
+
vOLT_ONOS_app:
type: tosca.nodes.ONOSvOLTApp
requirements:
@@ -126,7 +133,22 @@
node: service_volt
relationship: tosca.relationships.UsedByService
properties:
- dependencies: org.onosproject.olt
+ dependencies: org.onosproject.openflow, org.onosproject.olt
+ config_network-cfg.json: >
+ {
+ "devices" : {
+ "of:0000000000000001" : {
+ "accessDevice" : {
+ "uplink" : "2",
+ "vlan" : "222",
+ "defaultVlan" : "1"
+ },
+ "basic" : {
+ "driver" : "default"
+ }
+ }
+ }
+ }
# Network templates
Private:
@@ -225,6 +247,11 @@
- site:
node: mysite
relationship: tosca.relationships.MemberOfSite
+ - vcpe_docker_image:
+ node: andybavier/docker-vcpe
+ relationship: tosca.relationships.UsesImage
+ properties:
+ default_isolation: container
mysite_onos_vbng:
description: ONOS Controller Slice for vBNG
@@ -387,6 +414,15 @@
node: mysite_clients
relationship: tosca.relationships.MemberOfSlice
+ # docker image for vcpe containers
+ andybavier/docker-vcpe:
+ # TODO: need to attach this to mydeployment
+ type: tosca.nodes.Image
+ properties:
+ kind: container
+ container_format: na
+ disk_format: na
+
# A subscriber
My House:
type: tosca.nodes.CORDSubscriber
@@ -441,7 +477,8 @@
type: tosca.nodes.VOLTTenant
properties:
service_specific_id: 123
- vlan_id: 432
+ s_tag: 222
+ c_tag: 432
requirements:
- provider_service:
node: service_volt
diff --git a/xos/configurations/cord/dataplane/dataplane-bm.yaml b/xos/configurations/cord/dataplane/dataplane-bm.yaml
new file mode 100644
index 0000000..e1e78ee
--- /dev/null
+++ b/xos/configurations/cord/dataplane/dataplane-bm.yaml
@@ -0,0 +1,36 @@
+---
+- hosts: switch_volt
+ sudo: yes
+ tasks:
+ - name: Create tunnel port on br-lan
+ openvswitch_port:
+ bridge=br-lan
+ port={{ item }}
+ state=present
+ with_items: "grenames.split(' ')"
+
+ - name: Set up GRE tunnel to vCPE
+ shell: ovs-vsctl set Interface {{ item.0 }} type=gre options:remote_ip={{ item.1 }}
+ with_together:
+ - "grenames.split(' ')"
+ - "bm_ips.split(' ')"
+
+- hosts: baremetal
+
+ user: root
+ sudo: no
+ tasks:
+ - name: Create br-lan
+ openvswitch_bridge:
+ bridge=br-lan
+ state=present
+
+ - name: Create tunnel port
+ openvswitch_port:
+ bridge=br-lan
+ port={{ grename }}
+ state=present
+
+ - name: Configure GRE tunnel to vOLT switch
+ shell: ovs-vsctl set Interface {{ grename }} type=gre options:remote_ip={{ volt_addr }}
+
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index 4799515..f43e4d7 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -47,6 +47,9 @@
port={{ public_net.stdout }}
state=present
+ - name: Remove IP address on public_network
+ command: /sbin/ifconfig {{ public_net.stdout }} 0.0.0.0
+
- name: Change datapath ID of bridge to match config file
command: /usr/bin/ovs-vsctl set bridge br-vbng other-config:datapath-id={{ ovs_dpid }}
@@ -58,6 +61,7 @@
vars:
controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
controller_port: 6653
+ vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
tags:
- volt
tasks:
@@ -73,6 +77,7 @@
with_items:
- git
- python-netifaces
+ - openvswitch-switch
- name: Checkout the Mininet repo
git: repo=https://github.com/mininet/mininet.git
@@ -87,14 +92,156 @@
script: scripts/if_from_ip.py {{ subscriber_ip }}
register: subscriber_net
- - name: Find lan_network interface
- script: scripts/if_from_ip.py {{ lan_ip }}
- register: lan_net
+ - name: Create bridge br-sub
+ openvswitch_bridge:
+ bridge=br-sub
+ state=present
+
+ - name: Add subscriber_net to br-sub
+ openvswitch_port:
+ bridge=br-sub
+ port={{ subscriber_net.stdout }}
+ state=present
+
+ # The CPqD switch is expecting that packets coming from the client have
+ # VLAN tag 1. However Neutron's OvS configuration eats VLAN-tagged packets.
+ # So tag them with VLAN 1 here before sending to CPqD.
+ #
+ # Note that the VLAN tag is 0 in the real-world setup, but the CPqD switch
+ # seems to have a problem with these packets.
+
+ # Using OvS to tag packets with VLAN ID 1 is not quite working for some reason.
+ # The packets from the client get tagged OK, but only the first packet from the
+ # VCPE gets its tag stripped off. Very weird. That's why we are using veth
+ # devices instead.
+ #- name: Add tag 1 to br-sub port
+ # shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
+ - name: Create a pair of veth devices
+ shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+ - name: Create veth0.1
+ shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+ - name: Bring the interfaces up
+ shell: ip link set {{ item }} up
+ with_items:
+ - veth0
+ - veth1
+ - veth0.1
+
+ - name: Add veth0.1 to br-sub
+ openvswitch_port:
+ bridge=br-sub
+ port=veth0.1
+ state=present
+
+ - name: Create bridge br-lan
+ openvswitch_bridge:
+ bridge=br-lan
+ state=present
+
+ - name: Create tunnel port on br-lan
+ openvswitch_port:
+ bridge=br-lan
+ port=gre0
+ state=present
+
+ - name: Set up GRE tunnel to vCPE
+ shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
+ - name: Check if br-lan has an IPv6 address
+ shell: ip addr show br-lan|grep inet6|awk '{print $2}'
+ register: ipv6
+
+ - name: Remove br-lan IPv6 address if present
+ shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+ when: ipv6.stdout != ""
- name: Run the datapath
- command: /usr/local/bin/ofdatapath -i {{ subscriber_net.stdout_lines[0] }},{{ lan_net.stdout_lines[0] }} punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+ command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
creates=/usr/local/var/run/ofdatapath.pid
- name: Run the control program
command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
creates=/usr/local/var/run/ofprotocol.pid
+
+- hosts: client
+ sudo: yes
+ tags:
+ - client
+ tasks:
+
+ - name: Fix /etc/hosts
+ lineinfile:
+ dest=/etc/hosts
+ regexp="127.0.0.1 localhost"
+ line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+ - name: Install packages
+ apt: name={{ item }}
+ state=latest
+ update_cache=yes
+ with_items:
+ - openvswitch-switch
+ - python-netifaces
+
+ - name: Create br-sub
+ openvswitch_bridge:
+ bridge=br-sub
+ state=present
+
+ - name: Find subscriber_network interface
+ script: scripts/if_from_ip.py {{ subscriber_ip }}
+ register: client_net
+
+ - name: Hook up subscriber-network to OvS
+ openvswitch_port:
+ bridge=br-sub
+ port={{ client_net.stdout }}
+ state=present
+
+ - name: Run some commands on br-sub
+ shell: "{{ item }}"
+ with_items:
+ - ifconfig br-sub 192.168.0.2 mtu 1400 up
+ - ethtool -K br-sub tso off
+ - ethtool -K br-sub tx off
+
+ # Run dhclient on br-sub internal interface to issue DHCP request to vCPE
+
+#
+# Need to set up a tunnel between vCPE and vOLT to keep VLAN-tagged
+# packets from being swallowed by the network.
+#
+- hosts: vcpe
+ sudo: yes
+ vars:
+ volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+ tags:
+ - vcpe
+ tasks:
+
+ - name: Install packages
+ apt: name={{ item }}
+ state=latest
+ update_cache=yes
+ with_items:
+ - openvswitch-switch
+
+ - name: Create br-lan
+ openvswitch_bridge:
+ bridge=br-lan
+ state=present
+
+ - name: Create tunnel port
+ openvswitch_port:
+ bridge=br-lan
+ port=gre0
+ state=present
+
+ - name: Configure GRE tunnel to vOLT switch
+ shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+ - name: Restart vCPEs
+ script: scripts/restart-vcpes.sh
diff --git a/xos/configurations/cord/dataplane/gen-etc-hosts.sh b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
new file mode 100755
index 0000000..ce98731
--- /dev/null
+++ b/xos/configurations/cord/dataplane/gen-etc-hosts.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# set -x
+
+source ../../common/admin-openrc.sh
+
+get_ip () {
+ LABEL=$1
+ NETWORK=$2
+ nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+}
+
+cat <<EOF
+$( get_ip mysite_onos_vbng flat-lan-1-net) onos_vbng
+$( get_ip mysite_vbng flat-lan-1-net) switch_vbng
+$( get_ip mysite_onos_volt flat-lan-1-net) onos_volt
+$( get_ip mysite_volt flat-lan-1-net) switch_volt
+$( get_ip mysite_clients flat-lan-1-net) client
+$( get_ip mysite_vcpe flat-lan-1-net) vcpe
+EOF
diff --git a/xos/configurations/cord/dataplane/generate.sh b/xos/configurations/cord/dataplane/gen-inventory.sh
similarity index 65%
rename from xos/configurations/cord/dataplane/generate.sh
rename to xos/configurations/cord/dataplane/gen-inventory.sh
index 360ed67..590376d 100755
--- a/xos/configurations/cord/dataplane/generate.sh
+++ b/xos/configurations/cord/dataplane/gen-inventory.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# set -x
-source ~/admin-openrc.sh
+source ../../common/admin-openrc.sh
get_ip () {
LABEL=$1
@@ -14,5 +14,8 @@
switch_vbng ansible_ssh_host=$( get_ip mysite_vbng flat-lan-1-net) wan_ip=$( get_ip mysite_vbng wan_network) public_ip=$( get_ip mysite_vbng tun0-net )
onos_volt ansible_ssh_host=$( get_ip mysite_onos_volt flat-lan-1-net)
-switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network) vcpe_lan_ip=$( get_ip mysite_vcpe lan_network)
+switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) subscriber_ip=$( get_ip mysite_volt subscriber_network) lan_ip=$( get_ip mysite_volt lan_network)
+
+client ansible_ssh_host=$( get_ip mysite_clients flat-lan-1-net) subscriber_ip=$( get_ip mysite_clients subscriber_network)
+vcpe ansible_ssh_host=$( get_ip mysite_vcpe flat-lan-1-net) lan_ip=$( get_ip mysite_vcpe lan_network)
EOF
diff --git a/xos/configurations/cord/dataplane/generate-bm.sh b/xos/configurations/cord/dataplane/generate-bm.sh
new file mode 100755
index 0000000..25f6fa5
--- /dev/null
+++ b/xos/configurations/cord/dataplane/generate-bm.sh
@@ -0,0 +1,37 @@
+source ../../common/admin-openrc.sh
+
+get_ip () {
+ LABEL=$1
+ NETWORK=$2
+ nova list --all-tenants|grep $LABEL|sed "s/^.*$NETWORK=//g"|sed 's/; .*$//g'|awk '{print $1}'
+ }
+
+GRE_NAMES=()
+BM_IPS=()
+
+NODES=`sudo bash -c "source /root/setup/admin-openrc.sh ; nova hypervisor-list" |grep cloudlab|awk '{print $4}'`
+I=1
+for NODE in $NODES; do
+ BM_SSH_IP=`getent hosts $NODE | awk '{ print $1 }'`
+ IFS=. read BM_NAME BM_REMAINDER <<< $NODE
+ BM_IP=`sudo grep -i $BM_NAME /root/setup/data-hosts.flat-lan-1 | awk '{print $1}'`
+
+ GRE_NAMES+=("gre-bm-$I")
+ BM_IPS+=("$BM_IP")
+
+ #echo switch_volt$I ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grename=gre-bm-$I bm_addr=$BM_IP
+ echo bm$I ansible_ssh_host=$BM_SSH_IP grename=gre-bm-$I volt_addr=$( get_ip mysite_volt flat-lan-1-net) ansible_ssh_private_key_file=/root/.ssh/id_rsa
+ I=$(( I+1 ))
+done
+
+GRE_NAMES=${GRE_NAMES[@]}
+BM_IPS=${BM_IPS[@]}
+
+echo switch_volt ansible_ssh_host=$( get_ip mysite_volt flat-lan-1-net) grenames=\"$GRE_NAMES\" bm_ips=\"$BM_IPS\"
+
+echo "[baremetal]"
+I=1
+for NODE in $NODES; do
+ echo bm$I
+ I=$((I+1))
+done
diff --git a/xos/configurations/cord/dataplane/scripts/if_from_ip.py b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
index be1da48..28524fe 100644
--- a/xos/configurations/cord/dataplane/scripts/if_from_ip.py
+++ b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
@@ -8,7 +8,7 @@
for iface in netifaces.interfaces():
addrs = netifaces.ifaddresses(iface)
if 2 in addrs and addrs[2][0]['addr'] == addr:
- print iface
-
+ sys.stdout.write(iface)
+
if __name__ == "__main__":
main(sys.argv[1:])
diff --git a/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
new file mode 100644
index 0000000..d1c9fce
--- /dev/null
+++ b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for VCPE in $( docker ps|grep vcpe|awk '{print $NF}' )
+do
+ service $VCPE stop
+ sleep 1
+ service $VCPE start
+done
diff --git a/xos/configurations/cord/install_ceilometer_patch.sh b/xos/configurations/cord/install_ceilometer_patch.sh
new file mode 100755
index 0000000..279e07c
--- /dev/null
+++ b/xos/configurations/cord/install_ceilometer_patch.sh
@@ -0,0 +1,20 @@
+if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
+ echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
+ exit 0
+fi
+echo "Verifying if all the required files are present"
+if [ ! -f openstack_ceilometer_patch.tar.gz ];
+then
+ echo "File openstack_ceilometer_patch.tar.gz not found"
+ exit 1
+fi
+echo "Copying the ceilometer patch files to /usr/lib/python2.7/dist-packages/ceilometer"
+tar -xzf openstack_ceilometer_patch.tar.gz
+sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
+sudo mv ceilometer/network/statistics /usr/lib/python2.7/dist-packages/ceilometer/network/
+sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-*egg-info/
+sudo mv pipeline.yaml /etc/ceilometer/
+echo "Restarting ceilometer-agent-notification"
+sudo service ceilometer-agent-notification restart
+echo "Restarting ceilometer-agent-central"
+sudo service ceilometer-agent-central restart
diff --git a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh b/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
deleted file mode 100755
index 50a4132..0000000
--- a/xos/configurations/cord/install_ceilometer_vcpe_notification_listener.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-if [ -d /usr/lib/python2.7/dist-packages/ceilometer/network/ext_services ]; then
- echo "Seems VCPE notification listeners are already enabled in ceilometer... so exiting gracefully..."
- exit 0
-fi
-echo "Verifying if all the required files are present"
-if [ ! -f ceilometer_vcpe_notification_agent.tar.gz ];
-then
- echo "File ceilometer_vcpe_notification_agent.tar.gz not found"
- exit 1
-fi
-echo "Copying the ceilometer vcpe notification agent files /usr/lib/python2.7/dist-packages/ceilometer"
-tar -xzf ceilometer_vcpe_notification_agent.tar.gz
-sudo mv ceilometer/network/ext_services /usr/lib/python2.7/dist-packages/ceilometer/network/
-sudo mv ceilometer-2015.1.1.egg-info/entry_points.txt /usr/lib/python2.7/dist-packages/ceilometer-2015.1.1.egg-info/
-echo "Restarting ceilometer-agent-notification"
-sudo service ceilometer-agent-notification restart
diff --git a/xos/configurations/cord/openstack_ceilometer_patch.tar.gz b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
new file mode 100644
index 0000000..dc1852a
--- /dev/null
+++ b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
Binary files differ
diff --git a/xos/configurations/cord/xos_metering_dashboard.tar.gz b/xos/configurations/cord/xos_metering_dashboard.tar.gz
index 648f91a..3b4d127 100644
--- a/xos/configurations/cord/xos_metering_dashboard.tar.gz
+++ b/xos/configurations/cord/xos_metering_dashboard.tar.gz
Binary files differ
diff --git a/xos/configurations/frontend/Dockerfile.frontend b/xos/configurations/frontend/Dockerfile.frontend
index bdf9186..8372fbf 100644
--- a/xos/configurations/frontend/Dockerfile.frontend
+++ b/xos/configurations/frontend/Dockerfile.frontend
@@ -7,17 +7,6 @@
RUN echo "autostart=false" >> /etc/supervisor/conf.d/observer.conf
-# Install Node.js
-RUN sudo apt-get install -y \
- nodejs \
- npm
-
-# Link nodejs to node
-RUN sudo ln -s "$(which nodejs)" /usr/bin/node
-
-RUN echo "Node Version: "
-RUN node -v
-
# TODO
# - Enter /opt/xos/core/xoslib and run npm install
diff --git a/xos/cord/admin.py b/xos/cord/admin.py
index 6137212..686d8ae 100644
--- a/xos/cord/admin.py
+++ b/xos/cord/admin.py
@@ -51,7 +51,8 @@
return VOLTService.get_service_objects_by_user(request.user)
class VOLTTenantForm(forms.ModelForm):
- vlan_id = forms.CharField()
+ s_tag = forms.CharField()
+ c_tag = forms.CharField()
creator = forms.ModelChoiceField(queryset=User.objects.all())
def __init__(self,*args,**kwargs):
@@ -60,7 +61,8 @@
self.fields['provider_service'].queryset = VOLTService.get_service_objects().all()
if self.instance:
# fields for the attributes
- self.fields['vlan_id'].initial = self.instance.vlan_id
+ self.fields['c_tag'].initial = self.instance.c_tag
+ self.fields['s_tag'].initial = self.instance.s_tag
self.fields['creator'].initial = self.instance.creator
if (not self.instance) or (not self.instance.pk):
# default fields for an 'add' form
@@ -70,7 +72,8 @@
self.fields["provider_service"].initial = VOLTService.get_service_objects().all()[0]
def save(self, commit=True):
- self.instance.vlan_id = self.cleaned_data.get("vlan_id")
+ self.instance.s_tag = self.cleaned_data.get("s_tag")
+ self.instance.c_tag = self.cleaned_data.get("c_tag")
self.instance.creator = self.cleaned_data.get("creator")
return super(VOLTTenantForm, self).save(commit=commit)
@@ -78,10 +81,10 @@
model = VOLTTenant
class VOLTTenantAdmin(ReadOnlyAwareAdmin):
- list_display = ('backend_status_icon', 'id', 'service_specific_id', 'vlan_id', 'subscriber_root' )
+ list_display = ('backend_status_icon', 'id', 'service_specific_id', 's_tag', 'c_tag', 'subscriber_root' )
list_display_links = ('backend_status_icon', 'id')
fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_root', 'service_specific_id', # 'service_specific_attribute',
- 'vlan_id', 'creator'],
+ 's_tag', 'c_tag', 'creator'],
'classes':['suit-tab suit-tab-general']})]
readonly_fields = ('backend_status_text', 'service_specific_attribute')
form = VOLTTenantForm
diff --git a/xos/cord/models.py b/xos/cord/models.py
index 67ffdc7..daac40c 100644
--- a/xos/cord/models.py
+++ b/xos/cord/models.py
@@ -1,5 +1,5 @@
from django.db import models
-from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
@@ -254,7 +254,7 @@
KIND = VOLT_KIND
- default_attributes = {"vlan_id": None, }
+ default_attributes = {"vlan_id": None, "s_tag": None, "c_tag": None}
def __init__(self, *args, **kwargs):
volt_services = VOLTService.get_service_objects().all()
if volt_services:
@@ -263,12 +263,30 @@
self.cached_vcpe = None
@property
+ def s_tag(self):
+ return self.get_attribute("s_tag", self.default_attributes["s_tag"])
+
+ @s_tag.setter
+ def s_tag(self, value):
+ self.set_attribute("s_tag", value)
+
+ @property
+ def c_tag(self):
+ return self.get_attribute("c_tag", self.default_attributes["c_tag"])
+
+ @c_tag.setter
+ def c_tag(self, value):
+ self.set_attribute("c_tag", value)
+
+ # for now, vlan_id is a synonym for c_tag
+
+ @property
def vlan_id(self):
- return self.get_attribute("vlan_id", self.default_attributes["vlan_id"])
+ return self.c_tag
@vlan_id.setter
def vlan_id(self, value):
- self.set_attribute("vlan_id", value)
+ self.c_tag = value
@property
def vcpe(self):
@@ -470,6 +488,7 @@
"hpc_client_ip", "hpc_client_mac")
default_attributes = {"instance_id": None,
+ "container_id": None,
"users": [],
"bbs_account": None,
"last_ansible_hash": None}
@@ -534,11 +553,15 @@
@property
def addresses(self):
- if not self.instance:
+ if self.instance:
+ ports = self.instance.ports.all()
+ elif self.container:
+ ports = self.container.ports.all()
+ else:
return {}
addresses = {}
- for ns in self.instance.ports.all():
+ for ns in ports:
if "lan" in ns.network.name.lower():
addresses["lan"] = (ns.ip, ns.mac)
elif "wan" in ns.network.name.lower():
@@ -655,6 +678,37 @@
self.bbs_account = None
super(VCPETenant, self).save()
+ def find_or_make_port(self, instance, network, **kwargs):
+ port = Port.objects.filter(instance=instance, network=network)
+ if port:
+ port = port[0]
+ else:
+ port = Port(instance=instance, network=network, **kwargs)
+ port.save()
+ return port
+
+ def save_instance(self, instance):
+ with transaction.atomic():
+ instance.volumes = "/etc/dnsmasq.d"
+ super(VCPETenant, self).save_instance(instance)
+
+ if instance.isolation in ["container", "container_vm"]:
+ lan_networks = [x for x in instance.slice.networks.all() if "lan" in x.name]
+ if not lan_networks:
+ raise XOSProgrammingError("No lan_network")
+ port = self.find_or_make_port(instance, lan_networks[0], ip="192.168.0.1", port_id="unmanaged")
+ port.set_parameter("c_tag", self.volt.c_tag)
+ port.set_parameter("s_tag", self.volt.s_tag)
+ port.set_parameter("device", "eth1")
+ port.set_parameter("bridge", "br-lan")
+
+ wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
+ if not wan_networks:
+ raise XOSProgrammingError("No wan_network")
+ port = self.find_or_make_port(instance, wan_networks[0])
+ port.set_parameter("next_hop", value="10.0.1.253") # FIX ME
+ port.set_parameter("device", "eth0")
+
def save(self, *args, **kwargs):
if not self.creator:
if not getattr(self, "caller", None):
diff --git a/xos/cord/rest_examples/add_volt_tenant.sh b/xos/cord/rest_examples/add_volt_tenant.sh
index 5dd3dd4..4bbe2bb 100755
--- a/xos/cord/rest_examples/add_volt_tenant.sh
+++ b/xos/cord/rest_examples/add_volt_tenant.sh
@@ -3,8 +3,9 @@
source ./config.sh
SERVICE_SPECIFIC_ID=1238
-VLAN_ID=1238
+C_TAG=1238
+S_TAG=3333
-echo curl "-H \"Accept: application/json; indent=4\" -H \"Content-Type: application/json\" -u $AUTH -X POST -d \"{\\\"service_specific_id\\\": \\\"$SERVICE_SPECIFIC_ID\\\", \\\"vlan_id\\\": \\\"$VLAN_ID\\\"}\" $HOST/xoslib/volttenant/"
+echo curl "-H \"Accept: application/json; indent=4\" -H \"Content-Type: application/json\" -u $AUTH -X POST -d \"{\\\"service_specific_id\\\": \\\"$SERVICE_SPECIFIC_ID\\\", \\\"c_tag\\\": \\\"$C_TAG\\\", \\\"s_tag\\\": \\\"$S_TAG\\\"}\" $HOST/xoslib/volttenant/"
-curl -H "Accept: application/json; indent=4" -H "Content-Type: application/json" -u $AUTH -X POST -d "{\"service_specific_id\": \"$SERVICE_SPECIFIC_ID\", \"vlan_id\": \"$VLAN_ID\"}" $HOST/xoslib/volttenant/
+curl -H "Accept: application/json; indent=4" -H "Content-Type: application/json" -u $AUTH -X POST -d "{\"service_specific_id\": \"$SERVICE_SPECIFIC_ID\", \"c_tag\": \"$C_TAG\", \"s_tag\": \"$S_TAG\"}" $HOST/xoslib/volttenant/
diff --git a/xos/cord/rest_examples/config.sh b/xos/cord/rest_examples/config.sh
index 7b8c8e1..06162ee 100644
--- a/xos/cord/rest_examples/config.sh
+++ b/xos/cord/rest_examples/config.sh
@@ -1,5 +1,6 @@
#HOST=198.0.0.44:8000
-HOST=10.254.1.22:8000
+#HOST=10.254.1.22:8000
+HOST=clnode050.clemson.cloudlab.us:9999
#AUTH=scott@onlab.us:letmein
AUTH=padmin@vicci.org:letmein
diff --git a/xos/core/admin.py b/xos/core/admin.py
index a0cabd1..be9dcc0 100644
--- a/xos/core/admin.py
+++ b/xos/core/admin.py
@@ -23,7 +23,6 @@
from cgi import escape as html_escape
from django.contrib import messages
-import django_evolution
import threading
# thread locals necessary to work around a django-suit issue
@@ -106,6 +105,10 @@
# this 'if' might be redundant if save_by_user is implemented right
raise PermissionDenied
+ # reset exponential backoff
+ if hasattr(obj, "backend_register"):
+ obj.backend_register = "{}"
+
obj.caller = request.user
# update openstack connection to use this site/tenant
obj.save_by_user(request.user)
@@ -880,7 +883,7 @@
class ServiceAdmin(XOSBaseAdmin):
list_display = ("backend_status_icon","name","kind","versionNumber","enabled","published")
list_display_links = ('backend_status_icon', 'name', )
- fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url","public_key","service_specific_attribute","service_specific_id"]
+ fieldList = ["backend_status_text","name","kind","description","versionNumber","enabled","published","view_url","icon_url","public_key","private_key_fn","service_specific_attribute","service_specific_id"]
fieldsets = [(None, {'fields': fieldList, 'classes':['suit-tab suit-tab-general']})]
inlines = [ServiceAttrAsTabInline,SliceInline,ProviderTenantInline,SubscriberTenantInline,ServicePrivilegeInline]
readonly_fields = ('backend_status_text', )
@@ -1051,7 +1054,7 @@
class SliceAdmin(XOSBaseAdmin):
form = SliceForm
- fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances']
+ fieldList = ['backend_status_text', 'site', 'name', 'serviceClass', 'enabled','description', 'service', 'slice_url', 'max_instances', "default_isolation"]
fieldsets = [('Slice Details', {'fields': fieldList, 'classes':['suit-tab suit-tab-general']}),]
readonly_fields = ('backend_status_text', )
list_display = ('backend_status_icon', 'name', 'site','serviceClass', 'slice_url', 'max_instances')
@@ -1203,7 +1206,7 @@
class ImageAdmin(XOSBaseAdmin):
fieldsets = [('Image Details',
- {'fields': ['backend_status_text', 'name', 'disk_format', 'container_format'],
+ {'fields': ['backend_status_text', 'name', 'kind', 'disk_format', 'container_format'],
'classes': ['suit-tab suit-tab-general']})
]
readonly_fields = ('backend_status_text', )
@@ -1214,7 +1217,7 @@
user_readonly_fields = ['name', 'disk_format', 'container_format']
- list_display = ['backend_status_icon', 'name']
+ list_display = ['backend_status_icon', 'name', 'kind']
list_display_links = ('backend_status_icon', 'name', )
class NodeForm(forms.ModelForm):
@@ -1273,7 +1276,7 @@
fields = ['backend_status_icon', 'network', 'instance', 'ip', 'mac']
readonly_fields = ("backend_status_icon", "ip", "mac")
model = Port
- selflink_fieldname = "network"
+ #selflink_fieldname = "network"
extra = 0
verbose_name_plural = "Ports"
verbose_name = "Port"
@@ -1282,13 +1285,14 @@
class InstanceAdmin(XOSBaseAdmin):
form = InstanceForm
fieldsets = [
- ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'flavor', 'image', 'node', 'all_ips_string', 'instance_id', 'instance_name', 'ssh_command'], 'classes': ['suit-tab suit-tab-general'], })
+ ('Instance Details', {'fields': ['backend_status_text', 'slice', 'deployment', 'isolation', 'flavor', 'image', 'node', 'parent', 'all_ips_string', 'instance_id', 'instance_name', 'ssh_command', ], 'classes': ['suit-tab suit-tab-general'], }),
+ ('Container Settings', {'fields': ['volumes'], 'classes': ['suit-tab suit-tab-container'], }),
]
readonly_fields = ('backend_status_text', 'ssh_command', 'all_ips_string')
- list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'slice', 'flavor', 'image', 'node', 'deployment']
+ list_display = ['backend_status_icon', 'all_ips_string', 'instance_id', 'instance_name', 'isolation', 'slice', 'flavor', 'image', 'node', 'deployment']
list_display_links = ('backend_status_icon', 'all_ips_string', 'instance_id', )
- suit_form_tabs =(('general', 'Instance Details'), ('ports', 'Ports'))
+ suit_form_tabs =(('general', 'Instance Details'), ('ports', 'Ports'), ('container', 'Container Settings'))
inlines = [TagInline, InstancePortInline]
@@ -1372,38 +1376,38 @@
# obj.os_manager = OpenStackManager(auth=auth, caller=request.user)
# obj.delete()
-class ContainerPortInline(XOSTabularInline):
- fields = ['backend_status_icon', 'network', 'container', 'ip', 'mac', 'segmentation_id']
- readonly_fields = ("backend_status_icon", "ip", "mac", "segmentation_id")
- model = Port
- selflink_fieldname = "network"
- extra = 0
- verbose_name_plural = "Ports"
- verbose_name = "Port"
- suit_classes = 'suit-tab suit-tab-ports'
+#class ContainerPortInline(XOSTabularInline):
+# fields = ['backend_status_icon', 'network', 'container', 'ip', 'mac', 'segmentation_id']
+# readonly_fields = ("backend_status_icon", "ip", "mac", "segmentation_id")
+# model = Port
+# selflink_fieldname = "network"
+# extra = 0
+# verbose_name_plural = "Ports"
+# verbose_name = "Port"
+# suit_classes = 'suit-tab suit-tab-ports'
-class ContainerAdmin(XOSBaseAdmin):
- fieldsets = [
- ('Container Details', {'fields': ['backend_status_text', 'slice', 'node', 'docker_image', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
- ]
- readonly_fields = ('backend_status_text', )
- list_display = ['backend_status_icon', 'id']
- list_display_links = ('backend_status_icon', 'id', )
-
- suit_form_tabs =(('general', 'Container Details'), ('ports', 'Ports'))
-
- inlines = [TagInline, ContainerPortInline]
-
- def formfield_for_foreignkey(self, db_field, request, **kwargs):
- if db_field.name == 'slice':
- kwargs['queryset'] = Slice.select_by_user(request.user)
-
- return super(ContainerAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
-
- def queryset(self, request):
- # admins can see all instances. Users can only see instances of
- # the slices they belong to.
- return Container.select_by_user(request.user)
+#class ContainerAdmin(XOSBaseAdmin):
+# fieldsets = [
+# ('Container Details', {'fields': ['backend_status_text', 'slice', 'node', 'docker_image', 'volumes', 'no_sync'], 'classes': ['suit-tab suit-tab-general'], })
+# ]
+# readonly_fields = ('backend_status_text', )
+# list_display = ['backend_status_icon', 'id']
+# list_display_links = ('backend_status_icon', 'id', )
+#
+# suit_form_tabs =(('general', 'Container Details'), ('ports', 'Ports'))
+#
+# inlines = [TagInline, ContainerPortInline]
+#
+# def formfield_for_foreignkey(self, db_field, request, **kwargs):
+# if db_field.name == 'slice':
+# kwargs['queryset'] = Slice.select_by_user(request.user)
+#
+# return super(ContainerAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
+#
+# def queryset(self, request):
+# # admins can see all instances. Users can only see instances of
+# # the slices they belong to.
+# return Container.select_by_user(request.user)
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
@@ -1760,10 +1764,10 @@
readonly_fields = ('backend_status_icon', )
class NetworkPortInline(XOSTabularInline):
- fields = ['backend_status_icon', 'network', 'instance', 'container', 'ip', 'mac']
+ fields = ['backend_status_icon', 'network', 'instance', 'ip', 'mac']
readonly_fields = ("backend_status_icon", "ip", "mac")
model = Port
- selflink_fieldname = "instance"
+ #selflink_fieldname = "instance"
extra = 0
verbose_name_plural = "Ports"
verbose_name = "Port"
@@ -1842,10 +1846,25 @@
list_display_links = ('backend_status_icon', 'name', )
user_readonly_fields = ["name", "guaranteed_bandwidth", "visibility"]
user_readonly_inlines = []
+ inlines = [NetworkParameterInline,]
fieldsets = [
(None, {'fields': ['name', 'description', 'guaranteed_bandwidth', 'visibility', 'translation', 'shared_network_name', 'shared_network_id', 'topology_kind', 'controller_kind'],
'classes':['suit-tab suit-tab-general']}),]
- suit_form_tabs = (('general','Network Template Details'), )
+ suit_form_tabs = (('general','Network Template Details'), ('netparams', 'Parameters') )
+
+class PortAdmin(XOSBaseAdmin):
+ list_display = ("backend_status_icon", "name", "id", "ip")
+ list_display_links = ('backend_status_icon', 'id')
+ readonly_fields = ("subnet", )
+ inlines = [NetworkParameterInline]
+
+ fieldsets = [
+ (None, {'fields': ['backend_status_text', 'network', 'instance', 'ip', 'port_id', 'mac'],
+ 'classes':['suit-tab suit-tab-general']}),
+ ]
+
+ readonly_fields = ('backend_status_text', )
+ suit_form_tabs = (('general', 'Port Details'), ('netparams', 'Parameters'))
class FlavorAdmin(XOSBaseAdmin):
list_display = ("backend_status_icon", "name", "flavor", "order", "default")
@@ -2017,12 +2036,6 @@
# unregister the Group model from admin.
#admin.site.unregister(Group)
-#Do not show django evolution in the admin interface
-from django_evolution.models import Version, Evolution
-#admin.site.unregister(Version)
-#admin.site.unregister(Evolution)
-
-
# When debugging it is often easier to see all the classes, but for regular use
# only the top-levels should be displayed
showAll = False
@@ -2034,6 +2047,7 @@
admin.site.register(Service, ServiceAdmin)
#admin.site.register(Reservation, ReservationAdmin)
admin.site.register(Network, NetworkAdmin)
+admin.site.register(Port, PortAdmin)
admin.site.register(Router, RouterAdmin)
admin.site.register(NetworkTemplate, NetworkTemplateAdmin)
admin.site.register(Program, ProgramAdmin)
@@ -2057,5 +2071,5 @@
admin.site.register(TenantRoot, TenantRootAdmin)
admin.site.register(TenantRootRole, TenantRootRoleAdmin)
admin.site.register(TenantAttribute, TenantAttributeAdmin)
- admin.site.register(Container, ContainerAdmin)
+# admin.site.register(Container, ContainerAdmin)
diff --git a/xos/core/models/__init__.py b/xos/core/models/__init__.py
index bc97dab..c380e9c 100644
--- a/xos/core/models/__init__.py
+++ b/xos/core/models/__init__.py
@@ -24,7 +24,6 @@
from .node import Node
from .slicetag import SliceTag
from .instance import Instance
-from .container import Container
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, Port, NetworkTemplate, Router, NetworkSlice, ControllerNetwork
diff --git a/xos/core/models/container.py b/xos/core/models/container.py
deleted file mode 100644
index 151b576..0000000
--- a/xos/core/models/container.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-from django.db import models
-from django.db.models import Q
-from django.core import exceptions
-from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
-from core.models.plcorebase import StrippedCharField
-from core.models import Image
-from core.models import Slice, SlicePrivilege
-from core.models import Node
-from core.models import Site
-from core.models import Deployment
-from core.models import Controller
-from core.models import User
-from core.models import Tag
-from core.models import Flavor
-from django.contrib.contenttypes import generic
-from xos.config import Config
-from monitor import driver as monitor
-from django.core.exceptions import PermissionDenied, ValidationError
-
-config = Config()
-
-
-# Create your models here.
-class Container(PlCoreBase):
- name = StrippedCharField(max_length=200, help_text="Container name")
- slice = models.ForeignKey(Slice, related_name='containers')
- node = models.ForeignKey(Node, related_name='containers')
- creator = models.ForeignKey(User, related_name='containers', blank=True, null=True)
- docker_image = StrippedCharField(null=True, blank=True, max_length=200, help_text="name of docker container to instantiate")
-
- def __unicode__(self):
- return u'container-%s' % str(self.id)
-
- def save(self, *args, **kwds):
- if not self.name:
- self.name = self.slice.name
- if not self.creator and hasattr(self, 'caller'):
- self.creator = self.caller
- if not self.creator:
- raise ValidationError('container has no creator')
-
- if (self.slice.creator != self.creator):
- # Check to make sure there's a slice_privilege for the user. If there
- # isn't, then keystone will throw an exception inside the observer.
- slice_privs = SlicePrivilege.objects.filter(slice=self.slice, user=self.creator)
- if not slice_privs:
- raise ValidationError('container creator has no privileges on slice')
-
-# XXX smbaker - disabled for now, was causing fault in tenant view create slice
-# if not self.controllerNetwork.test_acl(slice=self.slice):
-# raise exceptions.ValidationError("Deployment %s's ACL does not allow any of this slice %s's users" % (self.controllerNetwork.name, self.slice.name))
-
- super(Container, self).save(*args, **kwds)
-
- def can_update(self, user):
- return True
-
- @staticmethod
- def select_by_user(user):
- if user.is_admin:
- qs = Container.objects.all()
- else:
- slices = Slice.select_by_user(user)
- qs = Container.objects.filter(slice__in=slices)
- return qs
-
- def get_public_keys(self):
- slice_memberships = SlicePrivilege.objects.filter(slice=self.slice)
- pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
-
- if self.creator.public_key:
- pubkeys.add(self.creator.public_key)
-
- if self.slice.creator.public_key:
- pubkeys.add(self.slice.creator.public_key)
-
- if self.slice.service and self.slice.service.public_key:
- pubkeys.add(self.slice.service.public_key)
-
- return pubkeys
-
-
diff --git a/xos/core/models/image.py b/xos/core/models/image.py
index 21d4f23..1a3cbf7 100644
--- a/xos/core/models/image.py
+++ b/xos/core/models/image.py
@@ -7,7 +7,10 @@
# Create your models here.
class Image(PlCoreBase):
+ KIND_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), )
+
name = StrippedCharField(max_length=256, unique=True)
+ kind = models.CharField(null=False, blank=False, max_length=30, choices=KIND_CHOICES, default="vm")
disk_format = StrippedCharField(max_length=256)
container_format = StrippedCharField(max_length=256)
path = StrippedCharField(max_length=256, null=True, blank=True, help_text="Path to image on local disk")
diff --git a/xos/core/models/instance.py b/xos/core/models/instance.py
index 75826f6..cd7dd26 100644
--- a/xos/core/models/instance.py
+++ b/xos/core/models/instance.py
@@ -80,6 +80,8 @@
# Create your models here.
class Instance(PlCoreBase):
+ ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
+
objects = InstanceManager()
deleted_objects = InstanceDeletionManager()
instance_id = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance id")
@@ -88,7 +90,6 @@
instance_name = StrippedCharField(blank=True, null=True, max_length=200, help_text="OpenStack generated name")
ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
image = models.ForeignKey(Image, related_name='instances')
- #key = models.ForeignKey(Key, related_name='instances')
creator = models.ForeignKey(User, related_name='instances', blank=True, null=True)
slice = models.ForeignKey(Slice, related_name='instances')
deployment = models.ForeignKey(Deployment, verbose_name='deployment', related_name='instance_deployment')
@@ -97,6 +98,9 @@
flavor = models.ForeignKey(Flavor, help_text="Flavor of this instance", default=get_default_flavor)
tags = generic.GenericRelation(Tag)
userData = models.TextField(blank=True, null=True, help_text="user_data passed to instance during creation")
+ isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
+ volumes = models.TextField(null=True, blank=True, help_text="Comma-separated list of directories to expose to parent context")
+ parent = models.ForeignKey("Instance", null=True, blank=True, help_text="Parent Instance for containers nested inside of VMs")
def __unicode__(self):
if self.name and Slice.objects.filter(id=self.slice_id) and (self.name != self.slice.name):
@@ -120,6 +124,19 @@
if not self.creator:
raise ValidationError('instance has no creator')
+ if (self.isolation == "container") or (self.isolation == "container_vm"):
+ if (self.image.kind != "container"):
+ raise ValidationError("Container instance must use container image")
+ elif (self.isolation == "vm"):
+ if (self.image.kind != "vm"):
+ raise ValidationError("VM instance must use VM image")
+
+ if (self.isolation == "container_vm") and (not self.parent):
+ raise ValidationError("Container-vm instance must have a parent")
+
+ if (self.parent) and (self.isolation != "container_vm"):
+ raise ValidationError("Parent field can only be set on Container-vm instances")
+
if (self.slice.creator != self.creator):
# Check to make sure there's a slice_privilege for the user. If there
# isn't, then keystone will throw an exception inside the observer.
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index 48af5a6..6894f9f 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -2,7 +2,7 @@
import socket
import sys
from django.db import models
-from core.models import PlCoreBase, Site, Slice, Instance, Controller, Container
+from core.models import PlCoreBase, Site, Slice, Instance, Controller
from core.models import ControllerLinkManager,ControllerLinkDeletionManager
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
@@ -64,7 +64,38 @@
except Exception,e:
raise ValidationError(str(e))
-class NetworkTemplate(PlCoreBase):
+class ParameterMixin(object):
+ # helper mixin for dealing with NetworkParameter objects
+
+ def get_parameters(self):
+ parameter_dict = {}
+
+ instance_type = ContentType.objects.get_for_model(self)
+ for param in NetworkParameter.objects.filter(content_type__pk=instance_type.id, object_id=self.id):
+ parameter_dict[param.parameter.name] = param.value
+
+ return parameter_dict
+
+ def set_parameter(self, name, value):
+ instance_type = ContentType.objects.get_for_model(self)
+ existing_params = NetworkParameter.objects.filter(parameter__name=name, content_type__pk=instance_type.id, object_id=self.id)
+ if existing_params:
+ p=existing_params[0]
+ p.value = value
+ p.save()
+ else:
+ pt = NetworkParameterType.objects.get(name=name)
+ p = NetworkParameter(parameter=pt, content_type=instance_type, object_id=self.id, value=value)
+ p.save()
+
+ def unset_parameter(self, name):
+ instance_type = ContentType.objects.get_for_model(self)
+ existing_params = NetworkParameter.objects.filter(parameter__name=name, content_type__pk=instance_type.id, object_id=self.id)
+ for p in existing_params:
+ p.delete()
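+
+ # Illustrative usage (hypothetical object and parameter names; assumes a
+ # NetworkParameterType named "vlan" already exists):
+ # net.set_parameter("vlan", "100")
+ # net.get_parameters() # -> {"vlan": "100", ...}
+ # net.unset_parameter("vlan")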
+
+
+class NetworkTemplate(PlCoreBase, ParameterMixin):
VISIBILITY_CHOICES = (('public', 'public'), ('private', 'private'))
TRANSLATION_CHOICES = (('none', 'none'), ('NAT', 'NAT'))
TOPOLOGY_CHOICES = (('bigswitch', 'BigSwitch'), ('physical', 'Physical'), ('custom', 'Custom'))
@@ -97,7 +128,7 @@
def __unicode__(self): return u'%s' % (self.name)
-class Network(PlCoreBase):
+class Network(PlCoreBase, ParameterMixin):
name = models.CharField(max_length=32)
template = models.ForeignKey(NetworkTemplate)
subnet = models.CharField(max_length=32, blank=True)
@@ -147,6 +178,14 @@
qs = Network.objects.filter(owner__in=slices)
return qs
+ def get_parameters(self):
+ # returns parameters from the template, updated by self.
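+ # (a value set directly on this Network overrides a same-named template value)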
+ p={}
+ if self.template:
+ p = self.template.get_parameters()
+ p.update(ParameterMixin.get_parameters(self))
+ return p
+
class ControllerNetwork(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
@@ -161,7 +200,7 @@
class Meta:
unique_together = ('network', 'controller')
-
+
@staticmethod
def select_by_user(user):
if user.is_admin:
@@ -208,15 +247,12 @@
qs = NetworkSlice.objects.filter(Q(slice__in=slice_ids) | Q(network__in=network_ids))
return qs
-class Port(PlCoreBase):
+class Port(PlCoreBase, ParameterMixin):
network = models.ForeignKey(Network,related_name='links')
instance = models.ForeignKey(Instance, null=True, blank=True, related_name='ports')
- container = models.ForeignKey(Container, null=True, blank=True, related_name='ports')
ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
- port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum port id")
+ port_id = models.CharField(null=True, blank=True, max_length=256, help_text="Neutron port id")
mac = models.CharField(null=True, blank=True, max_length=256, help_text="MAC address associated with this port")
- #unattached = models.BooleanField(default=False, help_text="create this port even if no Instance is attached")
- segmentation_id = models.CharField(null=True, blank=True, max_length=256, help_text="GRE segmentation id for port")
class Meta:
unique_together = ('network', 'instance')
@@ -258,6 +294,14 @@
qs = Port.objects.filter(Q(instance__in=instance_ids) | Q(network__in=network_ids))
return qs
+ def get_parameters(self):
+ # returns parameters from the network, updated by self.
+ p={}
+ if self.network:
+ p = self.network.get_parameters()
+ p.update(ParameterMixin.get_parameters(self))
+ return p
+
class Router(PlCoreBase):
name = models.CharField(max_length=32)
owner = models.ForeignKey(Slice, related_name="routers")
diff --git a/xos/core/models/node.py b/xos/core/models/node.py
index ec67975..5496d6b 100644
--- a/xos/core/models/node.py
+++ b/xos/core/models/node.py
@@ -16,6 +16,10 @@
def __unicode__(self): return u'%s' % (self.name)
+ def __init__(self, *args, **kwargs):
+ super(Node, self).__init__(*args, **kwargs)
+ self.no_sync=True
+
def save(self, *args, **kwds):
if self.site is None and self.site_deployment is not None:
self.site = self.site_deployment.site
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 950ce02..ba54a33 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -52,6 +52,7 @@
view_url = StrippedCharField(blank=True, null=True, max_length=1024)
icon_url = StrippedCharField(blank=True, null=True, max_length=1024)
public_key = models.TextField(null=True, blank=True, max_length=1024, help_text="Public key string")
+ private_key_fn = StrippedCharField(blank=True, null=True, max_length=1024)
# Service_specific_attribute and service_specific_id are opaque to XOS
service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
@@ -336,6 +337,106 @@
return None
return sorted(st, key=attrgetter('id'))[0]
+class Scheduler(object):
+ # XOS Scheduler Abstract Base Class
+ # Used to implement schedulers that pick which node to put instances on
+
+ def __init__(self, slice):
+ self.slice = slice
+
+ def pick(self):
+ # this method should return a tuple (node, parent)
+ # node is the node to instantiate on
+ # parent is for container_vm instances only, and is the VM that will
+ # hold the container
+
+ raise Exception("Abstract Base")
+
+class LeastLoadedNodeScheduler(Scheduler):
+ # This scheduler always returns the node with the fewest instances.
+
+ def __init__(self, slice):
+ super(LeastLoadedNodeScheduler, self).__init__(slice)
+
+ def pick(self):
+ from core.models import Node
+ nodes = list(Node.objects.all())
+ # TODO: logic to filter nodes by which nodes are up, and which
+ # nodes the slice can instantiate on.
+ nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+ return [nodes[0], None]
+
+class ContainerVmScheduler(Scheduler):
+ # This scheduler picks a VM in the slice with the fewest containers inside
+ # of it. If no VMs are suitable, then it creates a VM.
+
+ # this is a hack and should be replaced by something smarter...
+ LOOK_FOR_IMAGES=["ubuntu-vcpe4", # ONOS demo machine -- preferred vcpe image
+ "Ubuntu 14.04 LTS", # portal
+ "Ubuntu-14.04-LTS", # ONOS demo machine
+ "trusty-server-multi-nic", # CloudLab
+ ]
+
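+ # maximum number of containers to place in a single VM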
+ MAX_VM_PER_CONTAINER = 10
+
+ def __init__(self, slice):
+ super(ContainerVmScheduler, self).__init__(slice)
+
+ @property
+ def image(self):
+ from core.models import Image
+
+ look_for_images = self.LOOK_FOR_IMAGES
+ for image_name in look_for_images:
+ images = Image.objects.filter(name = image_name)
+ if images:
+ return images[0]
+
+ raise XOSProgrammingError("No ContainerVM image (looked for %s)" % str(look_for_images))
+
+ def make_new_instance(self):
+ from core.models import Instance, Flavor
+
+ flavors = Flavor.objects.filter(name="m1.small")
+ if not flavors:
+ raise XOSConfigurationError("No m1.small flavor")
+
+ (node,parent) = LeastLoadedNodeScheduler(self.slice).pick()
+
+ instance = Instance(slice = self.slice,
+ node = node,
+ image = self.image,
+ creator = self.slice.creator,
+ deployment = node.site_deployment.deployment,
+ flavor = flavors[0],
+ isolation = "vm",
+ parent = parent)
+ instance.save()
+ # We rely on a special naming convention to identify the VMs that will
+ # hole containers.
+ instance.name = "%s-outer-%s" % (instance.slice.name, instance.id)
+ instance.save()
+ return instance
+
+ def pick(self):
+ from core.models import Instance, Flavor
+
+ avail_vms = []
+ for vm in self.slice.instances.filter(isolation="vm"):
+ if (vm.name.startswith("%s-outer-" % self.slice.name)):
+ container_count = Instance.objects.filter(parent=vm).count()
+ if (container_count < self.MAX_VM_PER_CONTAINER):
+ avail_vms.append( (vm, container_count) )
+ # sort candidate VMs by fewest containers per VM
+ avail_vms = sorted(avail_vms, key = lambda x: x[1])
+ if avail_vms:
+ instance = avail_vms[0][0]
+ return (instance.node, instance)
+
+ instance = self.make_new_instance()
+ return (instance.node, instance)
+
class TenantWithContainer(Tenant):
""" A tenant that manages a container """
@@ -346,6 +447,8 @@
"trusty-server-multi-nic", # CloudLab
]
+ LOOK_FOR_CONTAINER_IMAGES=["andybavier/docker-vcpe"]
+
class Meta:
proxy = True
@@ -406,20 +509,55 @@
from core.models import Image
# Implement the logic here to pick the image that should be used when
# instantiating the VM that will hold the container.
- for image_name in self.LOOK_FOR_IMAGES:
+
+ slice = self.provider_service.slices.all()
+ if not slice:
+ raise XOSProgrammingError("provider service has no slice")
+ slice = slice[0]
+
+ if slice.default_isolation in ["container", "container_vm"]:
+ look_for_images = self.LOOK_FOR_CONTAINER_IMAGES
+ else:
+ look_for_images = self.LOOK_FOR_IMAGES
+
+ for image_name in look_for_images:
images = Image.objects.filter(name = image_name)
if images:
return images[0]
- raise XOSProgrammingError("No VPCE image (looked for %s)" % str(self.LOOK_FOR_IMAGES))
+ raise XOSProgrammingError("No VPCE image (looked for %s)" % str(look_for_images))
- def pick_node(self):
- from core.models import Node
- nodes = list(Node.objects.all())
- # TODO: logic to filter nodes by which nodes are up, and which
- # nodes the slice can instantiate on.
- nodes = sorted(nodes, key=lambda node: node.instances.all().count())
- return nodes[0]
+ @creator.setter
+ def creator(self, value):
+ if value:
+ value = value.id
+ if (value != self.get_attribute("creator_id", None)):
+ self.cached_creator=None
+ self.set_attribute("creator_id", value)
+
+ def save_instance(self, instance):
+ # Override this function to do custom pre-save or post-save processing,
+ # such as creating ports for containers.
+ instance.save()
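+
+ # Sketch of an override in a subclass (hypothetical class and helper names):
+ # class MyServiceTenant(TenantWithContainer):
+ # def save_instance(self, instance):
+ # super(MyServiceTenant, self).save_instance(instance)
+ # self.create_container_ports(instance) # post-save hook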
+
+ def pick_least_loaded_instance_in_slice(self, slices):
+ for slice in slices:
+ if slice.instances.all().count() > 0:
+ for instance in slice.instances.all():
+ # Pick the first instance that has fewer than 5 tenants
+ if self.count_of_tenants_of_an_instance(instance) < 5:
+ return instance
+ return None
+
+ # TODO: Ideally the tenant count for an instance would be maintained via a
+ # many-to-one relationship attribute, but since this model is a proxy it does
+ # not permit new attributes to be defined. Investigate a better solution.
+ def count_of_tenants_of_an_instance(self, instance):
+ tenant_count = 0
+ for tenant in self.get_tenant_objects().all():
+ if tenant.get_attribute("instance_id", None) == instance.id:
+ tenant_count += 1
+ return tenant_count
def manage_container(self):
from core.models import Instance, Flavor
@@ -433,32 +571,55 @@
if self.instance is None:
if not self.provider_service.slices.count():
- raise XOSConfigurationError("The VCPE service has no slices")
+ raise XOSConfigurationError("The service has no slices")
- flavors = Flavor.objects.filter(name="m1.small")
- if not flavors:
- raise XOSConfigurationError("No m1.small flavor")
+ new_instance_created = False
+ instance = None
+ if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+ #Find if any existing instances can be used for this tenant
+ slices = self.provider_service.slices.all()
+ instance = self.pick_least_loaded_instance_in_slice(slices)
- node =self.pick_node()
- instance = Instance(slice = self.provider_service.slices.all()[0],
- node = node,
- image = self.image,
- creator = self.creator,
- deployment = node.site_deployment.deployment,
- flavor = flavors[0])
- instance.save()
+ if not instance:
+ flavors = Flavor.objects.filter(name="m1.small")
+ if not flavors:
+ raise XOSConfigurationError("No m1.small flavor")
+
+ slice = self.provider_service.slices.all()[0]
+
+ if slice.default_isolation == "container_vm":
+ (node, parent) = ContainerVmScheduler(slice).pick()
+ else:
+ (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+
+ instance = Instance(slice = slice,
+ node = node,
+ image = self.image,
+ creator = self.creator,
+ deployment = node.site_deployment.deployment,
+ flavor = flavors[0],
+ isolation = slice.default_isolation,
+ parent = parent)
+ self.save_instance(instance)
+ new_instance_created = True
try:
self.instance = instance
super(TenantWithContainer, self).save()
except:
- instance.delete()
+ if new_instance_created:
+ instance.delete()
raise
def cleanup_container(self):
if self.instance:
- # print "XXX cleanup instance", self.instance
- self.instance.delete()
+ if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+ #Delete the instance only if this is last tenant in that instance
+ tenant_count = self.count_of_tenants_of_an_instance(self.instance)
+ if tenant_count == 0:
+ self.instance.delete()
+ else:
+ self.instance.delete()
self.instance = None
class CoarseTenant(Tenant):
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 18d3cb6..df36b26 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -19,6 +19,8 @@
# Create your models here.
class Slice(PlCoreBase):
+ ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
+
name = StrippedCharField(unique=True, help_text="The Name of the Slice", max_length=80)
enabled = models.BooleanField(default=True, help_text="Status for this Slice")
omf_friendly = models.BooleanField(default=False)
@@ -37,6 +39,8 @@
default_image = models.ForeignKey(Image, related_name = "slices", null=True, blank=True);
mount_data_sets = StrippedCharField(default="GenBank",null=True, blank=True, max_length=256)
+ default_isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
+
def __unicode__(self): return u'%s' % (self.name)
@property
diff --git a/xos/core/static/xos.css b/xos/core/static/xos.css
index 33d05cb..14702d9 100644
--- a/xos/core/static/xos.css
+++ b/xos/core/static/xos.css
@@ -1,4 +1,4 @@
-g/*************************
+/*************************
colors:
tab - active/focus color
background-color: #105E9E !important;
@@ -12,6 +12,274 @@
*************************/
+html, body {
+ /*height: 100%;*/
+ min-height: 100%;
+ margin: 0;
+}
+
+body{
+ max-width: 100%;
+ overflow-x: hidden;
+}
+
+/*#wrap {
+ min-height: 100%;
+}*/
+
+/* ************************* SIDENAV TOGGLE ************************* */
+
+#wrapper {
+ padding-left: 0;
+ -webkit-transition: all 0.5s ease;
+ -moz-transition: all 0.5s ease;
+ -o-transition: all 0.5s ease;
+ transition: all 0.5s ease;
+}
+
+#wrapper.toggled {
+ padding-left: 250px;
+}
+
+#sidebar-wrapper {
+ z-index: 1000;
+ position: fixed;
+ left: 250px;
+ width: 0;
+ height: 100%;
+ margin-left: -250px;
+ overflow-y: auto;
+ -webkit-transition: all 0.5s ease;
+ -moz-transition: all 0.5s ease;
+ -o-transition: all 0.5s ease;
+ transition: all 0.5s ease;
+}
+
+#wrapper.toggled #sidebar-wrapper {
+ width: 250px;
+ padding: 10px;
+}
+
+#page-content-wrapper {
+ width: 100%;
+ position: absolute;
+ padding: 15px;
+}
+
+#wrapper.toggled #page-content-wrapper {
+ position: absolute;
+ margin-right: -250px;
+}
+
+@media(min-width:768px) {
+ #wrapper {
+ padding-left: 250px;
+ }
+
+ #wrapper.toggled {
+ padding-left: 0;
+ }
+
+ #sidebar-wrapper {
+ width: 250px;
+ padding: 10px;
+ }
+
+ #wrapper.toggled #sidebar-wrapper {
+ width: 0;
+ }
+
+ #page-content-wrapper {
+ padding: 20px;
+ position: relative;
+ }
+
+ #wrapper.toggled #page-content-wrapper {
+ position: relative;
+ margin-right: 0;
+ }
+}
+
+.navbar-toggle{
+ border: 1px solid #08C;
+}
+
+.navbar-toggle .icon-bar{
+ background: #08C;
+}
+/* ************************* END SIDENAV TOGGLE ************************* */
+
+/* ************************* HEADER STYLE ************************* */
+
+.header{
+ background-color: #ffffff !important;
+ border-bottom: 3px solid #C5CCD4;
+ margin-bottom: 14px;
+ height: 85px;
+}
+
+.header .logo{
+ max-height: 80px;
+}
+
+.nav-quick-search{
+ margin: 30px -10px 0px 0px !important;
+ padding:0 25px 0 0 !important;
+ float:right !important;
+}
+
+.nav-quick-search .search-query{
+ border-radius:5px;
+ border:none;
+ box-shadow:0px;
+ background-color:lightGrey;
+ padding-left: 27px;
+}
+
+
+.header #branding {
+ width: 100%;
+ height:60px;
+ border-right:none;
+}
+
+.header a {
+ color: #08C;
+ font-weight: bold;
+ /*border-bottom: 1px solid #C5CCD4;*/
+}
+a {
+ color: #08C;
+ font-weight: bold;
+ /*border-bottom: 1px solid #C5CCD4;*/
+}
+
+
+#branding2{
+height:20px;
+width:100%;
+color: #333;
+/*background-color: #000000;*/
+margin-bottom: 10px;
+}
+
+.header #user-tools {
+ padding: 12px 20px 0px 0px;
+ float: right;
+ margin-top: -5px;
+}
+
+.header .header-content .date{
+padding-left:10px;
+}
+
+.header .header-content .time {
+font-weight: normal;
+}
+.header .header-content.header-content-first{
+height: 15px;
+padding-bottom: 0px;
+}
+
+.header .header-content {
+padding-bottom: 0px;
+padding: 7px 0 0 0px;
+}
+
+/************************* END HEADER *************************/
+
+/************************* FOOTER *************************/
+
+.footer{
+ z-index: 1001;
+}
+
+/* FIXME */
+@media(max-width:768px) {
+ .footer{
+ display: none;
+ }
+
+ #page-content-wrapper{
+ padding-bottom: 60px;
+ }
+}
+
+.footer .content .statusMsg {
+ float: right;
+ padding: 15px 20px 0 0;
+ display: block;
+}
+/************************* END FOOTER *************************/
+
+/************************* NAV *************************/
+
+#sidebar-wrapper {
+ -webkit-box-shadow: 3px 0px 5px 0px rgba(50, 50, 50, 0.75);
+ -moz-box-shadow: 3px 0px 5px 0px rgba(50, 50, 50, 0.75);
+ box-shadow: 3px 0px 5px 0px rgba(50, 50, 50, 0.75);
+}
+
+#sidebar-wrapper .logo{
+ max-width: 100%;
+}
+
+.icon-app {
+background-image: url("opencloudApp.png");
+}
+.icon-home {
+/* Going with darker standard color nav -- so using over png's background-image: url("Home.png"); */
+background-image: url("Home.png");
+}
+.icon-deployment{
+background-image: url("Deployments.png");
+}
+.icon-site{
+background-image: url("Sites.png");
+}
+.icon-slice{
+background-image: url("Slices.png");
+}
+.icon-user{
+background-image: url("Users.png");
+}
+.icon-reservation{
+background-image: url("Reservations.png");
+}
+
+#sidebar-wrapper>ul>li.active>a>.icon-home ,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-home ,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-home{
+ background-image: url("Home_over.png");
+}
+
+#sidebar-wrapper>ul>li.active>a>.icon-deployment,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-deployment,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-deployment{
+ background-image: url("Deployments_over.png");
+}
+#sidebar-wrapper>ul>li.active>a>.icon-site,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-site,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-site{
+ background-image: url("Sites_over.png");
+}
+#sidebar-wrapper>ul>li.active>a>.icon-slice,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-slice,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-slice{
+ background-image: url("Slices_over.png");
+}
+#sidebar-wrapper>ul>li.active>a>.icon-user,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-user,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-user{
+ background-image: url("Users_over.png");
+}
+#sidebar-wrapper>ul>li.active>a>.icon-reservation,
+/*#sidebar-wrapper>ul>li:hover>a>.icon-reservation,*/
+#sidebar-wrapper>ul>li.focus>a>.icon-reservation{
+ background-image: url("Reservations_over.png");
+}
+/************************* END NAV *************************/
+
/* CSS for jquery Tabs */
#hometabs {
border-bottom: 1px solid #105E9E;
@@ -50,97 +318,7 @@
text-shadow: rgb(46, 43, 43) 0.1em 0.1em 0.2em;
font-weight: normal;
}
-/*************************/
-/* Header elements */
-.logo {
-}
-.header{
- background-color: #ffffff !important;
- /*background-image: url('bg2.jpg');*/
- background-size: 100% auto;
- background-image: none !important;
- text-shadow: none;
- border-bottom: 3px solid #C5CCD4;
- margin-bottom: 14px;
- margin-top: -120px;
- height: 85px;
- /*min-width: 1321px;*/
-}
-
-.nav-quick-search{
-margin: 30px -10px 0px 0px !important;
-padding:0 25px 0 0 !important;
-float:right !important;
-}
-
-.nav-quick-search .search-query{
-border-radius:5px;
-border:none;
-box-shadow:0px;
-background-color:lightGrey;
-padding-left: 27px;
-}
-
-
-.header #branding {
-width: 100%;
-height:60px;
-}
-
-.header a {
-color: #08C;
-font-weight: bold;
-/*border-bottom: 1px solid #C5CCD4;*/
-}
-a {
-color: #08C;
-font-weight: bold;
-/*border-bottom: 1px solid #C5CCD4;*/
-}
-
-.header #branding {
-border-right:none;
-}
-
-#branding2{
-height:20px;
-width:100%;
-color: #333;
-/*background-color: #000000;*/
-margin-bottom: 10px;
-}
-
-.header #user-tools {
- padding: 12px 20px 0px 0px;
- float: right;
- margin-top: -5px;
-}
-
-.header .header-content .date{
-padding-left:10px;
-}
-
-.header .header-content .time {
-font-weight: normal;
-}
-.header .header-content.header-content-first{
-height: 15px;
-padding-bottom: 0px;
-}
-
-.header .header-content {
-padding-bottom: 0px;
-padding: 7px 0 0 0px;
-}
-
-/*************************/
-
-.footer .content .statusMsg {
- float: right;
- padding: 15px 20px 0 0;
- display: block;
-}
.alignCenter {
text-align: center !important;
@@ -248,7 +426,7 @@
#suit-center {
padding: 20px;
width: 100%;
-min-width:650px;
+/*min-width:650px;*/
}
.inner-two-columns .inner-center-column .tab-content {
overflow-x: auto;
@@ -266,60 +444,12 @@
margin-bottom: 5px;
}
-
-/*For changing the background color of the left side navigation list items*/
-/*For changing the color of the left side navigation list items*/
-/*For changing the font of the left side navigation list items*/
-.left-nav>ul>li>a {
-/*background-color: #CDE7FF;*/
-/* Light color nav choices */
-/*background-color: #DFECF8;*/
-/*background-color: #91BFE4;*/
-/*background-color: #DAECFC;*/
-background-color: #448CCA;
-/*ONlab.us reg color blue*/
-/*background-color: #ccffff;*/
-font-weight: normal;
-/*color: #105E9E;*/
-color: #ffffff;
-text-shadow: rgb(46, 43, 43) 0.1em 0.1em 0.2em;
-letter-spacing: 1px;
-font-size: 12px;
-border-bottom: none;
-}
-
-/*For giving the padding for the left side navigation*/
-.left-nav>ul {
-padding-left: 5px;
-}
-
-/*For increasing the height of left side navigation list items*/
-.left-nav>ul>li {
-padding-top:4px;
-line-height: 35px;
-width: 200px;
-}
-
/*For changing background color of suit center*/
#suit-center {
background-color: #ffffff;
}
-.left-nav>ul>li.active>a {
-/*background-color: #448CCA;*/
-/* Last Active
-background-color: #0061B7; */
-background-color: #515151;
-/*background-color: #448CCA;*/
-font-weight:normal;
-color:#ffffff;
-text-shadow: rgb(46, 43, 43) 0.1em 0.1em 0.2em;
-letter-spacing: 1px;
-/*left: 10px;*/
-background-image: url("right_arrow.png");
- background-position: 97% center;
-}
.nav-tabs-suit li{
@@ -380,37 +510,6 @@
line-height: 0;
}
-
-/* create an arrow that points down */
-
-
-.left-nav>ul>li.active>a:hover{
-/*background-color: #448CCA;*/
-/*background-color: #91BFE4;*/
-/*background-color: #D6E7F8;*/
-font-weight:normal;
-font-size: 1.2em;
-/*font-weight: bold;*/
-color:#ffffff;
-/*padding-top:10px;*/
-text-decoration:none;
-}
-
-/* Adjust font weight to normal on hover, else white blurs */
-.left-nav>ul>li>a:hover{
-/*background-color: #448CCA;*/
-/*background-color: #004775;*/
-background-color: #515151;
-color:#ffffff;
-font-weight:normal;
-font-size: 1.2em;
-/*padding-top:10px;*/
-text-decoration:none;
-/*border-left: 15px solid #105E9E ;*/
-border-left: 10px solid #ffffff;
-text-shadow: rgb(46, 43, 43) 0.1em 0.1em 0.2em;
-letter-spacing: 1px;
-}
.nav-tabs-suit li.active a:hover,.nav-tabs-suit li a:hover{
/*background-color: #448CCA;*/
background-color: #515151;
@@ -600,9 +699,10 @@
}*/
.login {
-background-image: url('bg.jpg');
-background-size: 100%;
-background-repeat: no-repeat;
+ background-image: url('bg.jpg');
+ background-size: cover;
+ background-position: center;
+ background-repeat: no-repeat;
}
@@ -622,7 +722,7 @@
browser resizes **/
#content-main {
overflow-x:auto;
- min-width: 1000px;
+ /*min-width: 1000px;*/
}
.tab-content tab-content-main {
@@ -666,14 +766,14 @@
.login #wrap {
background: none repeat scroll 0 0 rgba(0, 0, 0, 0);
+ overflow: hidden;
+ padding: 0;
}
#wrap {
background: none repeat scroll 0 0 rgba(0, 0, 0, 0);
}
#wrap {
- height: auto !important;
- margin: 0 auto -60px;
- min-height: 100%;
+ padding-bottom: 60px;
}
.forgotLink {
@@ -709,7 +809,6 @@
/*Added by Beena for adding the three components in dashboard*/
.breadcrumb{
- width: 37%;
display:inline-block;
background-color: #fff;
@@ -766,52 +865,13 @@
}
.icon-home ,.icon-deployment ,.icon-site ,.icon-slice ,.icon-user, .icon-reservation, .icon-app{
-background-position: left center;
-width:22px;
-height:22px;
-}
-
-.icon-app {
-background-image: url("opencloudApp.png");
-}
-.icon-home {
-/* Going with darker standard color nav -- so using over png's background-image: url("Home.png"); */
-background-image: url("Home_over.png");
-}
-.icon-deployment{
-background-image: url("Deployments_over.png");
-}
-.icon-site{
-background-image: url("Sites_over.png");
-}
-.icon-slice{
-background-image: url("Slices_over.png");
-}
-.icon-user{
-background-image: url("Users_over.png");
-}
-.icon-reservation{
-background-image: url("Reservations_over.png");
-}
-
-.left-nav>ul>li.active>a>.icon-home , .left-nav>ul>li:hover>a>.icon-home , .left-nav>ul>li.focus>a>.icon-home{
-background-image: url("Home_over.png");
-}
-
-.left-nav>ul>li.active>a>.icon-deployment,.left-nav>ul>li:hover>a>.icon-deployment,.left-nav>ul>li.focus>a>.icon-deployment{
-background-image: url("Deployments_over.png");
-}
-.left-nav>ul>li.active>a>.icon-site , .left-nav>ul>li:hover>a>.icon-site , .left-nav>ul>li.focus>a>.icon-site{
-background-image: url("Sites_over.png");
-}
-.left-nav>ul>li.active>a>.icon-slice , .left-nav>ul>li:hover>a>.icon-slice , .left-nav>ul>li.focus>a>.icon-slice {
-background-image: url("Slices_over.png");
-}
-.left-nav>ul>li.active>a>.icon-user , .left-nav>ul>li:hover>a>.icon-user , .left-nav>ul>li.focus>a>.icon-user{
-background-image: url("Users_over.png");
-}
-.left-nav>ul>li.active>a>.icon-reservation , .left-nav>ul>li:hover>a>.icon-reservation , .left-nav>ul>li.focus>a>.icon-reservation{
-background-image: url("Reservations_over.png");
+ background-position: left center;
+ width:22px;
+ height:22px;
+ display: inline-block;
+ margin-right: 10px;
+ position: relative;
+ top: 5px;
}
#dashboardHPC {
diff --git a/xos/core/xoslib/methods/cordsubscriber.py b/xos/core/xoslib/methods/cordsubscriber.py
index c26ac54..297ac4a 100644
--- a/xos/core/xoslib/methods/cordsubscriber.py
+++ b/xos/core/xoslib/methods/cordsubscriber.py
@@ -28,7 +28,9 @@
class CordSubscriberIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
id = ReadOnlyField()
service_specific_id = ReadOnlyField()
- vlan_id = ReadOnlyField()
+ vlan_id = ReadOnlyField() # XXX remove this
+ c_tag = ReadOnlyField()
+ s_tag = ReadOnlyField()
vcpe_id = ReadOnlyField()
instance = ReadOnlyField()
image = ReadOnlyField()
@@ -59,7 +61,7 @@
class Meta:
model = CordSubscriber
fields = ('humanReadableName', 'id',
- 'service_specific_id', 'vlan_id',
+ 'service_specific_id', 'vlan_id', 's_tag', 'c_tag',
'vcpe_id', 'instance', 'instance_name', 'image', 'image_name',
'firewall_enable', 'firewall_rules',
'url_filter_enable', 'url_filter_rules', 'url_filter_level',
diff --git a/xos/core/xoslib/methods/volttenant.py b/xos/core/xoslib/methods/volttenant.py
index e5998da..bf48290 100644
--- a/xos/core/xoslib/methods/volttenant.py
+++ b/xos/core/xoslib/methods/volttenant.py
@@ -26,7 +26,9 @@
class VOLTTenantIdSerializer(serializers.ModelSerializer, PlusSerializerMixin):
id = ReadOnlyField()
service_specific_id = serializers.CharField()
- vlan_id = serializers.CharField()
+ #vlan_id = serializers.CharField()
+ s_tag = serializers.CharField()
+ c_tag = serializers.CharField()
provider_service = serializers.PrimaryKeyRelatedField(queryset=VOLTService.get_service_objects().all(), default=get_default_volt_service)
humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
@@ -35,7 +37,7 @@
class Meta:
model = VOLTTenant
- fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_id', 'vlan_id', 'computeNodeName' )
+ fields = ('humanReadableName', 'id', 'provider_service', 'service_specific_id', 's_tag', 'c_tag', 'computeNodeName' )
def getHumanReadableName(self, obj):
return obj.__unicode__()
@@ -62,9 +64,19 @@
if service_specific_id is not None:
queryset = queryset.filter(service_specific_id=service_specific_id)
- vlan_id = self.request.QUERY_PARAMS.get('vlan_id', None)
- if vlan_id is not None:
- ids = [x.id for x in queryset if x.get_attribute("vlan_id", None)==vlan_id]
+# vlan_id = self.request.QUERY_PARAMS.get('vlan_id', None)
+# if vlan_id is not None:
+# ids = [x.id for x in queryset if x.get_attribute("vlan_id", None)==vlan_id]
+# queryset = queryset.filter(id__in=ids)
+
+ c_tag = self.request.QUERY_PARAMS.get('c_tag', None)
+ if c_tag is not None:
+ ids = [x.id for x in queryset if x.get_attribute("c_tag", None)==c_tag]
+ queryset = queryset.filter(id__in=ids)
+
+ s_tag = self.request.QUERY_PARAMS.get('s_tag', None)
+ if s_tag is not None:
+ ids = [x.id for x in queryset if x.get_attribute("s_tag", None)==s_tag]
queryset = queryset.filter(id__in=ids)
return queryset
@@ -74,7 +86,7 @@
existing_obj = None
for obj in VOLTTenant.get_tenant_objects().all():
- if (obj.vlan_id == data.get("vlan_id", None)) and (obj.service_specific_id == data.get("service_specific_id",None)):
+ if (obj.c_tag == data.get("c_tag", None)) and (obj.s_tag == data.get("s_tag", None)) and (obj.service_specific_id == data.get("service_specific_id",None)):
existing_obj = obj
if existing_obj:
diff --git a/xos/core/xoslib/ngXosLib/generator-xos/app/index.js b/xos/core/xoslib/ngXosLib/generator-xos/app/index.js
index 15d400b..547a8f9 100755
--- a/xos/core/xoslib/ngXosLib/generator-xos/app/index.js
+++ b/xos/core/xoslib/ngXosLib/generator-xos/app/index.js
@@ -9,18 +9,53 @@
_fistCharToUpper: function(string){
return string.replace(/^./, string[0].toUpperCase());
},
- prompting: function(){
- var done = this.async();
- this.prompt({
- type : 'input',
- name : 'name',
- message : 'Your project name',
- default : this.config.get('name') // value set in .yo-rc.json
- }, function (answers) {
- // TODO check if this view already exist
- config.name = answers.name;
- done();
- }.bind(this));
+ prompting: {
+ name:function(){
+ var done = this.async();
+ this.prompt({
+ type : 'input',
+ name : 'name',
+ message : 'Your project name',
+ default : this.config.get('name') // value set in .yo-rc.json
+ }, function (answers) {
+ // TODO check if this view already exists
+ config.name = answers.name;
+ done();
+ }.bind(this));
+ },
+ host:function(){
+ var done = this.async();
+ this.prompt({
+ type : 'input',
+ name : 'host',
+ message : 'Your project remote host (with port)'
+ }, function (answers) {
+ config.host = answers.host;
+ done();
+ }.bind(this));
+ },
+ token:function(){
+ var done = this.async();
+ this.prompt({
+ type : 'input',
+ name : 'token',
+ message : 'Insert your active session token'
+ }, function (answers) {
+ config.token = answers.token;
+ done();
+ }.bind(this));
+ },
+ session:function(){
+ var done = this.async();
+ this.prompt({
+ type : 'input',
+ name : 'session',
+ message : 'Insert your active session id'
+ }, function (answers) {
+ config.session = answers.session;
+ done();
+ }.bind(this));
+ }
},
writing: {
rcFiles: function(){
@@ -34,6 +69,13 @@
{ name: config.name, author: {name:user.git.name()} }
);
},
+ envConfig: function(){
+ this.fs.copyTpl(
+ this.templatePath('env/default.js'),
+ this.destinationPath(`${this.config.get('folder')}/${config.name}/env/default.js`),
+ { host: config.host, token: config.token, session: config.session }
+ );
+ },
bowerJson: function(){
this.fs.copyTpl(
this.templatePath('bower.json'),
diff --git a/xos/core/xoslib/ngXosLib/generator-xos/app/templates/env/default.js b/xos/core/xoslib/ngXosLib/generator-xos/app/templates/env/default.js
new file mode 100644
index 0000000..5db8632
--- /dev/null
+++ b/xos/core/xoslib/ngXosLib/generator-xos/app/templates/env/default.js
@@ -0,0 +1,13 @@
+// This is a default configuration for your development environment.
+// You can duplicate this configuration for any of your Backend Environments.
+// Different configurations are loaded by setting a NODE_ENV variable that contains the config file name.
+// `NODE_ENV=local npm start`
+//
+// If xoscsrftoken or xossessionid are not specified, the browser values are used
+// (this works only for the local environment, as both applications are served on the same domain)
+
+module.exports = {
+ host: '<%= host %>',
+ xoscsrftoken: '<%= token %>',
+ xossessionid: '<%= session %>'
+};
diff --git a/xos/core/xoslib/ngXosLib/generator-xos/app/templates/gulp/server.js b/xos/core/xoslib/ngXosLib/generator-xos/app/templates/gulp/server.js
index 8eab1bf..7605294 100644
--- a/xos/core/xoslib/ngXosLib/generator-xos/app/templates/gulp/server.js
+++ b/xos/core/xoslib/ngXosLib/generator-xos/app/templates/gulp/server.js
@@ -10,8 +10,17 @@
var httpProxy = require('http-proxy');
var del = require('del');
+const environment = process.env.NODE_ENV;
+
+if (environment){
+ var conf = require(`../env/${environment}.js`);
+}
+else{
+ var conf = require('../env/default.js')
+}
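+
+// For example, `NODE_ENV=production npm start` would load ../env/production.js
+// (a file you would create by copying env/default.js; the name is illustrative).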
+
var proxy = httpProxy.createProxyServer({
- target: 'http://0.0.0.0:9999'
+ target: conf.host || 'http://0.0.0.0:9999'
});
@@ -49,6 +58,10 @@
req.url.indexOf('/xoslib/') !== -1 ||
req.url.indexOf('/hpcapi/') !== -1
){
+ if(conf.xoscsrftoken && conf.xossessionid){
+ req.headers.cookie = `xoscsrftoken=${conf.xoscsrftoken}; xossessionid=${conf.xossessionid}`;
+ req.headers['x-csrftoken'] = conf.xoscsrftoken;
+ }
proxy.web(req, res);
}
else{
@@ -130,4 +143,4 @@
['browser']
);
});
-};
\ No newline at end of file
+};
diff --git a/xos/core/xoslib/ngXosLib/xosHelpers/src/services/csrfToken.interceptor.js b/xos/core/xoslib/ngXosLib/xosHelpers/src/services/csrfToken.interceptor.js
index 51f4918..283e90d 100644
--- a/xos/core/xoslib/ngXosLib/xosHelpers/src/services/csrfToken.interceptor.js
+++ b/xos/core/xoslib/ngXosLib/xosHelpers/src/services/csrfToken.interceptor.js
@@ -5,7 +5,7 @@
.module('xos.helpers')
.factory('SetCSRFToken', setCSRFToken);
- function setCSRFToken($cookies) {
+ function setCSRFToken($cookies) {
return {
request: function(request){
if(request.method !== 'GET'){
@@ -15,4 +15,4 @@
}
};
}
-})();
\ No newline at end of file
+})();
diff --git a/xos/core/xoslib/ngXosViews/contentProvider/gulp/build.js b/xos/core/xoslib/ngXosViews/contentProvider/gulp/build.js
index e851505..9af8074 100644
--- a/xos/core/xoslib/ngXosViews/contentProvider/gulp/build.js
+++ b/xos/core/xoslib/ngXosViews/contentProvider/gulp/build.js
@@ -23,7 +23,10 @@
var rename = require('gulp-rename');
var replace = require('gulp-replace');
-var TEMPLATE_FOOTER = '}]);angular.bootstrap(angular.element(\'#xosContentProvider\'), [\'xos.contentProvider\']);';
+var TEMPLATE_FOOTER = `}]);
+angular.module('xos.contentProvider').run(function($location){$location.path('/')});
+angular.bootstrap(angular.element('#xosContentProvider'), ['xos.contentProvider']);`;
+
module.exports = function(options){
// delete previous builded file
diff --git a/xos/core/xoslib/ngXosViews/contentProvider/gulp/server.js b/xos/core/xoslib/ngXosViews/contentProvider/gulp/server.js
index ad7c681..8eab1bf 100644
--- a/xos/core/xoslib/ngXosViews/contentProvider/gulp/server.js
+++ b/xos/core/xoslib/ngXosViews/contentProvider/gulp/server.js
@@ -31,6 +31,7 @@
// reloadDelay: 500,
// logLevel: 'debug',
// logConnections: true,
+ startPath: '#/',
snippetOptions: {
rule: {
match: /<!-- browserSync -->/i
diff --git a/xos/core/xoslib/ngXosViews/sampleView/spec/sample.test.js b/xos/core/xoslib/ngXosViews/contentProvider/spec/sample.test.js
similarity index 95%
rename from xos/core/xoslib/ngXosViews/sampleView/spec/sample.test.js
rename to xos/core/xoslib/ngXosViews/contentProvider/spec/sample.test.js
index c169d10..177bc7d 100644
--- a/xos/core/xoslib/ngXosViews/sampleView/spec/sample.test.js
+++ b/xos/core/xoslib/ngXosViews/contentProvider/spec/sample.test.js
@@ -4,7 +4,7 @@
var scope, element, isolatedScope, httpBackend;
- beforeEach(module('xos.sampleView'));
+ beforeEach(module('xos.contentProvider'));
beforeEach(module('templates'));
beforeEach(inject(function($httpBackend, $compile, $rootScope){
diff --git a/xos/core/xoslib/ngXosViews/sampleView/src/templates/users-list.tpl.html b/xos/core/xoslib/ngXosViews/contentProvider/src/templates/users-list.tpl.html
similarity index 100%
rename from xos/core/xoslib/ngXosViews/sampleView/src/templates/users-list.tpl.html
rename to xos/core/xoslib/ngXosViews/contentProvider/src/templates/users-list.tpl.html
diff --git a/xos/core/xoslib/ngXosViews/sampleView/.bowerrc b/xos/core/xoslib/ngXosViews/sampleView/.bowerrc
deleted file mode 100644
index e491038..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/.bowerrc
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "directory": "src/vendor/"
-}
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/.eslintrc b/xos/core/xoslib/ngXosViews/sampleView/.eslintrc
deleted file mode 100644
index c852748..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/.eslintrc
+++ /dev/null
@@ -1,42 +0,0 @@
-{
- "ecmaFeatures": {
- "blockBindings": true,
- "forOf": true,
- "destructuring": true,
- "arrowFunctions": true,
- "templateStrings": true
- },
- "env": {
- "browser": true,
- "node": true,
- "es6": true
- },
- "plugins": [
- //"angular"
- ],
- "rules": {
- "quotes": [2, "single"],
- "camelcase": [1, {"properties": "always"}],
- "no-underscore-dangle": 1,
- "eqeqeq": [2, "smart"],
- "no-alert": 1,
- "key-spacing": [1, { "beforeColon": false, "afterColon": true }],
- "indent": [2, 2],
- "no-irregular-whitespace": 1,
- "eol-last": 0,
- "max-nested-callbacks": [2, 4],
- "comma-spacing": [1, {"before": false, "after": true}],
- "no-trailing-spaces": [1, { skipBlankLines: true }],
- "no-unused-vars": [1, {"vars": "all", "args": "after-used"}],
- "new-cap": 0,
-
- //"angular/ng_module_name": [2, '/^xos\.*[a-z]*$/'],
- //"angular/ng_controller_name": [2, '/^[a-z].*Ctrl$/'],
- //"angular/ng_service_name": [2, '/^[A-Z].*Service$/'],
- //"angular/ng_directive_name": [2, '/^[a-z]+[[A-Z].*]*$/'],
- //"angular/ng_di": [0, "function or array"]
- },
- "globals" :{
- "angular": true
- }
-}
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/.gitignore b/xos/core/xoslib/ngXosViews/sampleView/.gitignore
deleted file mode 100644
index 567aee4..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-dist/
-src/vendor
-.tmp
-node_modules
-npm-debug.log
-dist/
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/bower.json b/xos/core/xoslib/ngXosViews/sampleView/bower.json
deleted file mode 100644
index 2dd8883..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/bower.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "name": "xos-sampleView",
- "version": "0.0.0",
- "authors": [
- "Matteo Scandolo <matteo.scandolo@link-me.it>"
- ],
- "description": "The sampleView view",
- "license": "MIT",
- "ignore": [
- "**/.*",
- "node_modules",
- "bower_components",
- "static/js/vendor/",
- "test",
- "tests"
- ],
- "dependencies": {
- },
- "devDependencies": {
- "jquery": "~2.1.4",
- "angular-mocks": "~1.4.7",
- "angular": "~1.4.7",
- "angular-ui-router": "~0.2.15",
- "angular-cookies": "~1.4.7",
- "angular-resource": "~1.4.7",
- "ng-lodash": "~0.3.0",
- "bootstrap-css": "2.3.2"
- }
-}
diff --git a/xos/core/xoslib/ngXosViews/sampleView/gulp/build.js b/xos/core/xoslib/ngXosViews/sampleView/gulp/build.js
deleted file mode 100644
index 7e8e5a1..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/gulp/build.js
+++ /dev/null
@@ -1,115 +0,0 @@
-'use strict';
-
-// BUILD
-//
-// The only purpose of this gulpfile is to build a XOS view and copy the correct files into
-// .html => dashboards
-// .js (minified and concat) => static/js
-//
-// The template are parsed and added to js with angular $templateCache
-
-var gulp = require('gulp');
-var ngAnnotate = require('gulp-ng-annotate');
-var uglify = require('gulp-uglify');
-var templateCache = require('gulp-angular-templatecache');
-var runSequence = require('run-sequence');
-var concat = require('gulp-concat');
-var del = require('del');
-var wiredep = require('wiredep');
-var angularFilesort = require('gulp-angular-filesort');
-var _ = require('lodash');
-var eslint = require('gulp-eslint');
-var inject = require('gulp-inject');
-var rename = require('gulp-rename');
-var replace = require('gulp-replace');
-
-var TEMPLATE_FOOTER = '}]);angular.bootstrap(angular.element(\'#xosSampleView\'), [\'xos.sampleView\']);';
-module.exports = function(options){
-
- // delete previous builded file
- gulp.task('clean', function(){
- return del(
- [options.dashboards + 'xosSampleView.html'],
- {force: true}
- );
- });
-
- // compile and minify scripts
- gulp.task('scripts', function() {
- return gulp.src([
- options.tmp + '**/*.js'
- ])
- .pipe(ngAnnotate())
- .pipe(angularFilesort())
- .pipe(concat('xosSampleView.js'))
- .pipe(uglify())
- .pipe(gulp.dest(options.static + 'js/'));
- });
-
- // set templates in cache
- gulp.task('templates', function(){
- return gulp.src('./src/templates/*.html')
- .pipe(templateCache({
- module: 'xos.sampleView',
- root: 'templates/',
- templateFooter: TEMPLATE_FOOTER
- }))
- .pipe(gulp.dest(options.tmp));
- });
-
- // copy html index to Django Folder
- gulp.task('copyHtml', ['clean'], function(){
- return gulp.src(options.src + 'index.html')
- // remove dev dependencies from html
- .pipe(replace(/<!-- bower:css -->(\n.*)*\n<!-- endbower --><!-- endcss -->/, ''))
- .pipe(replace(/<!-- bower:js -->(\n.*)*\n<!-- endbower --><!-- endjs -->/, ''))
- .pipe(replace(/ng-app=".*"\s/, ''))
- // injecting minified files
- .pipe(
- inject(
- gulp.src([
- options.static + 'js/vendor/xosSampleViewVendor.js',
- options.static + 'js/xosSampleView.js'
- ])
- )
- )
- .pipe(rename('xosSampleView.html'))
- .pipe(gulp.dest(options.dashboards));
- });
-
- // minify vendor js files
- gulp.task('wiredep', function(){
- var bowerDeps = wiredep().js;
- if(!bowerDeps){
- return;
- }
-
- // remove angular (it's already loaded)
- _.remove(bowerDeps, function(dep){
- return dep.indexOf('angular/angular.js') !== -1;
- });
-
- return gulp.src(bowerDeps)
- .pipe(concat('xosSampleViewVendor.js'))
- .pipe(uglify())
- .pipe(gulp.dest(options.static + 'js/vendor/'));
- });
-
- gulp.task('lint', function () {
- return gulp.src(['src/js/**/*.js'])
- .pipe(eslint())
- .pipe(eslint.format())
- .pipe(eslint.failAfterError());
- });
-
- gulp.task('build', function() {
- runSequence(
- 'templates',
- 'babel',
- 'scripts',
- 'wiredep',
- 'copyHtml',
- 'cleanTmp'
- );
- });
-};
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/gulp/server.js b/xos/core/xoslib/ngXosViews/sampleView/gulp/server.js
deleted file mode 100644
index ad7c681..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/gulp/server.js
+++ /dev/null
@@ -1,132 +0,0 @@
-'use strict';
-
-var gulp = require('gulp');
-var browserSync = require('browser-sync').create();
-var inject = require('gulp-inject');
-var runSequence = require('run-sequence');
-var angularFilesort = require('gulp-angular-filesort');
-var babel = require('gulp-babel');
-var wiredep = require('wiredep').stream;
-var httpProxy = require('http-proxy');
-var del = require('del');
-
-var proxy = httpProxy.createProxyServer({
- target: 'http://0.0.0.0:9999'
-});
-
-
-proxy.on('error', function(error, req, res) {
- res.writeHead(500, {
- 'Content-Type': 'text/plain'
- });
-
- console.error('[Proxy]', error);
-});
-
-module.exports = function(options){
-
- // open in browser with sync and proxy to 0.0.0.0
- gulp.task('browser', function() {
- browserSync.init({
- // reloadDelay: 500,
- // logLevel: 'debug',
- // logConnections: true,
- snippetOptions: {
- rule: {
- match: /<!-- browserSync -->/i
- }
- },
- server: {
- baseDir: options.src,
- routes: {
- '/api': options.api,
- '/xosHelpers/src': options.helpers
- },
- middleware: function(req, res, next){
- if(
- req.url.indexOf('/xos/') !== -1 ||
- req.url.indexOf('/xoslib/') !== -1 ||
- req.url.indexOf('/hpcapi/') !== -1
- ){
- proxy.web(req, res);
- }
- else{
- next();
- }
- }
- }
- });
-
- gulp.watch(options.src + 'js/**/*.js', ['js-watch']);
- gulp.watch(options.src + 'vendor/**/*.js', ['bower'], function(){
- browserSync.reload();
- });
- gulp.watch(options.src + '**/*.html', function(){
- browserSync.reload();
- });
- });
-
- // transpile js with sourceMaps
- gulp.task('babel', function(){
- return gulp.src(options.scripts + '**/*.js')
- .pipe(babel({sourceMaps: true}))
- .pipe(gulp.dest(options.tmp));
- });
-
- // inject scripts
- gulp.task('injectScript', ['cleanTmp', 'babel'], function(){
- return gulp.src(options.src + 'index.html')
- .pipe(
- inject(
- gulp.src([
- options.tmp + '**/*.js',
- options.api + '*.js',
- options.helpers + '**/*.js'
- ])
- .pipe(angularFilesort()),
- {
- ignorePath: [options.src, '/../../ngXosLib']
- }
- )
- )
- .pipe(gulp.dest(options.src));
- });
-
- // inject CSS
- gulp.task('injectCss', function(){
- return gulp.src(options.src + 'index.html')
- .pipe(
- inject(
- gulp.src(options.src + 'css/*.css'),
- {
- ignorePath: [options.src]
- }
- )
- )
- .pipe(gulp.dest(options.src));
- });
-
- // inject bower dependencies with wiredep
- gulp.task('bower', function () {
- return gulp.src(options.src + 'index.html')
- .pipe(wiredep({devDependencies: true}))
- .pipe(gulp.dest(options.src));
- });
-
- gulp.task('js-watch', ['injectScript'], function(){
- browserSync.reload();
- });
-
- gulp.task('cleanTmp', function(){
- return del([options.tmp + '**/*']);
- });
-
- gulp.task('serve', function() {
- runSequence(
- 'bower',
- 'injectScript',
- 'injectCss',
- ['browser']
- );
- });
-};
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/gulpfile.js b/xos/core/xoslib/ngXosViews/sampleView/gulpfile.js
deleted file mode 100644
index f114774..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/gulpfile.js
+++ /dev/null
@@ -1,24 +0,0 @@
-'use strict';
-
-var gulp = require('gulp');
-var wrench = require('wrench');
-
-var options = {
- src: 'src/',
- scripts: 'src/js/',
- tmp: 'src/.tmp',
- dist: 'dist/',
- api: '../../ngXosLib/api/',
- helpers: '../../ngXosLib/xosHelpers/src/',
- static: '../../static/', // this is the django static folder
- dashboards: '../../dashboards/' // this is the django html folder
-};
-
-wrench.readdirSyncRecursive('./gulp')
-.map(function(file) {
- require('./gulp/' + file)(options);
-});
-
-gulp.task('default', function () {
- gulp.start('build');
-});
diff --git a/xos/core/xoslib/ngXosViews/sampleView/karma.conf.js b/xos/core/xoslib/ngXosViews/sampleView/karma.conf.js
deleted file mode 100644
index 83d3f63..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/karma.conf.js
+++ /dev/null
@@ -1,88 +0,0 @@
-// Karma configuration
-// Generated on Tue Oct 06 2015 09:27:10 GMT+0000 (UTC)
-
-/* eslint indent: [2,2], quotes: [2, "single"]*/
-
-/*eslint-disable*/
-var wiredep = require('wiredep');
-var path = require('path');
-
-var bowerComponents = wiredep( {devDependencies: true} )[ 'js' ].map(function( file ){
- return path.relative(process.cwd(), file);
-});
-
-module.exports = function(config) {
-/*eslint-enable*/
- config.set({
-
- // base path that will be used to resolve all patterns (eg. files, exclude)
- basePath: '',
-
-
- // frameworks to use
- // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
- frameworks: ['jasmine'],
-
-
- // list of files / patterns to load in the browser
- files: bowerComponents.concat([
- '../../static/js/xosApi.js',
- '../../static/js/vendor/ngXosHelpers.js',
- 'src/js/**/*.js',
- 'spec/**/*.mock.js',
- 'spec/**/*.test.js',
- 'src/**/*.html'
- ]),
-
-
- // list of files to exclude
- exclude: [
- ],
-
-
- // preprocess matching files before serving them to the browser
- // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
- preprocessors: {
- 'src/js/**/*.js': ['babel'],
- 'spec/**/*.test.js': ['babel'],
- 'src/**/*.html': ['ng-html2js']
- },
-
- ngHtml2JsPreprocessor: {
- stripPrefix: 'src/', //strip the src path from template url (http://stackoverflow.com/questions/22869668/karma-unexpected-request-when-testing-angular-directive-even-with-ng-html2js)
- moduleName: 'templates' // define the template module name
- },
-
- // test results reporter to use
- // possible values: 'dots', 'progress'
- // available reporters: https://npmjs.org/browse/keyword/karma-reporter
- reporters: ['mocha'],
-
-
- // web server port
- port: 9876,
-
-
- // enable / disable colors in the output (reporters and logs)
- colors: true,
-
-
- // level of logging
- // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
- logLevel: config.LOG_INFO,
-
-
- // enable / disable watching file and executing tests whenever any file changes
- autoWatch: true,
-
-
- // start these browsers
- // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
- browsers: ['PhantomJS'],
-
-
- // Continuous Integration mode
- // if true, Karma captures browsers, runs the tests and exits
- singleRun: false
- });
-};
diff --git a/xos/core/xoslib/ngXosViews/sampleView/package.json b/xos/core/xoslib/ngXosViews/sampleView/package.json
deleted file mode 100644
index 1cb8d7f..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/package.json
+++ /dev/null
@@ -1,45 +0,0 @@
-{
- "name": "xos-sampleView",
- "version": "1.0.0",
- "description": "Angular Application for XOS, created with generator-xos",
- "scripts": {
- "prestart": "npm install && bower install",
- "start": "gulp serve",
- "prebuild": "npm install && bower install",
- "build": "gulp",
- "test": "karma start",
- "lint": "eslint src/js/"
- },
- "keywords": [
- "XOS",
- "Angular",
- "XOSlib"
- ],
- "author": "Matteo Scandolo",
- "license": "MIT",
- "dependencies": {},
- "devDependencies": {
- "browser-sync": "^2.9.11",
- "del": "^2.0.2",
- "gulp": "^3.9.0",
- "gulp-angular-filesort": "^1.1.1",
- "gulp-angular-templatecache": "^1.8.0",
- "gulp-babel": "^5.3.0",
- "gulp-concat": "^2.6.0",
- "gulp-inject": "^3.0.0",
- "gulp-minify-html": "^1.0.4",
- "gulp-rename": "^1.2.2",
- "gulp-replace": "^0.5.4",
- "gulp-uglify": "^1.4.2",
- "http-proxy": "^1.12.0",
- "proxy-middleware": "^0.15.0",
- "run-sequence": "^1.1.4",
- "wiredep": "^3.0.0-beta",
- "wrench": "^1.5.8",
- "gulp-ng-annotate": "^1.1.0",
- "lodash": "^3.10.1",
- "eslint": "^1.8.0",
- "eslint-plugin-angular": "linkmesrl/eslint-plugin-angular",
- "gulp-eslint": "^1.0.0"
- }
-}
diff --git a/xos/core/xoslib/ngXosViews/sampleView/src/css/dev.css b/xos/core/xoslib/ngXosViews/sampleView/src/css/dev.css
deleted file mode 100644
index 74330ad..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/src/css/dev.css
+++ /dev/null
@@ -1,5 +0,0 @@
-#xosSampleView{
- position: absolute;
- top: 100px;
- left: 200px;
-}
\ No newline at end of file
diff --git a/xos/core/xoslib/ngXosViews/sampleView/src/index.html b/xos/core/xoslib/ngXosViews/sampleView/src/index.html
deleted file mode 100644
index b745fa6..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/src/index.html
+++ /dev/null
@@ -1,32 +0,0 @@
-<!-- browserSync -->
-<!-- bower:css -->
-<link rel="stylesheet" href="vendor/bootstrap-css/css/bootstrap.css" />
-<!-- endbower --><!-- endcss -->
-<!-- inject:css -->
-<link rel="stylesheet" href="/css/dev.css">
-<!-- endinject -->
-
-<div ng-app="xos.sampleView" id="xosSampleView">
- <div ui-view></div>
-</div>
-
-<!-- bower:js -->
-<script src="vendor/jquery/dist/jquery.js"></script>
-<script src="vendor/angular/angular.js"></script>
-<script src="vendor/angular-mocks/angular-mocks.js"></script>
-<script src="vendor/angular-ui-router/release/angular-ui-router.js"></script>
-<script src="vendor/angular-cookies/angular-cookies.js"></script>
-<script src="vendor/angular-resource/angular-resource.js"></script>
-<script src="vendor/ng-lodash/build/ng-lodash.js"></script>
-<script src="vendor/bootstrap-css/js/bootstrap.js"></script>
-<!-- endbower --><!-- endjs -->
-<!-- inject:js -->
-<script src="/xosHelpers/src/xosHelpers.module.js"></script>
-<script src="/xosHelpers/src/services/noHyperlinks.interceptor.js"></script>
-<script src="/xosHelpers/src/services/csrfToken.interceptor.js"></script>
-<script src="/xosHelpers/src/services/api.services.js"></script>
-<script src="/api/ng-xoslib.js"></script>
-<script src="/api/ng-xos.js"></script>
-<script src="/api/ng-hpcapi.js"></script>
-<script src="/.tmp/main.js"></script>
-<!-- endinject -->
diff --git a/xos/core/xoslib/ngXosViews/sampleView/src/js/main.js b/xos/core/xoslib/ngXosViews/sampleView/src/js/main.js
deleted file mode 100644
index 2c42227..0000000
--- a/xos/core/xoslib/ngXosViews/sampleView/src/js/main.js
+++ /dev/null
@@ -1,39 +0,0 @@
-'use strict';
-
-angular.module('xos.sampleView', [
- 'ngResource',
- 'ngCookies',
- 'ngLodash',
- 'ui.router',
- 'xos.helpers'
-])
-.config(($stateProvider) => {
-
- $stateProvider
- .state('user-list', {
- url: '/',
- template: '<users-list></users-list>'
- });
-})
-.config(function($httpProvider){
- $httpProvider.interceptors.push('NoHyperlinks');
-})
-.directive('usersList', function(){
- return {
- restrict: 'E',
- scope: {},
- bindToController: true,
- controllerAs: 'vm',
- templateUrl: 'templates/users-list.tpl.html',
- controller: function(XosApi){
- // retrieving user list
- XosApi.User_List_GET()
- .then((users) => {
- this.users = users;
- })
- .catch((e) => {
- throw new Error(e);
- });
- }
- };
-});
\ No newline at end of file
diff --git a/xos/core/xoslib/objects/cordsubscriber.py b/xos/core/xoslib/objects/cordsubscriber.py
index 318d54c..089c91b 100644
--- a/xos/core/xoslib/objects/cordsubscriber.py
+++ b/xos/core/xoslib/objects/cordsubscriber.py
@@ -113,7 +113,9 @@
# ("services", "vcpe.services"),
# ("cdn_enable", "vcpe.cdn_enable"),
- ("vlan_id", "volt.vlan_id"),
+ ("vlan_id", "volt.vlan_id"), # XXX remove this
+ ("c_tag", "volt.c_tag"),
+ ("s_tag", "volt.s_tag"),
("bbs_account", "volt.vcpe.bbs_account"),
("ssh_command", "volt.vcpe.ssh_command"),
diff --git a/xos/core/xoslib/package.json b/xos/core/xoslib/package.json
index c512432..dd4ea4c 100644
--- a/xos/core/xoslib/package.json
+++ b/xos/core/xoslib/package.json
@@ -26,5 +26,6 @@
"karma-phantomjs-launcher": "~0.2.1",
"phantomjs": "~1.9.18",
"wiredep": "^3.0.0-beta"
- }
+ },
+ "dependencies": {}
}
diff --git a/xos/core/xoslib/static/js/xosContentProvider.js b/xos/core/xoslib/static/js/xosContentProvider.js
index 219cbca..58ea2f6 100644
--- a/xos/core/xoslib/static/js/xosContentProvider.js
+++ b/xos/core/xoslib/static/js/xosContentProvider.js
@@ -1 +1 @@
-"use strict";angular.module("xos.contentProvider",["ngResource","ngCookies","ngLodash","xos.helpers","ui.router","xos.xos"]).config(["$stateProvider","$urlRouterProvider",function(n,e){n.state("list",{url:"/",template:"<content-provider-list></content-provider-list>"}).state("details",{url:"/contentProvider/:id",template:"<content-provider-detail></content-provider-detail>"}).state("cdn",{url:"/contentProvider/:id/cdn_prefix",template:"<content-provider-cdn></content-provider-cdn>"}).state("server",{url:"/contentProvider/:id/origin_server",template:"<content-provider-server></content-provider-server>"}).state("users",{url:"/contentProvider/:id/users",template:"<content-provider-users></content-provider-users>"})}]).config(["$httpProvider",function(n){n.interceptors.push("SetCSRFToken"),n.interceptors.push("NoHyperlinks")}]).service("ContentProvider",["$resource",function(n){return n("/hpcapi/contentproviders/:id/",{id:"@id"},{update:{method:"PUT"}})}]).service("ServiceProvider",["$resource",function(n){return n("/hpcapi/serviceproviders/:id/",{id:"@id"})}]).service("CdnPrefix",["$resource",function(n){return n("/hpcapi/cdnprefixs/:id/",{id:"@id"})}]).service("OriginServer",["$resource",function(n){return n("/hpcapi/originservers/:id/",{id:"@id"})}]).service("User",["$resource",function(n){return n("/xos/users/:id/",{id:"@id"})}]).directive("cpActions",["ContentProvider","$location",function(n,e){return{restrict:"E",scope:{id:"=id"},bindToController:!0,controllerAs:"vm",templateUrl:"templates/cp_actions.html",controller:function(){this.deleteCp=function(t){n["delete"]({id:t}).$promise.then(function(){e.url("/")})}}}}]).directive("contentProviderList",["ContentProvider","lodash",function(n,e){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_list.html",controller:function(){var t=this;n.query().$promise.then(function(n){t.contentProviderList=n})["catch"](function(n){throw new Error(n)}),this.deleteCp=function(s){n["delete"]({id:s}).$promise.then(function(){e.remove(t.contentProviderList,{id:s})})}}}}]).directive("contentProviderDetail",["ContentProvider","ServiceProvider","$stateParams","$location",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_detail.html",controller:function(){this.pageName="detail";var i=this;t.id?n.get({id:t.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}):i.cp=new n,e.query().$promise.then(function(n){i.sp=n}),this.saveContentProvider=function(n){var e,t=!1;n.id?e=n.$update():(t=!0,n.name=n.humanReadableName,e=n.$save()),e.then(function(n){i.result={status:1,msg:"Content Provider Saved"},t&&s.url("contentProvider/"+n.id+"/")})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderCdn",["$stateParams","CdnPrefix","ContentProvider","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_cdn_prefix.html",controller:function(){var i=this;this.pageName="cdn",n.id&&t.get({id:n.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),e.query().$promise.then(function(e){i.prf=e,i.cp_prf=s.where(e,{contentProvider:parseInt(n.id)})})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.addPrefix=function(t){t.contentProvider=n.id;var s=new 
e(t);s.$save().then(function(n){i.cp_prf.push(n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})},this.removePrefix=function(n){n.$delete().then(function(){s.remove(i.cp_prf,n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderServer",["$stateParams","OriginServer","ContentProvider","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_origin_server.html",controller:function(){this.pageName="server",this.protocols={http:"HTTP",rtmp:"RTMP",rtp:"RTP",shout:"SHOUTcast"};var i=this;n.id&&t.get({id:n.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),e.query({contentProvider:n.id}).$promise.then(function(n){i.cp_os=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.addOrigin=function(t){t.contentProvider=n.id;var s=new e(t);s.$save().then(function(n){i.cp_os.push(n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})},this.removeOrigin=function(n){n.$delete().then(function(){s.remove(i.cp_os,n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderUsers",["$stateParams","ContentProvider","User","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_user.html",controller:function(){var i=this;this.pageName="user",this.cp_users=[],n.id&&t.query().$promise.then(function(t){return i.users=t,e.get({id:n.id}).$promise}).then(function(n){return n.users=i.populateUser(n.users,i.users),n}).then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.populateUser=function(n,e){for(var t=0;t<n.length;t++)n[t]=s.find(e,{id:n[t]});return n},this.addUserToCp=function(n){i.cp.users.push(n)},this.removeUserFromCp=function(n){s.remove(i.cp.users,n)},this.saveContentProvider=function(n){n.users=s.pluck(n.users,"id"),n.$update().then(function(n){i.cp.users=i.populateUser(n.users,i.users),i.result={status:1,msg:"Content Provider Saved"}})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]),angular.module("xos.contentProvider").run(["$templateCache",function(n){n.put("templates/cp_actions.html",'<a href="#/" class="btn btn-default">\n <i class="icon icon-arrow-left"></i>Back\n</a>\n<a href="#/contentProvider/" class="btn btn-success">\n <i class="icon icon-plus"></i>Create\n</a>\n<a ng-click="vm.deleteCp(vm.id)" class="btn btn-danger">\n <i class="icon icon-remove"></i>Remove\n</a>'),n.put("templates/cp_cdn_prefix.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp_prf" class="well">\n <div class="row-fluid">\n <div class="span4">\n {{item.humanReadableName}}\n </div>\n <div class="span6">\n <!-- TODO show the name instead that id -->\n {{item.defaultOriginServer}}\n </div>\n <div class="span2">\n <a ng-click="vm.removePrefix(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.addPrefix(vm.new_prf)">\n <div class="row-fluid">\n <div class="span4">\n <label>Prefix</label>\n <input type="text" ng-model="vm.new_prf.prefix" required style="max-width: 90%">\n </div>\n <div class="span6">\n <label>Default 
Origin Server</label>\n <select ng-model="vm.new_prf.defaultOriginServer" style="max-width: 100%">\n <option ng-repeat="prf in vm.prf" ng-value="prf.id">{$ prf.humanReadableName $}</option>\n </select>\n </div>\n <div class="span2 text-right">\n <button class="btn btn-success margin-wells">\n <i class="icon icon-plus"></i>\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_detail.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div ng-show="vm.cp.id" class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div ng-class="{span10: vm.cp.id, span12: !vm.cp.id}">\n <!-- TODO hide form on not found -->\n <form ng-submit="vm.saveContentProvider(vm.cp)">\n <fieldset>\n <div class="row-fluid">\n <div class="span6">\n <label>Name:</label>\n <input type="text" ng-model="vm.cp.humanReadableName" required/>\n </div>\n <div class="span6">\n <label class="checkbox">\n <input type="checkbox" ng-model="vm.cp.enabled" /> Enabled\n </label>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <label>Description</label>\n <textarea style="width: 100%" ng-model="vm.cp.description"></textarea>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <label>Service provider</label>\n <select required ng-model="vm.cp.serviceProvider" ng-options="sp.id as sp.humanReadableName for sp in vm.sp"></select>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <button class="btn btn-success">\n <span ng-show="vm.cp.id">Save</span>\n <span ng-show="!vm.cp.id">Create</span>\n </button>\n </div>\n </div>\n </fieldset>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_list.html",'<table class="table table-striped" ng-show="vm.contentProviderList.length > 0">\n <thead>\n <tr>\n <th>\n Name\n </th>\n <th>Description</th>\n <th>Status</th>\n <th></th>\n </tr>\n </thead>\n <tr ng-repeat="item in vm.contentProviderList">\n <td>\n <a ui-sref="details({ id: item.id })">{$ item.humanReadableName $}</a>\n </td>\n <td>\n {$ item.description $}\n </td>\n <td>\n {$ item.enabled $}\n </td>\n <td class="text-right">\n <a ng-click="vm.deleteCp(item.id)" class="btn btn-danger"><i class="icon icon-remove"></i></a></td>\n </tr>\n</table>\n<div class="alert alert-error" ng-show="vm.contentProviderList.length == 0">\n No Content Provider defined\n</div>\n\n<div class="row">\n <div class="span12 text-right">\n <a class="btn btn-success"href="#/contentProvider/">Create</a>\n </div>\n</div>'),n.put("templates/cp_origin_server.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp_os" class="well">\n <div class="row-fluid">\n <div class="span4">\n {{item.humanReadableName}}\n </div>\n <div class="span6">\n <!-- TODO shoe the name 
instead that url -->\n {{item.defaultOriginServer}}\n </div>\n <div class="span2">\n <a ng-click="vm.removeOrigin(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.addOrigin(vm.new_os)">\n <div class="row-fluid">\n <div class="span4">\n <label>Protocol</label>\n <select ng-model="vm.new_os.protocol" ng-options="k as v for (k,v) in vm.protocols" style="max-width: 100%;"></select>\n </div>\n <div class="span6">\n <label>Url</label>\n <input type="text" ng-model="vm.new_os.url" required>\n </div>\n <div class="span2 text-right">\n <button class="btn btn-success margin-wells">\n <i class="icon icon-plus"></i>\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_side_nav.html",'<ul class="nav nav-list">\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'detail\'}" href="#/contentProvider/{$ vm.cp.id $}">Details</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'cdn\'}" href="#/contentProvider/{$ vm.cp.id $}/cdn_prefix">Cdn Prexix</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'server\'}" href="#/contentProvider/{$ vm.cp.id $}/origin_server">Origin Server</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'user\'}" href="#/contentProvider/{$ vm.cp.id $}/users">Users</a>\n </li>\n</ul>'),n.put("templates/cp_user.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp.users" class="well">\n <div class="row-fluid">\n <div class="span3">\n {{item.firstname}}\n </div>\n <div class="span3">\n {{item.lastname}}\n </div>\n <div class="span4">\n {{item.email}}\n </div>\n <div class="span2">\n <a ng-click="vm.removeUserFromCp(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.saveContentProvider(vm.cp)">\n <div class="row-fluid">\n <div class="span8">\n <label>Select user:</label>\n <select ng-model="vm.user" ng-options="u as u.username for u in vm.users" ng-change="vm.addUserToCp(vm.user)"></select>\n </div> \n <div class="span4 text-right">\n <button class="btn btn-success margin-wells">\n Save\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>')}]),angular.bootstrap(angular.element("#xosContentProvider"),["xos.contentProvider"]);
\ No newline at end of file
+"use strict";angular.module("xos.contentProvider",["ngResource","ngCookies","ngLodash","xos.helpers","ui.router","xos.xos"]).config(["$stateProvider","$urlRouterProvider",function(n,e){n.state("list",{url:"/",template:"<content-provider-list></content-provider-list>"}).state("details",{url:"/contentProvider/:id",template:"<content-provider-detail></content-provider-detail>"}).state("cdn",{url:"/contentProvider/:id/cdn_prefix",template:"<content-provider-cdn></content-provider-cdn>"}).state("server",{url:"/contentProvider/:id/origin_server",template:"<content-provider-server></content-provider-server>"}).state("users",{url:"/contentProvider/:id/users",template:"<content-provider-users></content-provider-users>"})}]).config(["$httpProvider",function(n){n.interceptors.push("SetCSRFToken"),n.interceptors.push("NoHyperlinks")}]).service("ContentProvider",["$resource",function(n){return n("/hpcapi/contentproviders/:id/",{id:"@id"},{update:{method:"PUT"}})}]).service("ServiceProvider",["$resource",function(n){return n("/hpcapi/serviceproviders/:id/",{id:"@id"})}]).service("CdnPrefix",["$resource",function(n){return n("/hpcapi/cdnprefixs/:id/",{id:"@id"})}]).service("OriginServer",["$resource",function(n){return n("/hpcapi/originservers/:id/",{id:"@id"})}]).service("User",["$resource",function(n){return n("/xos/users/:id/",{id:"@id"})}]).directive("cpActions",["ContentProvider","$location",function(n,e){return{restrict:"E",scope:{id:"=id"},bindToController:!0,controllerAs:"vm",templateUrl:"templates/cp_actions.html",controller:function(){this.deleteCp=function(t){n["delete"]({id:t}).$promise.then(function(){e.url("/")})}}}}]).directive("contentProviderList",["ContentProvider","lodash",function(n,e){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_list.html",controller:function(){var t=this;n.query().$promise.then(function(n){t.contentProviderList=n})["catch"](function(n){throw new Error(n)}),this.deleteCp=function(s){n["delete"]({id:s}).$promise.then(function(){e.remove(t.contentProviderList,{id:s})})}}}}]).directive("contentProviderDetail",["ContentProvider","ServiceProvider","$stateParams","$location",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_detail.html",controller:function(){this.pageName="detail";var i=this;t.id?n.get({id:t.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}):i.cp=new n,e.query().$promise.then(function(n){i.sp=n}),this.saveContentProvider=function(n){var e,t=!1;n.id?e=n.$update():(t=!0,n.name=n.humanReadableName,e=n.$save()),e.then(function(n){i.result={status:1,msg:"Content Provider Saved"},t&&s.url("contentProvider/"+n.id+"/")})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderCdn",["$stateParams","CdnPrefix","ContentProvider","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_cdn_prefix.html",controller:function(){var i=this;this.pageName="cdn",n.id&&t.get({id:n.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),e.query().$promise.then(function(e){i.prf=e,i.cp_prf=s.where(e,{contentProvider:parseInt(n.id)})})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.addPrefix=function(t){t.contentProvider=n.id;var s=new 
e(t);s.$save().then(function(n){i.cp_prf.push(n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})},this.removePrefix=function(n){n.$delete().then(function(){s.remove(i.cp_prf,n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderServer",["$stateParams","OriginServer","ContentProvider","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_origin_server.html",controller:function(){this.pageName="server",this.protocols={http:"HTTP",rtmp:"RTMP",rtp:"RTP",shout:"SHOUTcast"};var i=this;n.id&&t.get({id:n.id}).$promise.then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),e.query({contentProvider:n.id}).$promise.then(function(n){i.cp_os=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.addOrigin=function(t){t.contentProvider=n.id;var s=new e(t);s.$save().then(function(n){i.cp_os.push(n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})},this.removeOrigin=function(n){n.$delete().then(function(){s.remove(i.cp_os,n)})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]).directive("contentProviderUsers",["$stateParams","ContentProvider","User","lodash",function(n,e,t,s){return{restrict:"E",controllerAs:"vm",scope:{},templateUrl:"templates/cp_user.html",controller:function(){var i=this;this.pageName="user",this.cp_users=[],n.id&&t.query().$promise.then(function(t){return i.users=t,e.get({id:n.id}).$promise}).then(function(n){return n.users=i.populateUser(n.users,i.users),n}).then(function(n){i.cp=n})["catch"](function(n){i.result={status:0,msg:n.data.detail}}),this.populateUser=function(n,e){for(var t=0;t<n.length;t++)n[t]=s.find(e,{id:n[t]});return n},this.addUserToCp=function(n){i.cp.users.push(n)},this.removeUserFromCp=function(n){s.remove(i.cp.users,n)},this.saveContentProvider=function(n){n.users=s.pluck(n.users,"id"),n.$update().then(function(n){i.cp.users=i.populateUser(n.users,i.users),i.result={status:1,msg:"Content Provider Saved"}})["catch"](function(n){i.result={status:0,msg:n.data.detail}})}}}}]),angular.module("xos.contentProvider").run(["$templateCache",function(n){n.put("templates/cp_actions.html",'<a href="#/" class="btn btn-default">\n <i class="icon icon-arrow-left"></i>Back\n</a>\n<a href="#/contentProvider/" class="btn btn-success">\n <i class="icon icon-plus"></i>Create\n</a>\n<a ng-click="vm.deleteCp(vm.id)" class="btn btn-danger">\n <i class="icon icon-remove"></i>Remove\n</a>'),n.put("templates/cp_cdn_prefix.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp_prf" class="well">\n <div class="row-fluid">\n <div class="span4">\n {{item.humanReadableName}}\n </div>\n <div class="span6">\n <!-- TODO show the name instead that id -->\n {{item.defaultOriginServer}}\n </div>\n <div class="span2">\n <a ng-click="vm.removePrefix(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.addPrefix(vm.new_prf)">\n <div class="row-fluid">\n <div class="span4">\n <label>Prefix</label>\n <input type="text" ng-model="vm.new_prf.prefix" required style="max-width: 90%">\n </div>\n <div class="span6">\n <label>Default 
Origin Server</label>\n <select ng-model="vm.new_prf.defaultOriginServer" style="max-width: 100%">\n <option ng-repeat="prf in vm.prf" ng-value="prf.id">{$ prf.humanReadableName $}</option>\n </select>\n </div>\n <div class="span2 text-right">\n <button class="btn btn-success margin-wells">\n <i class="icon icon-plus"></i>\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_detail.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div ng-show="vm.cp.id" class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div ng-class="{span10: vm.cp.id, span12: !vm.cp.id}">\n <!-- TODO hide form on not found -->\n <form ng-submit="vm.saveContentProvider(vm.cp)">\n <fieldset>\n <div class="row-fluid">\n <div class="span6">\n <label>Name:</label>\n <input type="text" ng-model="vm.cp.humanReadableName" required/>\n </div>\n <div class="span6">\n <label class="checkbox">\n <input type="checkbox" ng-model="vm.cp.enabled" /> Enabled\n </label>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <label>Description</label>\n <textarea style="width: 100%" ng-model="vm.cp.description"></textarea>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <label>Service provider</label>\n <select required ng-model="vm.cp.serviceProvider" ng-options="sp.id as sp.humanReadableName for sp in vm.sp"></select>\n </div>\n </div>\n <div class="row-fluid">\n <div class="span12">\n <button class="btn btn-success">\n <span ng-show="vm.cp.id">Save</span>\n <span ng-show="!vm.cp.id">Create</span>\n </button>\n </div>\n </div>\n </fieldset>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_list.html",'<table class="table table-striped" ng-show="vm.contentProviderList.length > 0">\n <thead>\n <tr>\n <th>\n Name\n </th>\n <th>Description</th>\n <th>Status</th>\n <th></th>\n </tr>\n </thead>\n <tr ng-repeat="item in vm.contentProviderList">\n <td>\n <a ui-sref="details({ id: item.id })">{$ item.humanReadableName $}</a>\n </td>\n <td>\n {$ item.description $}\n </td>\n <td>\n {$ item.enabled $}\n </td>\n <td class="text-right">\n <a ng-click="vm.deleteCp(item.id)" class="btn btn-danger"><i class="icon icon-remove"></i></a></td>\n </tr>\n</table>\n<div class="alert alert-error" ng-show="vm.contentProviderList.length == 0">\n No Content Provider defined\n</div>\n\n<div class="row">\n <div class="span12 text-right">\n <a class="btn btn-success"href="#/contentProvider/">Create</a>\n </div>\n</div>'),n.put("templates/cp_origin_server.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp_os" class="well">\n <div class="row-fluid">\n <div class="span4">\n {{item.humanReadableName}}\n </div>\n <div class="span6">\n <!-- TODO shoe the name 
instead that url -->\n {{item.defaultOriginServer}}\n </div>\n <div class="span2">\n <a ng-click="vm.removeOrigin(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.addOrigin(vm.new_os)">\n <div class="row-fluid">\n <div class="span4">\n <label>Protocol</label>\n <select ng-model="vm.new_os.protocol" ng-options="k as v for (k,v) in vm.protocols" style="max-width: 100%;"></select>\n </div>\n <div class="span6">\n <label>Url</label>\n <input type="text" ng-model="vm.new_os.url" required>\n </div>\n <div class="span2 text-right">\n <button class="btn btn-success margin-wells">\n <i class="icon icon-plus"></i>\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/cp_side_nav.html",'<ul class="nav nav-list">\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'detail\'}" href="#/contentProvider/{$ vm.cp.id $}">Details</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'cdn\'}" href="#/contentProvider/{$ vm.cp.id $}/cdn_prefix">Cdn Prexix</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'server\'}" href="#/contentProvider/{$ vm.cp.id $}/origin_server">Origin Server</a>\n </li>\n <li>\n <a class="btn" ng-class="{\'btn-primary\': vm.pageName == \'user\'}" href="#/contentProvider/{$ vm.cp.id $}/users">Users</a>\n </li>\n</ul>'),n.put("templates/cp_user.html",'<div class="row-fluid">\n <div class="span6">\n <h1>{$ vm.cp.humanReadableName $}</h1>\n </div>\n <div class="span6 text-right">\n <cp-actions id="vm.cp.id"></cp-actions>\n </div>\n</div>\n<hr>\n<div class="row-fluid">\n <div class="span2">\n <div ng-include="\'templates/cp_side_nav.html\'"></div>\n </div>\n <div class="span10">\n <div ng-repeat="item in vm.cp.users" class="well">\n <div class="row-fluid">\n <div class="span3">\n {{item.firstname}}\n </div>\n <div class="span3">\n {{item.lastname}}\n </div>\n <div class="span4">\n {{item.email}}\n </div>\n <div class="span2">\n <a ng-click="vm.removeUserFromCp(item)" class="btn btn-danger pull-right">\n <i class="icon icon-remove"></i>\n </a>\n </div>\n </div>\n </div>\n <hr>\n <form ng-submit="vm.saveContentProvider(vm.cp)">\n <div class="row-fluid">\n <div class="span8">\n <label>Select user:</label>\n <select ng-model="vm.user" ng-options="u as u.username for u in vm.users" ng-change="vm.addUserToCp(vm.user)"></select>\n </div> \n <div class="span4 text-right">\n <button class="btn btn-success margin-wells">\n Save\n </button>\n </div>\n </div>\n </form>\n <div class="alert" ng-show="vm.result" ng-class="{\'alert-success\': vm.result.status === 1,\'alert-error\': vm.result.status === 0}">\n {$ vm.result.msg $}\n </div>\n </div>\n</div>'),n.put("templates/users-list.tpl.html",'<div class="row">\n <h1>Users List</h1>\n <p>This is only an example view.</p>\n</div>\n<div class="row">\n <div class="span4">Email</div>\n <div class="span4">First Name</div>\n <div class="span4">Last Name</div>\n</div> \n<div class="row" ng-repeat="user in vm.users">\n <div class="span4">{{user.email}}</div>\n <div class="span4">{{user.firstname}}</div>\n <div class="span4">{{user.lastname}}</div>\n</div> 
')}]),angular.module("xos.contentProvider").run(["$location",function(n){n.path("/")}]),angular.bootstrap(angular.element("#xosContentProvider"),["xos.contentProvider"]);
\ No newline at end of file
diff --git a/xos/core/xoslib/templates/xosCordSubscriber.html b/xos/core/xoslib/templates/xosCordSubscriber.html
index b7e2163..db42fb8 100644
--- a/xos/core/xoslib/templates/xosCordSubscriber.html
+++ b/xos/core/xoslib/templates/xosCordSubscriber.html
@@ -7,7 +7,8 @@
<table class="xos-detail-table cord-subscriber-table">
<tr><td class="xos-label-cell">Id:</td><td><%= model.attributes.id %></td></tr>
<tr><td class="xos-label-cell">Service Specific Id:</td><td><%= model.attributes.service_specific_id %></td></tr>
- <tr><td class="xos-label-cell">VLAN Id:</td><td><%= model.attributes.vlan_id %></td></tr>
+ <tr><td class="xos-label-cell">S-Tag:</td><td><%= model.attributes.s_tag %></td></tr>
+ <tr><td class="xos-label-cell">C-Tag:</td><td><%= model.attributes.c_tag %></td></tr>
</table>
</div>
diff --git a/xos/helloworld/models.py b/xos/helloworld/models.py
index a657f3a..9bb343e 100644
--- a/xos/helloworld/models.py
+++ b/xos/helloworld/models.py
@@ -11,7 +11,7 @@
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
- instance_backref = models.ForeignKey(Instance)
+ instance_backref = models.ForeignKey(Instance,related_name="hellos")
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
diff --git a/xos/helloworld/view.py b/xos/helloworld/view.py
index b3eec29..7024747 100644
--- a/xos/helloworld/view.py
+++ b/xos/helloworld/view.py
@@ -34,9 +34,9 @@
i.instance_name=None
i.enacted=None
i.save()
- h = Hello(name=hello_name,sliver_backref=i)
- w = World(hello=h,name=world_name)
+ h = Hello(name=hello_name,instance_backref=i)
h.save()
+ w = World(hello=h,name=world_name)
w.save()
t = template.Template(head_template + 'Done. New instance id: %r'%i.pk + self.tail_template)
diff --git a/xos/model-deps b/xos/model-deps
index ea32eb9..59bbe25 100644
--- a/xos/model-deps
+++ b/xos/model-deps
@@ -1,8 +1,6 @@
{
"Slice": [
"Site",
- "Service",
- "ServiceClass",
"User"
],
"ImageDeployments": [
@@ -65,9 +63,6 @@
"Reservation": [
"Slice"
],
- "ServiceResource": [
- "ServiceClass"
- ],
"Instance": [
"Image",
"User",
@@ -79,9 +74,6 @@
"Account": [
"Site"
],
- "ServiceAttribute": [
- "Service"
- ],
"ControllerSlicePrivilege": [
"Controller"
],
@@ -116,9 +108,6 @@
"Controller",
"DashboardView"
],
- "Tag": [
- "Service"
- ],
"Invoice": [
"Account"
],
diff --git a/xos/model_policies/model_policy_Image.py b/xos/model_policies/model_policy_Image.py
index 72f76fa..c77d5bb 100644
--- a/xos/model_policies/model_policy_Image.py
+++ b/xos/model_policies/model_policy_Image.py
@@ -2,6 +2,10 @@
from core.models import Controller, ControllerImages, Image
from collections import defaultdict
+ if (image.kind == "container"):
+ # container images do not get instantiated
+ return
+
controller_images = ControllerImages.objects.filter(image=image)
existing_controllers = [cs.controller for cs in controller_images]
diff --git a/xos/model_policies/model_policy_Instance.py b/xos/model_policies/model_policy_Instance.py
index a13428d..ffc9847 100644
--- a/xos/model_policies/model_policy_Instance.py
+++ b/xos/model_policies/model_policy_Instance.py
@@ -1,3 +1,44 @@
+def handle_container_on_metal(instance):
+    from core.models import Instance, Flavor, Port, Image
+    from xos.exceptions import XOSConfigurationError  # assumed import path for XOSConfigurationError
+
+ print "MODEL POLICY: instance", instance, "handle container_on_metal"
+
+ if instance.deleted:
+ return
+
+ if (instance.isolation in ["container"]):
+ # Our current docker-on-metal network strategy requires that there be some
+ # VM on the server that connects to the networks, so that
+ # the containers can piggyback off of that configuration.
+ if not Instance.objects.filter(slice=instance.slice, node=instance.node, isolation="vm").exists():
+ flavors = Flavor.objects.filter(name="m1.small")
+ if not flavors:
+ raise XOSConfigurationError("No m1.small flavor")
+
+ images = Image.objects.filter(kind="vm")
+
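+            # companion VM is created on the same node as the container; images[0] assumes at least one "vm" image exists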
+ companion_instance = Instance(slice = instance.slice,
+ node = instance.node,
+ image = images[0],
+ creator = instance.creator,
+ deployment = instance.node.site_deployment.deployment,
+ flavor = flavors[0])
+ companion_instance.save()
+
+ print "MODEL POLICY: instance", instance, "created companion", companion_instance
+
+ # Add the ports for the container
+ for network in instance.slice.networks.all():
+ # hmmm... The NAT ports never become ready, because sync_ports never
+ # instantiates them. Need to think about this.
+ print "MODEL POLICY: instance", instance, "handling network", network
+ if (network.name.endswith("-nat")):
+ continue
+
+ if not Port.objects.filter(network=network, instance=instance).exists():
+ port = Port(network = network, instance=instance)
+ port.save()
+ print "MODEL POLICY: instance", instance, "created port", port
def handle(instance):
from core.models import Controller, ControllerSlice, ControllerNetwork, NetworkSlice
@@ -7,7 +48,11 @@
controller=instance.node.site_deployment.controller)
for cn in controller_networks:
- if (cn.lazy_blocked):
+ if (cn.lazy_blocked):
+ print "MODEL POLICY: instance", instance, "unblocking network", cn.network
cn.lazy_blocked=False
cn.backend_register = '{}'
cn.save()
+
+ if (instance.isolation in ["container", "container_vm"]):
+ handle_container_on_metal(instance)
diff --git a/xos/model_policy.py b/xos/model_policy.py
index ced785e..9462b35 100644
--- a/xos/model_policy.py
+++ b/xos/model_policy.py
@@ -105,7 +105,7 @@
def run_policy_once():
from core.models import Instance,Slice,Controller,Network,User,SlicePrivilege,Site,SitePrivilege,Image,ControllerSlice,ControllerUser,ControllerSite
- models = [Instance,Slice, Controller, Network, User, SlicePrivilege, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser]
+ models = [Controller, Site, SitePrivilege, Image, ControllerSlice, ControllerSite, ControllerUser, User, Slice, Network, Instance, SlicePrivilege]
objects = []
deleted_objects = []
diff --git a/xos/observers/base/SyncInstanceUsingAnsible.py b/xos/observers/base/SyncInstanceUsingAnsible.py
index 901bc97..5bb8250 100644
--- a/xos/observers/base/SyncInstanceUsingAnsible.py
+++ b/xos/observers/base/SyncInstanceUsingAnsible.py
@@ -44,9 +44,11 @@
return o.instance
- def run_playbook(self, o, fields):
+ def run_playbook(self, o, fields, template_name=None):
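+        # callers may pass an alternate playbook name; defaults to this step's template_name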
+ if not template_name:
+ template_name = self.template_name
tStart = time.time()
- run_template_ssh(self.template_name, fields)
+ run_template_ssh(template_name, fields)
logger.info("playbook execution time %d" % int(time.time()-tStart))
def pre_sync_hook(self, o, fields):
@@ -61,14 +63,83 @@
def prepare_record(self, o):
pass
+ def get_node(self,o):
+ return o.node
+
+ def get_node_key(self, node):
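+        # default location of the node's private key used for bare-metal ssh; subclasses may override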
+ return "/root/setup/node_key"
+
+ def get_ansible_fields(self, instance):
+ # return all of the fields that tell Ansible how to talk to the context
+ # that's setting up the container.
+
+ if (instance.isolation == "vm"):
+ # legacy where container was configured by sync_vcpetenant.py
+
+ fields = { "instance_name": instance.name,
+ "hostname": instance.node.name,
+ "instance_id": instance.instance_id,
+ "username": "ubuntu",
+ }
+ key_name = self.service_key_name
+ elif (instance.isolation == "container"):
+ # container on bare metal
+ node = self.get_node(instance)
+ hostname = node.name
+ fields = { "hostname": hostname,
+ "baremetal_ssh": True,
+ "instance_name": "rootcontext",
+ "username": "root",
+ "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+ }
+ key_name = self.get_node_key(node)
+ else:
+ # container in a VM
+ if not instance.parent:
+ raise Exception("Container-in-VM has no parent")
+ if not instance.parent.instance_id:
+ raise Exception("Container-in-VM parent is not yet instantiated")
+ if not instance.parent.slice.service:
+ raise Exception("Container-in-VM parent has no service")
+ if not instance.parent.slice.service.private_key_fn:
+ raise Exception("Container-in-VM parent service has no private_key_fn")
+ fields = { "hostname": instance.parent.node.name,
+ "instance_name": instance.parent.name,
+ "instance_id": instance.parent.instance_id,
+ "username": "ubuntu",
+ "nat_ip": instance.parent.get_ssh_ip(),
+ "container_name": "%s-%s" % (instance.slice.name, str(instance.id))
+ }
+ key_name = instance.parent.slice.service.private_key_fn
+
+ if not os.path.exists(key_name):
+            raise Exception("Key %s does not exist" % key_name)
+
+ key = file(key_name).read()
+
+ fields["private_key"] = key
+
+ # now the ceilometer stuff
+
+ cslice = ControllerSlice.objects.get(slice=instance.slice)
+ if not cslice:
+ raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+
+ cuser = ControllerUser.objects.get(user=instance.creator)
+ if not cuser:
+ raise Exception("Controller user object for %s does not exist" % instance.creator)
+
+ fields.update({"keystone_tenant_id": cslice.tenant_id,
+ "keystone_user_id": cuser.kuser_id,
+ "rabbit_user": instance.controller.rabbit_user,
+ "rabbit_password": instance.controller.rabbit_password,
+ "rabbit_host": instance.controller.rabbit_host})
+
+ return fields
+
def sync_record(self, o):
logger.info("sync'ing object %s" % str(o))
- if not os.path.exists(self.service_key_name):
- raise Exception("Service key %s does not exist" % self.service_key_name)
-
- service_key = file(self.service_key_name).read()
-
self.prepare_record(o)
instance = self.get_instance(o)
@@ -92,25 +163,9 @@
self.defer_sync(o, "waiting on instance.instance_name")
return
- cslice = ControllerSlice.objects.get(slice=instance.slice)
- if not cslice:
- raise Exception("Controller slice object for %s does not exist" % instance.slice.name)
+ fields = self.get_ansible_fields(instance)
- cuser = ControllerUser.objects.get(user=instance.creator)
- if not cuser:
- raise Exception("Controller user object for %s does not exist" % instance.creator)
-
- fields = { "instance_name": instance.name,
- "hostname": instance.node.name,
- "instance_id": instance.instance_id,
- "private_key": service_key,
- "keystone_tenant_id": cslice.tenant_id,
- "keystone_user_id": cuser.kuser_id,
- "rabbit_user": instance.controller.rabbit_user,
- "rabbit_password": instance.controller.rabbit_password,
- "rabbit_host": instance.controller.rabbit_host,
- "ansible_tag": o.__class__.__name__ + "_" + str(o.id)
- }
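+        # tag the playbook run with the object's class name and id so its output can be identified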
+ fields["ansible_tag"] = o.__class__.__name__ + "_" + str(o.id)
# If 'o' defines a 'sync_attributes' list, then we'll copy those
# attributes into the Ansible recipe's field list automatically.
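
For orientation, the "fields" dict that get_ansible_fields builds for a container on bare metal has roughly the shape sketched below. This is illustrative only: every concrete value (slice "mysite_vcpe", instance id 42, node "node1.example.org", tag "MyTenant_7") is invented, and the ansible_tag entry is the one merged in later by sync_record.

    # illustrative values only -- nothing here is read from a real deployment
    fields = {
        "hostname": "node1.example.org",            # get_node(instance).name
        "baremetal_ssh": True,                      # ssh to the node itself, not to a VM
        "instance_name": "rootcontext",
        "username": "root",
        "container_name": "mysite_vcpe-42",         # "%s-%s" % (slice.name, instance.id)
        "private_key": "<contents of /root/setup/node_key>",
        "keystone_tenant_id": "<ControllerSlice.tenant_id>",
        "keystone_user_id": "<ControllerUser.kuser_id>",
        "rabbit_user": "<controller.rabbit_user>",
        "rabbit_password": "<controller.rabbit_password>",
        "rabbit_host": "<controller.rabbit_host>",
        "ansible_tag": "MyTenant_7",                # added by sync_record: o's class name + "_" + o.id
    }
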
diff --git a/xos/observers/helloworld/helloworld_config b/xos/observers/helloworld/helloworld_config
index 671af51..e32ee0c 100644
--- a/xos/observers/helloworld/helloworld_config
+++ b/xos/observers/helloworld/helloworld_config
@@ -38,6 +38,7 @@
dependency_graph=/opt/xos/model-deps
logfile=/var/log/xos_backend.log
steps_dir=/opt/xos/observers/helloworld/steps
+applist=helloworld
[gui]
disable_minidashboard=True
diff --git a/xos/observers/helloworld/steps/sync_hello.py b/xos/observers/helloworld/steps/sync_hello.py
index 1fb8c2b..7071ea0 100644
--- a/xos/observers/helloworld/steps/sync_hello.py
+++ b/xos/observers/helloworld/steps/sync_hello.py
@@ -18,7 +18,7 @@
requested_interval=0
def sync_record(self, record):
- instance = record.sliver_backref
+ instance = record.instance_backref
instance.userData="packages:\n - apache2\nruncmd:\n - update-rc.d apache2 enable\n - service apache2 start\nwrite_files:\n- content: Hello %s\n path: /var/www/html/hello.txt"%record.name
instance.save()
diff --git a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
index bbe284f..fb4b73d 100644
--- a/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
+++ b/xos/observers/monitoring_channel/steps/sync_monitoringchannel.yaml
@@ -15,6 +15,7 @@
headnode_flat_lan_ip: {{ rabbit_host }}
ceilometer_client_acess_ip: {{ ceilometer_ip }}
ceilometer_client_acess_mac: {{ ceilometer_mac }}
+ ceilometer_host_port: {{ ceilometer_port }}
allowed_tenant_ids:
{% for allowed_tenant_id in allowed_tenant_ids %}
- {{ allowed_tenant_id }}
diff --git a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2 b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
index cba6f2a..4c712f1 100644
--- a/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
+++ b/xos/observers/monitoring_channel/templates/ceilometer_proxy_config.j2
@@ -9,6 +9,8 @@
admin_password={{ admin_password }}
[allowed_tenants]
+{% if allowed_tenant_ids %}
{% for tenant_id in allowed_tenant_ids %}
{{ tenant_id }}
{% endfor %}
+{% endif %}
diff --git a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2 b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
index 10d9ef5..f56c247 100755
--- a/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
+++ b/xos/observers/monitoring_channel/templates/start-monitoring-channel.sh.j2
@@ -15,13 +15,14 @@
MONITORING_CHANNEL=monitoring-channel-{{ unique_id }}
HEADNODEFLATLANIP={{ headnode_flat_lan_ip }}
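+# host port that is forwarded to port 8000 inside the monitoring-channel container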
+HOST_FORWARDING_PORT_FOR_CEILOMETER={{ ceilometer_host_port }}
docker inspect $MONITORING_CHANNEL > /dev/null 2>&1
if [ "$?" == 1 ]
then
#sudo docker build -t monitoring-channel -f Dockerfile.monitoring_channel .
sudo docker pull srikanthvavila/monitoring-channel
- docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p 8888:8000 srikanthvavila/monitoring-channel
+ docker run -d --name=$MONITORING_CHANNEL --add-host="ctl:$HEADNODEFLATLANIP" --privileged=true -p $HOST_FORWARDING_PORT_FOR_CEILOMETER:8000 srikanthvavila/monitoring-channel
else
docker start $MONITORING_CHANNEL
fi
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.py b/xos/observers/vcpe/steps/sync_vcpetenant.py
index 1a45b54..4f3886e 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.py
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.py
@@ -113,19 +113,26 @@
logger.info("neither bbs_slice nor bbs_server is configured in the vCPE")
vlan_ids = []
+ s_tags = []
+ c_tags = []
if o.volt:
- vlan_ids.append(o.volt.vlan_id)
+ vlan_ids.append(o.volt.vlan_id) # XXX remove this
+ s_tags.append(o.volt.s_tag)
+ c_tags.append(o.volt.c_tag)
try:
full_setup = Config().observer_full_setup
except:
full_setup = True
- fields = {"vlan_ids": vlan_ids,
+ fields = {"vlan_ids": vlan_ids, # XXX remove this
+ "s_tags": s_tags,
+ "c_tags": c_tags,
"dnsdemux_ip": dnsdemux_ip,
"cdn_prefixes": cdn_prefixes,
"bbs_addrs": bbs_addrs,
- "full_setup": full_setup}
+ "full_setup": full_setup,
+ "isolation": o.instance.isolation}
# add in the sync_attributes that come from the SubscriberRoot object
@@ -203,7 +210,10 @@
if quick_update:
logger.info("quick_update triggered; skipping ansible recipe")
else:
- super(SyncVCPETenant, self).run_playbook(o, fields)
+ if o.instance.isolation in ["container", "container_vm"]:
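+                # the container recipe only reconfigures dnsmasq inside the already-running vCPE container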
+ super(SyncVCPETenant, self).run_playbook(o, fields, "sync_vcpetenant_new.yaml")
+ else:
+ super(SyncVCPETenant, self).run_playbook(o, fields)
o.last_ansible_hash = ansible_hash
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index b5a112a..c3b7246 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -13,6 +13,14 @@
{% for vlan_id in vlan_ids %}
- {{ vlan_id }}
{% endfor %}
+ c_tags:
+ {% for c_tag in c_tags %}
+ - {{ c_tag }}
+ {% endfor %}
+ s_tags:
+ {% for s_tag in s_tags %}
+ - {{ s_tag }}
+ {% endfor %}
firewall_rules:
{% for firewall_rule in firewall_rules.split("\n") %}
- {{ firewall_rule }}
@@ -109,27 +117,27 @@
{% endif %}
- name: vCPE upstart
- template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ vlan_ids[0] }}.conf
+ template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
- name: vCPE startup script
- template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh mode=0755
+ template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
notify:
# - restart vcpe
- stop vcpe
- remove container
- start vcpe
- - name: create /etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d
- file: path=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d state=directory owner=root group=root
+ - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
+ file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
- name: vCPE basic dnsmasq config
- copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
+ copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
notify:
- stop dnsmasq
- start dnsmasq
- name: dnsmasq config
- template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/servers.conf owner=root group=root
+ template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
notify:
- stop dnsmasq
- start dnsmasq
@@ -143,23 +151,23 @@
# template: src=/opt/xos/observers/vcpe/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
- name: Make sure vCPE service is running
- service: name=vcpe-{{ vlan_ids[0] }} state=started
+ service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
handlers:
- name: stop dnsmasq
- shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/bin/killall dnsmasq
+ shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/bin/killall dnsmasq
- name: start dnsmasq
- shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/sbin/service dnsmasq start
+ shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/sbin/service dnsmasq start
- name: restart vcpe
- shell: service vcpe-{{ vlan_ids[0] }} stop; sleep 1; service vcpe-{{ vlan_ids[0] }} start
+ shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
- name: stop vcpe
- service: name=vcpe-{{ vlan_ids[0] }} state=stopped
+ service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
- name: remove container
- docker: name=vcpe-{{ vlan_ids[0] }} state=absent image=docker-vcpe
+ docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
- name: start vcpe
- service: name=vcpe-{{ vlan_ids[0] }} state=started
+ service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
new file mode 100644
index 0000000..e96f3c5
--- /dev/null
+++ b/xos/observers/vcpe/steps/sync_vcpetenant_new.yaml
@@ -0,0 +1,85 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: {{ username }}
+ sudo: yes
+ vars:
+ container_name: {{ container_name }}
+ cdn_enable: {{ cdn_enable }}
+ dnsdemux_ip: {{ dnsdemux_ip }}
+ firewall_enable: {{ firewall_enable }}
+ url_filter_enable: {{ url_filter_enable }}
+ vlan_ids:
+ {% for vlan_id in vlan_ids %}
+ - {{ vlan_id }}
+ {% endfor %}
+ c_tags:
+ {% for c_tag in c_tags %}
+ - {{ c_tag }}
+ {% endfor %}
+ s_tags:
+ {% for s_tag in s_tags %}
+ - {{ s_tag }}
+ {% endfor %}
+ firewall_rules:
+ {% for firewall_rule in firewall_rules.split("\n") %}
+ - {{ firewall_rule }}
+ {% endfor %}
+ cdn_prefixes:
+ {% for prefix in cdn_prefixes %}
+ - {{ prefix }}
+ {% endfor %}
+ bbs_addrs:
+ {% for bbs_addr in bbs_addrs %}
+ - {{ bbs_addr }}
+ {% endfor %}
+ nat_ip: {{ nat_ip }}
+ nat_mac: {{ nat_mac }}
+ lan_ip: {{ lan_ip }}
+ lan_mac: {{ lan_mac }}
+ wan_ip: {{ wan_ip }}
+ wan_mac: {{ wan_mac }}
+ wan_container_mac: {{ wan_container_mac }}
+ wan_next_hop: 10.0.1.253 # FIX ME
+ private_ip: {{ private_ip }}
+ private_mac: {{ private_mac }}
+ hpc_client_ip: {{ hpc_client_ip }}
+ hpc_client_mac: {{ hpc_client_mac }}
+ keystone_tenant_id: {{ keystone_tenant_id }}
+ keystone_user_id: {{ keystone_user_id }}
+ rabbit_user: {{ rabbit_user }}
+ rabbit_password: {{ rabbit_password }}
+ rabbit_host: {{ rabbit_host }}
+
+ tasks:
+ - name: vCPE basic dnsmasq config
+ copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
+ notify:
+ - stop dnsmasq
+ - start dnsmasq
+
+ - name: dnsmasq config
+ template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+ notify:
+ - stop dnsmasq
+ - start dnsmasq
+
+ handlers:
+ - name: stop dnsmasq
+ shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
+
+ - name: start dnsmasq
+ shell: docker exec {{ container_name }} /usr/sbin/service dnsmasq start
+
+ - name: restart vcpe
+    shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
+
+ - name: stop vcpe
+ service: name={{ container_name }} state=stopped
+
+ - name: remove container
+ docker: name={{ container_name }} state=absent image=docker-vcpe
+
+ - name: start vcpe
+ service: name={{ container_name }} state=started
diff --git a/xos/observers/vcpe/templates/start-vcpe.sh.j2 b/xos/observers/vcpe/templates/start-vcpe.sh.j2
index a3533fa..c4128f3 100755
--- a/xos/observers/vcpe/templates/start-vcpe.sh.j2
+++ b/xos/observers/vcpe/templates/start-vcpe.sh.j2
@@ -8,7 +8,9 @@
iptables -L > /dev/null
ip6tables -L > /dev/null
-VCPE=vcpe-{{ vlan_ids[0] }}
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VCPE=vcpe-$STAG-$CTAG
docker inspect $VCPE > /dev/null 2>&1
if [ "$?" == 1 ]
@@ -23,14 +25,23 @@
WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE -i eth1 $VCPE 192.168.0.1/24 @{{ vlan_ids[0] }}
+# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
+# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
+# Assumes that br-lan has been set up appropriately by a previous step
+LAN_IFACE=br-lan
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+ ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+ ifconfig $LAN_IFACE.$STAG up
+ docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+fi
-HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
# Make sure VM's eth0 (hpc_client) has no IP address
-ifconfig $HPC_IFACE 0.0.0.0
+#ifconfig $HPC_IFACE 0.0.0.0
# Now can start up dnsmasq
docker exec $VCPE service dnsmasq start
diff --git a/xos/observers/vcpe/templates/vcpe.conf.j2 b/xos/observers/vcpe/templates/vcpe.conf.j2
index 1951322..fa7885e 100644
--- a/xos/observers/vcpe/templates/vcpe.conf.j2
+++ b/xos/observers/vcpe/templates/vcpe.conf.j2
@@ -6,5 +6,5 @@
respawn
script
- /usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh
+ /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
end script
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index 17d5c7a..0ac626b 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -27,6 +27,24 @@
from toposort import toposort
from observer.error_mapper import *
from openstack_observer.openstacksyncstep import OpenStackSyncStep
+from observer.steps.sync_object import SyncObject
+
+# Load app models
+
+try:
+ app_module_names = Config().observer_applist.split(',')
+except AttributeError:
+ app_module_names = []
+
+if (type(app_module_names)!=list):
+ app_module_names=[app_module_names]
+
+app_modules = []
+
+for m in app_module_names:
+ model_path = m+'.models'
+ module = __import__(model_path,fromlist=[m])
+ app_modules.append(module)
debug_mode = False
@@ -166,6 +184,7 @@
provides_dict[m.__name__]=[s.__name__]
step_graph = {}
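+        # '#ModelName' entries become phantom steps: SyncObject placeholders for models that no real sync step provides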
+ phantom_steps = []
for k,v in self.model_dependency_graph.items():
try:
for source in provides_dict[k]:
@@ -183,7 +202,12 @@
step_graph[source]=[dest]
except KeyError:
if (not provides_dict.has_key(m)):
- step_graph[source]='#%s'%m
+ try:
+ step_graph[source]+=['#%s'%m]
+ except:
+ step_graph[source]=['#%s'%m]
+
+ phantom_steps+=['#%s'%m]
pass
except KeyError:
@@ -196,7 +220,8 @@
pp = pprint.PrettyPrinter(indent=4)
logger.info(pp.pformat(step_graph))
- self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
+ self.ordered_steps = toposort(self.dependency_graph, phantom_steps+map(lambda s:s.__name__,self.sync_steps))
+ self.ordered_steps = [i for i in self.ordered_steps if i!='SyncObject']
logger.info("Order of steps=%s" % self.ordered_steps)
@@ -245,15 +270,31 @@
for e in self.ordered_steps:
self.last_deletion_run_times[e]=0
+ def lookup_step_class(self,s):
+ if ('#' in s):
+ return SyncObject
+ else:
+ step = self.step_lookup[s]
+ return step
+
def lookup_step(self,s):
if ('#' in s):
objname = s[1:]
so = SyncObject()
- so.provides=[globals()[objname]]
- so.observes=globals()[objname]
+
+ try:
+ obj = globals()[objname]
+ except:
+ for m in app_modules:
+ if (hasattr(m,objname)):
+ obj = getattr(m,objname)
+
+ so.provides=[obj]
+ so.observes=[obj]
step = so
else:
- step = self.step_lookup[s]
+ step_class = self.step_lookup[s]
+ step = step_class(driver=self.driver,error_map=self.error_mapper)
return step
def save_run_times(self):
@@ -275,7 +316,7 @@
def sync(self, S, deletion):
try:
- step = self.lookup_step(S)
+ step = self.lookup_step_class(S)
start_time=time.time()
logger.info("Starting to work on step %s, deletion=%s" % (step.__name__, str(deletion)))
@@ -324,16 +365,20 @@
self.failed_steps.append(step)
my_status = STEP_STATUS_KO
else:
- sync_step = step(driver=self.driver,error_map=self.error_mapper)
+ sync_step = self.lookup_step(S)
sync_step. __name__= step.__name__
sync_step.dependencies = []
try:
mlist = sync_step.provides
- for m in mlist:
- lst = self.model_dependency_graph[m.__name__]
- nlst = map(lambda(a,b):b,lst)
- sync_step.dependencies.extend(nlst)
+ try:
+ for m in mlist:
+ lst = self.model_dependency_graph[m.__name__]
+ nlst = map(lambda(a,b):b,lst)
+ sync_step.dependencies.extend(nlst)
+ except Exception,e:
+ raise e
+
except KeyError:
pass
sync_step.debug_mode = debug_mode
diff --git a/xos/openstack_observer/steps/sync_container.py b/xos/openstack_observer/steps/sync_container.py
index de4a2ce..272e5f8 100644
--- a/xos/openstack_observer/steps/sync_container.py
+++ b/xos/openstack_observer/steps/sync_container.py
@@ -6,9 +6,10 @@
import time
from django.db.models import F, Q
from xos.config import Config
-from observer.syncstep import SyncStep
+from observers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from observer.syncstep import SyncStep, DeferredException
from observer.ansible import run_template_ssh
-from core.models import Service, Slice, Container
+from core.models import Service, Slice, Instance
from services.onos.models import ONOSService, ONOSApp
from util.logger import Logger, logging
@@ -18,88 +19,102 @@
logger = Logger(level=logging.INFO)
-class SyncContainer(SyncStep):
- provides=[Container]
- observes=Container
+class SyncContainer(SyncInstanceUsingAnsible):
+ provides=[Instance]
+ observes=Instance
requested_interval=0
template_name = "sync_container.yaml"
def __init__(self, *args, **kwargs):
super(SyncContainer, self).__init__(*args, **kwargs)
-# def fetch_pending(self, deleted):
-# if (not deleted):
-# objs = ONOSService.get_service_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
-# else:
-# objs = ONOSService.get_deleted_service_objects()
-#
-# return objs
-
- def get_node(self,o):
- return o.node
-
- def get_node_key(self, node):
- return "/root/setup/node_key"
- #return "/opt/xos/node-key"
+ def fetch_pending(self, deletion=False):
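+        # only Instances realized as containers are handled here; plain VMs are handled by SyncInstances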
+ objs = super(SyncContainer, self).fetch_pending(deletion)
+ objs = [x for x in objs if x.isolation in ["container", "container_vm"]]
+ return objs
def get_instance_port(self, container_port):
- print container_port
- print container_port.network
for p in container_port.network.links.all():
- if (p.instance) and (p.instance.node == container_port.container.node) and (p.mac):
+ if (p.instance) and (p.instance.isolation=="vm") and (p.instance.node == container_port.instance.node) and (p.mac):
return p
return None
+ def get_parent_port_mac(self, instance, port):
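+        # a container-in-VM shares its parent VM's port on the same network, so reuse that port's MAC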
+ if not instance.parent:
+ raise Exception("instance has no parent")
+ for parent_port in instance.parent.ports.all():
+ if parent_port.network == port.network:
+ if not parent_port.mac:
+ raise DeferredException("parent port on network %s does not have mac yet" % parent_port.network.name)
+ return parent_port.mac
+ raise Exception("failed to find corresponding parent port for network %s" % port.network.name)
+
def get_ports(self, o):
i=0
ports = []
for port in o.ports.all():
- if not port.mac:
- raise Exception("Port on network %s is not yet ready" % port.network.name)
+ if (not port.ip):
+ # 'unmanaged' ports may have an ip, but no mac
+ # XXX: are there any ports that have a mac but no ip?
+ raise DeferredException("Port on network %s is not yet ready" % port.network.name)
pd={}
- pd["device"] = "eth%d" % i
- pd["mac"] = port.mac
- pd["ip"] = port.ip
+ pd["mac"] = port.mac or ""
+ pd["ip"] = port.ip or ""
+ pd["xos_network_id"] = port.network.id
- instance_port = self.get_instance_port(port)
- if not instance_port:
- raise Exception("No instance on slice for port on network %s" % port.network.name)
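+            # wan_network ports get a deterministic MAC derived from their IP (Docker-style 02:42 prefix)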
+ if port.network.name == "wan_network":
+ if port.ip:
+ (a, b, c, d) = port.ip.split('.')
+ pd["mac"] = "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
- pd["snoop_instance_mac"] = instance_port.mac
- pd["snoop_instance_id"] = instance_port.instance.instance_id
+
+ if o.isolation == "container":
+ # container on bare metal
+ instance_port = self.get_instance_port(port)
+ if not instance_port:
+ raise DeferredException("No instance on slice for port on network %s" % port.network.name)
+
+ pd["snoop_instance_mac"] = instance_port.mac
+ pd["snoop_instance_id"] = instance_port.instance.instance_id
+ pd["src_device"] = ""
+ pd["bridge"] = "br-int"
+ else:
+ # container in VM
+ pd["snoop_instance_mac"] = ""
+ pd["snoop_instance_id"] = ""
+ pd["parent_mac"] = self.get_parent_port_mac(o, port)
+ pd["bridge"] = ""
+
+ for (k,v) in port.get_parameters().items():
+ pd[k] = v
ports.append(pd)
+
+ # for any ports that don't have a device, assign one
+ used_ports = [x["device"] for x in ports if ("device" in x)]
+ avail_ports = ["eth%d"%i for i in range(0,64) if ("eth%d"%i not in used_ports)]
+ for port in ports:
+ if not port.get("device",None):
+ port["device"] = avail_ports.pop(0)
+
return ports
def get_extra_attributes(self, o):
fields={}
fields["ansible_tag"] = "container-%s" % str(o.id)
- fields["baremetal_ssh"] = True
- fields["instance_name"] = "rootcontext"
- fields["container_name"] = "%s-%s" % (o.slice.name, str(o.id))
- fields["docker_image"] = o.docker_image
- fields["username"] = "root"
+ fields["docker_image"] = o.image.name
fields["ports"] = self.get_ports(o)
+ if o.volumes:
+ fields["volumes"] = [x.strip() for x in o.volumes.split(",")]
+ else:
+ fields["volumes"] = ""
return fields
- def sync_fields(self, o, fields):
- self.run_playbook(o, fields)
-
def sync_record(self, o):
logger.info("sync'ing object %s" % str(o))
- node = self.get_node(o)
- node_key_name = self.get_node_key(node)
-
- if not os.path.exists(node_key_name):
- raise Exception("Node key %s does not exist" % node_key_name)
-
- node_key = file(node_key_name).read()
-
- fields = { "hostname": node.name,
- "private_key": node_key,
- }
+ fields = self.get_ansible_fields(o)
# If 'o' defines a 'sync_attributes' list, then we'll copy those
# attributes into the Ansible recipe's field list automatically.
@@ -109,14 +124,33 @@
fields.update(self.get_extra_attributes(o))
- self.sync_fields(o, fields)
+ self.run_playbook(o, fields)
+
+ o.instance_id = fields["container_name"]
+ o.instance_name = fields["container_name"]
o.save()
- def run_playbook(self, o, fields):
+ def delete_record(self, o):
+ logger.info("delete'ing object %s" % str(o))
+
+ fields = self.get_ansible_fields(o)
+
+ # If 'o' defines a 'sync_attributes' list, then we'll copy those
+ # attributes into the Ansible recipe's field list automatically.
+ if hasattr(o, "sync_attributes"):
+ for attribute_name in o.sync_attributes:
+ fields[attribute_name] = getattr(o, attribute_name)
+
+ fields.update(self.get_extra_attributes(o))
+
+ self.run_playbook(o, fields, "teardown_container.yaml")
+
+ def run_playbook(self, o, fields, template_name=None):
+ if not template_name:
+ template_name = self.template_name
tStart = time.time()
- run_template_ssh(self.template_name, fields, path="container")
+ run_template_ssh(template_name, fields, path="container")
logger.info("playbook execution time %d" % int(time.time()-tStart))
- def delete_record(self, m):
- pass
+
diff --git a/xos/openstack_observer/steps/sync_container.yaml b/xos/openstack_observer/steps/sync_container.yaml
index a707d0b..b60ffb8 100644
--- a/xos/openstack_observer/steps/sync_container.yaml
+++ b/xos/openstack_observer/steps/sync_container.yaml
@@ -11,10 +11,20 @@
ports:
{% for port in ports %}
- device: {{ port.device }}
- mac: {{ port.mac }}
+ xos_network_id: {{ port.xos_network_id }}
+ mac: {{ port.mac|default("") }}
ip: {{ port.ip }}
snoop_instance_mac: {{ port.snoop_instance_mac }}
snoop_instance_id: {{ port.snoop_instance_id }}
+ parent_mac: {{ port.parent_mac|default("") }}
+ s_tag: {{ port.s_tag|default("") }}
+ c_tag: {{ port.c_tag|default("") }}
+ next_hop: {{ port.next_hop|default("") }}
+ bridge: {{ port.bridge }}
+ {% endfor %}
+ volumes:
+ {% for volume in volumes %}
+ - {{ volume }}
{% endfor %}
tasks:
@@ -74,6 +84,10 @@
# state: running
# image: {{ docker_image }}
+ - name: check if systemd is installed
+ stat: path=/usr/bin/systemctl
+ register: systemctl
+
- name: container upstart
template: src=/opt/xos/openstack_observer/templates/container.conf.j2 dest=/etc/init/container-{{ container_name }}.conf
@@ -83,8 +97,18 @@
- name: container startup script
template: src=/opt/xos/openstack_observer/templates/start-container.sh.j2 dest=/usr/local/sbin/start-container-{{ container_name }}.sh mode=0755
+ - name: container teardown script
+ template: src=/opt/xos/openstack_observer/templates/stop-container.sh.j2 dest=/usr/local/sbin/stop-container-{{ container_name }}.sh mode=0755
+
- name: restart systemd
shell: systemctl daemon-reload
+ when: systemctl.stat.exists == True
+
+{% if ports %}
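+  # the quoted '{{' and '}}' survive the XOS template pass so that Ansible, not XOS, expands item.bridge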
+ - name: make sure bridges are setup
+ shell: ifconfig {{ '{{' }} item.bridge {{ '}}' }}
+ with_items: "ports"
+{% endif %}
- name: Make sure container is running
service: name=container-{{ container_name }} state=started
diff --git a/xos/openstack_observer/steps/sync_instances.py b/xos/openstack_observer/steps/sync_instances.py
index 1209448..815c83e 100644
--- a/xos/openstack_observer/steps/sync_instances.py
+++ b/xos/openstack_observer/steps/sync_instances.py
@@ -22,6 +22,11 @@
observes=Instance
playbook='sync_instances.yaml'
+ def fetch_pending(self, deletion=False):
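+        # only plain VM instances are synced here; container instances are handled by SyncContainer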
+ objs = super(SyncInstances, self).fetch_pending(deletion)
+ objs = [x for x in objs if x.isolation=="vm"]
+ return objs
+
def get_userdata(self, instance, pubkeys):
userdata = '#cloud-config\n\nopencloud:\n slicename: "%s"\n hostname: "%s"\n restapi_hostname: "%s"\n restapi_port: "%s"\n' % (instance.slice.name, instance.node.name, RESTAPI_HOSTNAME, str(RESTAPI_PORT))
userdata += 'ssh_authorized_keys:\n'
@@ -61,7 +66,7 @@
if controller_network.network.template.visibility == 'private' and \
controller_network.network.template.translation == 'none':
if not controller_network.net_id:
- raise DeferredException("Private Network %s has no id; Try again later" % controller_network.network.name)
+ raise DeferredException("Instance %s Private Network %s has no id; Try again later" % (instance, controller_network.network.name))
nics.append(controller_network.net_id)
# now include network template
diff --git a/xos/openstack_observer/steps/sync_object.py b/xos/openstack_observer/steps/sync_object.py
index 5e70464..a289c95 100644
--- a/xos/openstack_observer/steps/sync_object.py
+++ b/xos/openstack_observer/steps/sync_object.py
@@ -17,4 +17,4 @@
observes=[] # Caller fills this in
def sync_record(self, r):
- raise Exception('Waiting for Service dependency')
+ raise DeferredException('Waiting for Service dependency: %r'%r)
diff --git a/xos/openstack_observer/steps/sync_ports.py b/xos/openstack_observer/steps/sync_ports.py
index 7b20d29..bfdde8c 100644
--- a/xos/openstack_observer/steps/sync_ports.py
+++ b/xos/openstack_observer/steps/sync_ports.py
@@ -144,14 +144,11 @@
# For ports that were created by the user, find that ones
# that don't have neutron ports, and create them.
- for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) | Q(container__isnull=False)):
+ for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False) ):
logger.info("XXX working on port %s" % port)
- if port.instance:
- controller = port.instance.node.site_deployment.controller
- slice = port.instance.slice
- else:
- controller = port.container.node.site_deployment.controller
- slice = port.container.slice
+ controller = port.instance.node.site_deployment.controller
+ slice = port.instance.slice
+
if controller:
cn=port.network.controllernetworks.filter(controller=controller)
if not cn:
@@ -188,10 +185,6 @@
if neutron_port["fixed_ips"]:
port.ip = neutron_port["fixed_ips"][0]["ip_address"]
port.mac = neutron_port["mac_address"]
-
- neutron_network = driver.shell.quantum.list_networks(cn.net_id)["networks"][0]
- if "provider:segmentation_id" in neutron_network:
- port.segmentation_id = neutron_network["provider:segmentation_id"]
except:
logger.log_exc("failed to create neutron port for %s" % port)
continue
diff --git a/xos/openstack_observer/steps/teardown_container.yaml b/xos/openstack_observer/steps/teardown_container.yaml
new file mode 100644
index 0000000..5cabc78
--- /dev/null
+++ b/xos/openstack_observer/steps/teardown_container.yaml
@@ -0,0 +1,33 @@
+---
+- hosts: {{ instance_name }}
+ gather_facts: False
+ connection: ssh
+ user: {{ username }}
+ sudo: yes
+
+ vars:
+ container_name: {{ container_name }}
+ docker_image: {{ docker_image }}
+ ports:
+ {% for port in ports %}
+ - device: {{ port.device }}
+ xos_network_id: {{ port.xos_network_id }}
+ mac: {{ port.mac|default("") }}
+ ip: {{ port.ip }}
+ snoop_instance_mac: {{ port.snoop_instance_mac }}
+ snoop_instance_id: {{ port.snoop_instance_id }}
+ parent_mac: {{ port.parent_mac|default("") }}
+ s_tag: {{ port.s_tag|default("") }}
+ c_tag: {{ port.c_tag|default("") }}
+ next_hop: {{ port.next_hop|default("") }}
+ bridge: {{ port.bridge }}
+ {% endfor %}
+ volumes:
+ {% for volume in volumes %}
+ - {{ volume }}
+ {% endfor %}
+
+ tasks:
+ - name: Make sure container is stopped
+ service: name=container-{{ container_name }} state=stopped
+
diff --git a/xos/openstack_observer/syncstep.py b/xos/openstack_observer/syncstep.py
index 7accbfa..21327d7 100644
--- a/xos/openstack_observer/syncstep.py
+++ b/xos/openstack_observer/syncstep.py
@@ -8,6 +8,7 @@
from core.models import *
from django.db import reset_queries
from observer.ansible import *
+from dependency_walker import *
import json
import time
@@ -101,12 +102,26 @@
for dep in self.dependencies:
peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower
+ peer_objects=[]
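+            # try both the singular and plural attribute names, collecting whichever of them resolve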
try:
- peer_object = deepgetattr(obj, peer_name)
+ peer_names = plural(peer_name)
+ peer_object_list=[]
+
try:
- peer_objects = peer_object.all()
- except AttributeError:
- peer_objects = [peer_object]
+ peer_object_list.append(deepgetattr(obj, peer_name))
+ except:
+ pass
+
+ try:
+ peer_object_list.append(deepgetattr(obj, peer_names))
+ except:
+ pass
+
+ for peer_object in peer_object_list:
+ try:
+ peer_objects.extend(peer_object.all())
+ except AttributeError:
+ peer_objects.append(peer_object)
except:
peer_objects = []
@@ -174,6 +189,9 @@
pass
def call(self, failed=[], deletion=False):
+ #if ('Instance' in self.__class__.__name__):
+ # pdb.set_trace()
+
pending = self.fetch_pending(deletion)
for o in pending:
diff --git a/xos/openstack_observer/templates/container.conf.j2 b/xos/openstack_observer/templates/container.conf.j2
index d3ef42d..7cbb880 100644
--- a/xos/openstack_observer/templates/container.conf.j2
+++ b/xos/openstack_observer/templates/container.conf.j2
@@ -6,6 +6,9 @@
respawn
script
- /usr/local/sbin/start-container-{{ container_name }}.sh
+ /usr/local/sbin/start-container-{{ container_name }}.sh ATTACH
end script
+post-stop script
+ /usr/local/sbin/stop-container-{{ container_name }}.sh
+end script
\ No newline at end of file
diff --git a/xos/openstack_observer/templates/container.service.j2 b/xos/openstack_observer/templates/container.service.j2
index 9e2b83c..817d6d7 100644
--- a/xos/openstack_observer/templates/container.service.j2
+++ b/xos/openstack_observer/templates/container.service.j2
@@ -3,7 +3,9 @@
After=docker.service
[Service]
-ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh"
+ExecStart=/bin/bash -c "/usr/local/sbin/start-container-{{ container_name }}.sh ATTACH"
+ExecStop=/bin/bash -c "/usr/local/sbin/stop-container-{{ container_name }}.sh"
+SuccessExitStatus=0 137
[Install]
WantedBy=multi-user.target
diff --git a/xos/openstack_observer/templates/start-container.sh.j2 b/xos/openstack_observer/templates/start-container.sh.j2
index 5656992..260666c 100644
--- a/xos/openstack_observer/templates/start-container.sh.j2
+++ b/xos/openstack_observer/templates/start-container.sh.j2
@@ -6,39 +6,125 @@
CONTAINER={{ container_name }}
IMAGE={{ docker_image }}
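+# print the name of the host interface that owns the given MAC, skipping VLAN subinterfaces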
+function mac_to_iface {
+ PARENT_MAC=$1
+ ifconfig|grep $PARENT_MAC| awk '{print $1}'|grep -v '\.'
+}
+
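+# create (if needed) and bring up the VLAN subinterface $1.$2, leaving its name in STAG_IFACE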
+function encapsulate_stag {
+ LAN_IFACE=$1
+ STAG=$2
+ ifconfig $LAN_IFACE >> /dev/null
+ if [ "$?" == 0 ]; then
+ STAG_IFACE=$LAN_IFACE.$STAG
+ ifconfig $LAN_IFACE up
+ ifconfig $STAG_IFACE
+ if [ "$?" == 0 ]; then
+ echo $STAG_IFACE is already created
+ else
+ ifconfig $STAG_IFACE >> /dev/null || ip link add link $LAN_IFACE name $STAG_IFACE type vlan id $STAG
+ fi
+ ifconfig $STAG_IFACE up
+ else
+ echo There is no $LAN_IFACE. Aborting.
+ exit -1
+ fi
+}
+
+
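+# create a host-side directory for each requested volume and build the docker -v arguments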
+{% if volumes %}
+{% for volume in volumes %}
+DEST_DIR=/var/container_volumes/$CONTAINER/{{ volume }}
+mkdir -p $DEST_DIR
+VOLUME_ARGS="$VOLUME_ARGS -v $DEST_DIR:{{ volume }}"
+{% endfor %}
+{% endif %}
+
docker inspect $CONTAINER > /dev/null 2>&1
if [ "$?" == 1 ]
then
docker pull $IMAGE
- docker run -d --name=$CONTAINER --privileged=true --net=none $IMAGE
+ docker run -d --name=$CONTAINER --privileged=true --net=none $VOLUME_ARGS $IMAGE
else
docker start $CONTAINER
fi
{% if ports %}
{% for port in ports %}
+
+{% if port.next_hop %}
+NEXTHOP_ARG="@{{ port.next_hop }}"
+{% else %}
+NEXTHOP_ARG=""
+{% endif %}
+
+{% if port.c_tag %}
+CTAG_ARG="@{{ port.c_tag }}"
+{% else %}
+CTAG_ARG=""
+{% endif %}
+
+{% if port.parent_mac %}
+# container-in-VM
+SRC_DEV=$( mac_to_iface "{{ port.parent_mac }}" )
+CMD="docker exec $CONTAINER ifconfig $SRC_DEV >> /dev/null || pipework $SRC_DEV -i {{ port.device }} $CONTAINER {{ port.ip }}/24$NEXTHOP_ARG {{ port.mac }} $CTAG_ARG"
+echo $CMD
+eval $CMD
+
+{% else %}
+# container-on-metal
IP="{{ port.ip }}"
+{% if port.mac %}
MAC="{{ port.mac }}"
+{% else %}
+MAC=""
+{% endif %}
+
DEVICE="{{ port.device }}"
+BRIDGE="{{ port.bridge }}"
+{% if port.s_tag %}
+# This is intended for lan_network. Assume that BRIDGE is set to br-lan. We
+# create a VLAN subinterface that strips off the S-TAG.
+STAG="{{ port.s_tag }}"
+encapsulate_stag $BRIDGE $STAG
+SRC_DEV=$STAG_IFACE
+{% else %}
+# This is for a standard Neutron private network. We use a donor VM to set up
+# openvswitch for us, then snoop on its devices and create a tap with the
+# same settings.
+XOS_NETWORK_ID="{{ port.xos_network_id }}"
INSTANCE_MAC="{{ port.snoop_instance_mac }}"
INSTANCE_ID="{{ port.snoop_instance_id }}"
INSTANCE_TAP=`virsh domiflist $INSTANCE_ID | grep -i $INSTANCE_MAC | awk '{print $1}'`
INSTANCE_TAP=${INSTANCE_TAP:3}
VLAN_ID=`ovs-vsctl show | grep -i -A 1 port.*$INSTANCE_TAP | grep -i tag | awk '{print $2}'`
-TAP="con`echo $CONTAINER_$DEVICE|md5sum|awk '{print $1}'`"
-TAP=${TAP:0:12}
+# One tap for all containers per XOS/neutron network. Include the VLAN_ID in the
+# hash, to cover the case where XOS is reinstalled and the XOS network ids
+# get reused.
+TAP="con`echo ${XOS_NETWORK_ID}_$VLAN_ID|md5sum|awk '{print $1}'`"
+TAP=${TAP:0:10}
echo im=$INSTANCE_MAC ii=$INSTANCE_ID it=$INSTANCE_TAP vlan=$VLAN_ID tap=$TAP con=$CONTAINER dev=$DEVICE mac=$MAC
ovs-vsctl show | grep -i $TAP
if [[ $? == 1 ]]; then
echo creating tap
- ovs-vsctl add-port br-int $TAP tag=$VLAN_ID -- set interface $TAP type=internal
+ ovs-vsctl add-port $BRIDGE $TAP tag=$VLAN_ID -- set interface $TAP type=internal
else
echo tap exists
fi
+SRC_DEV=$TAP
+{% endif %}
-docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $TAP -i $DEVICE $CONTAINER $IP/24 $MAC
+CMD="docker exec $CONTAINER ifconfig $DEVICE >> /dev/null || pipework $SRC_DEV -i $DEVICE $CONTAINER $IP/24$NEXTHOP_ARG $MAC $CTAG_ARG"
+echo $CMD
+eval $CMD
+{% endif %}
{% endfor %}
{% endif %}
# Attach to container
-# docker start -a $CONTAINER
+# (this is only done when using upstart, since upstart expects to be attached
+# to a running service)
+if [[ "$1" == "ATTACH" ]]; then
+ docker start -a $CONTAINER
+fi
+
diff --git a/xos/openstack_observer/templates/stop-container.sh.j2 b/xos/openstack_observer/templates/stop-container.sh.j2
new file mode 100644
index 0000000..9cabb00
--- /dev/null
+++ b/xos/openstack_observer/templates/stop-container.sh.j2
@@ -0,0 +1,4 @@
+CONTAINER={{ container_name }}
+
+docker stop $CONTAINER
+docker rm $CONTAINER
diff --git a/xos/templates/admin/base.html b/xos/templates/admin/base.html
index 04ec651..4c433c1 100644
--- a/xos/templates/admin/base.html
+++ b/xos/templates/admin/base.html
@@ -1,17 +1,20 @@
-{% load admin_static %}{% load suit_tags %}{% load url from future %}<!DOCTYPE html>
+{% load admin_static %}{% load suit_tags %}{% load url from future %}
+<!DOCTYPE html>
<html lang="{{ LANGUAGE_CODE|default:"en-us" }}" {% if LANGUAGE_BIDI %}dir="rtl"{% endif %}>
<head>
- <title>{% block title %} {%if title %} {{ title }} | {% endif %} {{ 'ADMIN_NAME'|suit_conf }}{% endblock %}</title>
+ <title>{% block title %} {%if title %} {{ title }} | {% endif %} {{ 'ADMIN_NAME'|suit_conf }}{% endblock %}</title>
<link rel="stylesheet" type="text/css" href="{% block stylesheet %}{% endblock %}"/>
- <link rel="stylesheet" type="text/css" href="{% static 'suit/bootstrap/css/bootstrap.min.css' %}" media="all"/>
+ <link rel="stylesheet" type="text/css" href="{% static 'suit/bootstrap/dist/css/bootstrap.min.css' %}" media="all"/>
<link rel="stylesheet" type="text/css" href="{% static 'suit/css/suit.css' %}" media="all">
<link rel="stylesheet" type="text/css" href="{% static 'xos.css' %}" media="all">
- <link rel="stylesheet" type="text/css" href="{% static 'cord.css' %}" media="all">
{% if XOS_BRANDING_CSS %}
- <link rel="stylesheet" type="text/css" href="{{ XOS_BRANDING_CSS }}">
+ <link rel="stylesheet" type="text/css" href="{% static 'cord.css' %}" media="all">
+ <link rel="stylesheet" type="text/css" href="{{ XOS_BRANDING_CSS }}">
{% endif %}
{% block extrastyle %}{% endblock %}
- {% if LANGUAGE_BIDI %}<link rel="stylesheet" type="text/css" href="{% block stylesheet_rtl %}{% static "admin/css/rtl.css" %}{% endblock %}"/>{% endif %}
+ {% if LANGUAGE_BIDI %}
+ <link rel="stylesheet" type="text/css" href="{% block stylesheet_rtl %}{% static "admin/css/rtl.css" %}{% endblock %}"/>
+ {% endif %}
<script type="text/javascript">window.__admin_media_prefix__ = "{% filter escapejs %}{% static "admin/" %}{% endfilter %}";</script>
<script src="{% static 'suit/js/jquery-1.9.1.min.js' %}"></script>
<script src="http://code.jquery.com/ui/1.10.4/jquery-ui.js"></script>
@@ -31,269 +34,289 @@
{% endif %}
{% block extrahead %}{% endblock %}
{% block blockbots %}
- <meta name="robots" content="NONE,NOARCHIVE"/>{% endblock %}
- <link rel="shortcut icon" href="{{ XOS_BRANDING_ICON }}">
-</head>
-{% load i18n %}
-
+ <meta name="robots" content="NONE,NOARCHIVE"/>
+ {% endblock %}
+ <link rel="shortcut icon" href="{{ XOS_BRANDING_ICON }}"></head>
+ {% load i18n %}
<body class="{% if is_popup %}popup {% endif %}{% block bodyclass %}{% endblock %}">
-<div id="dialog-placeholder">
-<!-- This is a placeholder for dialog boxes, like the observer calendar -->
-</div>
+ <div id="dialog-placeholder">
+ <!-- This is a placeholder for dialog boxes, like the observer calendar -->
+ </div>
-<!-- Sticky footer wrap -->
-<div id="wrap">
+ <!-- Sticky footer wrap -->
+ <div id="wrap">
- <!-- Container -->
- {% block container %}
- <div id="container">
+ <!-- Container -->
+ {% block container %}
+ <!-- <div id="container"> -->
+
+ <!-- </div> -->
+ <!-- END Header -->
+ <div id="wrapper">
- {% block logo %}
- <a href="{% url 'admin:index' %}"><h1 id="site-name"><img class="logo" height="70" width="259" src="{% static 'cord_logo_3.png' %}"/></h1></a>
- {% endblock %}
- {% block header %}
- {% if not is_popup %}
- <!-- Header -->
- <div id="header" class="header">
-
- <div id="branding">
- {% block quick-search %}
- {% with 'SEARCH_URL'|suit_conf as search_url %}
- {% if search_url %}
- <form class="form-search nav-quick-search" autocomplete="off" action="{% if '/' in search_url %}{{ search_url }}{% else %}{% url search_url %}{% endif %}" method="GET">
- <i class="input-icon icon-search"></i>
- <input type="text" name="q" class="input-medium search-query" id="quick-search">
- <input type="submit" class="submit" value="">
- </form>
+ <!-- Sidebar -->
+ <div id="sidebar-wrapper">
+ <a href="{% url 'admin:index' %}" class="hidden-xs">
+ <img class="logo" src="{% static 'cord_logo_3.png' %}"/>
+ </a>
+ {% include 'suit/menu.html' %}
+ <button class="navbar-toggle collapsed visible-xs" type="button">
+ <i class="glyphicon glyphicon-arrow-left"></i>
+ </button>
+ </div>
+ <!-- /#sidebar-wrapper -->
+
+ <!-- Page Content -->
+ <div id="page-content-wrapper">
+ <div class="container-fluid">
+ <div class="row">
+ <div class="col-xs-12">
+ {% block header %}
+ {% if not is_popup %}
+ <!-- Header -->
+ <div id="header" class="header">
+ <button class="navbar-toggle collapsed" type="button">
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ <span class="icon-bar"></span>
+ </button>
+ {% block logo %}
+ <a href="{% url 'admin:index' %}" class="visible-xs">
+ <img class="logo" src="{% static 'cord_logo_3.png' %}"/>
+ </a>
+ {% endblock %}
+ <!-- <div id="branding">
+ {% block quick-search %}
+ {% with 'SEARCH_URL'|suit_conf as search_url %}
+ {% if search_url %}
+ <form class="form-search nav-quick-search" autocomplete="off" action="{% if '/' in search_url %}{{ search_url }}{% else %}{% url search_url %}{% endif %}" method="GET"> <i class="input-icon icon-search"></i>
+ <input type="text" name="q" class="input-medium search-query" id="quick-search">
+ <input type="submit" class="submit" value="">
+ </form>
+ {% endif %}
+ {% endwith %}
+ {% endblock %}
+ </div> -->
+ {% block header_time %}
+ <!-- <div id="branding2">
+ <div class="header-content header-content-first">
+ <div class="header-column icon"> <i class="icon-time"></i></div>
+ <div class="header-column">
+ <span class="date">{% suit_date %}</span>
+ <br>
+ <span class="time" id="clock">{% suit_time %}</span>
+ </div>
+ </div>
+ </div> -->
+ {% endblock %}
+
+ {% block header_content %}
+ <div class="header-content">
+ <div class="header-column icon">
+ <i class="icon-comment"></i>
+ </div>
+ <div class="header-column">
+ <a href="" class="grey"> <b>2</b>
+ new messages
+ </a>
+ </div>
+ </div>
+ {% endblock %}
+
+ {% if user.is_active and user.is_staff %}
+ <div id="user-tools">
+ {% trans 'Welcome,' %}
+ <a href="http://{{ request.get_host}}/admin/core/user/{{user.id}}">{{user.email}}</a>
+ <span id="observer-status"></span>
+ <span class="user-links">
+ {% block userlinks %}
+ {% url 'django-admindocs-docroot' as docsroot %}
+ {% if docsroot %}
+ <a href="http://guide.xosproject.org/">{% trans 'Documentation' %}</a>
+ <span class="separator">|</span>
+ {% endif %}
+ <a href="{% url 'admin:password_change' %}">{% trans 'Change password' %}</a>
+ <span class="separator">|</span>
+ <a href="{% url 'admin:logout' %}">{% trans 'Log out' %}</a>
+ {% endblock %}
+ </span>
+ </div>
+ {% endif %}
+
+ {% block nav-global %}
+ {% endblock %}
+ </div>
+ {% endif %}
+ {% endblock %}
+ </div>
+ </div>
+ <div class="row">
+ <div class="col-lg-12">
+ <div class="suit-columns {{ is_popup|yesno:'one-column,two-columns' }}">
+ {% block content-center %}
+ {% if not is_popup %}
+ {% block minidash %}
+ <div id="openCloudTopPage">
+ {% include "admin/newminidashboard.html" %}
+ </div>
+ {% endblock %}
+
+ {% block breadcrumbs %}
+ <ul class="breadcrumb">
+ <li>
+ <a href="{% url 'admin:index' %}">{% trans 'Home' %}</a>
+ {% if title %}
+ <span class="divider">»</span>
+ </li>
+ <li class="active">
+ {{ title }}
+ {% endif %}
+ </li>
+ </ul>
+ {% endblock %}
+ {% endif %}
+
+ {% block messages %}
+ {% if messages %}
+ {% for message in messages %}
+ <div class="alert alert-{% firstof message.tags 'info' %}">
+ <button class="close" data-dismiss="alert">×</button>
+ <strong>{% if message.tags %}{{ message.tags|capfirst }}{% else %}Message{% endif %}!</strong>
+ {{ message }}
+ </div>
+ {% endfor %}
+ {% endif %}
+ {% endblock messages %}
+
+ <!-- Content -->
+ <div id="content" class="{% block coltype %}colM{% endblock %} row-fluid">
+ {% block pretitle %}
+ {% endblock %}
+ {% block content_title %}
+ {% if title %}
+ <h2 class="content-title">{{ title }}</h2>
+ {% endif %}
+ {% endblock %}
+ {% block content %}
+ {% block object-tools %}
+ {% endblock %}
+ {{ content }}
+ {% endblock %}
+ {% block sidebar_content %}
+ {% block sidebar %}{% endblock %}
+ {% endblock %}
+ </div>
+ <!-- END Content -->
+ <span class="clearfix"></span>
+ <!-- </div>
+ -->
+ {% endblock %}
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ <!-- /#page-content-wrapper -->
+ <!-- /#wrapper -->
+ {% endblock %}
+ </div>
+ {% if not is_popup %}
+ <!-- Sticky footer push -->
+ <div id="push"></div>
+ {% endif %}
+
+ {% block footer %}
+ {% if not is_popup %}
+ <div id="footer" class="footer">
+ <div class="content">
+ <div class="tools">
+ {% block footer_links %}
+ <a href="http://djangosuit.com/support/" target="_blank" class="icon">
+ <i class="icon-question-sign"></i>
+ Support
+ </a>
+ <a href="http://djangosuit.com/pricing/" target="_blank" class="icon">
+ <i class="icon-bookmark"></i>
+ Licence
+ </a>
+ <a href="http://github.com/darklow/django-suit/issues" target="_blank" class="icon">
+ <i class="icon-comment"></i>
+ Report a bug
+ </a>
+ {% endblock %}
+ </div>
+
+ <div class="statusMsg" id="statusMsg">
+ <!-- this is a placeholder for xoslib views to display status messages -->
+ </div>
+
+ <div class="branding">
+ {% block footer_branding %}
+ {% with 'ADMIN_NAME'|suit_conf as admin_name %}
+ {{ admin_name }}
+ {% if admin_name == 'Django Suit' %}
+ v{{ 'VERSION'|suit_conf }}
{% endif %}
{% endwith %}
{% endblock %}
</div>
-
- {% block header_time %}
- <div id="branding2">
- <!--<div class="header-content header-content-first">
- <div class="header-column icon">
- <i class="icon-time"></i>
- </div>
- <div class="header-column">
- <span class="date"> {% suit_date %}</span><br>
- <span class="time" id="clock">{% suit_time %}</span>
- </div>
- </div>-->
-
- {% endblock %}
-
- {% block header_content %}
- <!--<div class="header-content">
- <div class="header-column icon">
- <i class="icon-comment"></i>
- </div>
- <div class="header-column">
- <a href="" class="grey"><b>2</b> new messages</a>
- </div>
- </div>-->
- {% endblock %}
-
- {% if user.is_active and user.is_staff %}
- <div id="user-tools">
- {% trans 'Welcome,' %}
- <a href="http://{{ request.get_host}}/admin/core/user/{{user.id}}">{{user.email}}</a>
- <span id="observer-status"></span>
- <span class="user-links">
- {% block userlinks %}
- {% url 'django-admindocs-docroot' as docsroot %}
- {% if docsroot %}
- <a href="http://guide.xosproject.org/">{% trans 'Documentation' %}</a>
- <span class="separator">|</span>
- {% endif %}
- <a href="{% url 'admin:password_change' %}">{% trans 'Change password' %}</a>
- <span class="separator">|</span>
- <a href="{% url 'admin:logout' %}">{% trans 'Log out' %}</a>
- </span>
- {% endblock %}
- </div>
- {% endif %}
-
- {% block nav-global %}{% endblock %}
-
</div>
- </div>
- {% endif %}
- <!-- END Header -->
- {% endblock %}
-
-
- <div class="suit-columns {{ is_popup|yesno:'one-column,two-columns' }}">
-
- {% block content-center %}
- <div id="suit-center" class="suit-column">
-
- {% if not is_popup %}
- {% block minidash %}
- <div id=openCloudTopPage>
- {% include "admin/newminidashboard.html" %}
- </div>
- {% endblock %}
-
- {% block breadcrumbs %}
- <ul class="breadcrumb">
- <li><a href="{% url 'admin:index' %}">{% trans 'Home' %}</a>
- {% if title %}
- <span class="divider">»</span>
- </li>
- <li class="active">
- {{ title }}
- {% endif %}
- </li>
- </ul>
- {% endblock %}
- {% endif %}
-
- {% block messages %}
- {% if messages %}
-
- {% for message in messages %}
- <div class="alert alert-{% firstof message.tags 'info' %}">
- <button class="close" data-dismiss="alert">×</button>
- <strong>
- {% if message.tags %}{{ message.tags|capfirst }}{% else %}
- Message{% endif %}!</strong>
- {{ message }}
- </div>
- {% endfor %}
- {% endif %}
- {% endblock messages %}
-
- <!-- Content -->
- <div id="content" class="{% block coltype %}colM{% endblock %} row-fluid">
- {% block pretitle %}{% endblock %}
- {% block content_title %}{% if title %}
- <h2 class="content-title">{{ title }}</h2>
- {% endif %}{% endblock %}
- {% block content %}
- {% block object-tools %}{% endblock %}
- {{ content }}
- {% endblock %}
- {% block sidebar_content %}
- {% block sidebar %}{% endblock %}
- {% endblock %}
- </div>
- <!-- END Content -->
- <span class="clearfix"></span>
- </div>
- {% endblock %}
-
-
- {% block content-left %}
- {% if not user.is_appuser %}
- {% if not is_popup %}
- <div id="suit-left" class="suit-column">
-
-
- {% include 'suit/menu.html' %}
-
- </div>
- {% endif %}
- {% endif %}
- {% endblock %}
-
- </div>
- </div>
- {% endblock %}
-
- {% if not is_popup %}
- <!-- Sticky footer push -->
- <div id="push"></div>
- {% endif %}
-
-</div>
-
-{% block footer %}
- {% if not is_popup %}
- <div id="footer" class="footer">
- <div class="content">
- <div class="tools">
- {% block footer_links %}
- <a href="http://djangosuit.com/support/" target="_blank" class="icon"><i class="icon-question-sign"></i>Support</a>
- <a href="http://djangosuit.com/pricing/" target="_blank" class="icon"><i class="icon-bookmark"></i>Licence</a>
- <a href="http://github.com/darklow/django-suit/issues" target="_blank" class="icon"><i class="icon-comment"></i>Report a bug</a>
- {% endblock %}
- </div>
-
- <div class="statusMsg" id="statusMsg">
- <!-- this is a placeholder for xoslib views to display status messages -->
- </div>
-
- <!-- <div class="copyright">
- {% block copyright %}
- Copyright © 2013 DjangoSuit.com<br>Developed by <a href="http://djangosuit.com" target="_blank">DjangoSuit.com</a>
- {% endblock %}
- </div> -->
-
- <div class="branding">{% block footer_branding %}
- {% with 'ADMIN_NAME'|suit_conf as admin_name %}
- {{ admin_name }}
- {% if admin_name == 'Django Suit' %}
- v{{ 'VERSION'|suit_conf }}
- {% endif %}
- {% endwith %}
- {% endblock %}</div>
- </div>
+ </div>
+ {% endif %}
+ {% endblock %}
</div>
- {% endif %}
-{% endblock %}
- <script src="{% static 'suit/bootstrap/js/bootstrap.min.js' %}"></script>
+ <script src="{% static 'suit/bootstrap/dist/js/bootstrap.min.js' %}"></script>
<script src="{% static 'suit/js/suit.js' %}"></script>
<script type="text/javascript" src="//www.google.com/jsapi"></script>
- <!-- src="{% static 'xos_graphs.js' %}" -->
+ {% block extrajs %}
+ {% endblock %}
+ <script src="http://d3js.org/d3.v3.js"></script>
+ <div class="modal fade hide" id="chartsModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
+ <div class="modal-dialog">
+ <div class="modal-content">
+ <!--<div class="modal-header">
+ <button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
+ <h4 class="modal-title" id="myModalLabel">OpenCloud</h4>
+ </div>-->
+ <div class="modal-body" style="overflow-y:hidden; overflow-x:hidden;">
+ <div class="chartContainer">
+ <div class="row">
+ <div class=" padding"></div>
+ </div>
- {% block extrajs %}{% endblock %}
-<script src="http://d3js.org/d3.v3.js"></script>
- <div class="modal fade hide" id="chartsModal" tabindex="-1" role="dialog" aria-labelledby="myModalLabel" aria-hidden="true">
- <div class="modal-dialog">
- <div class="modal-content">
- <!--<div class="modal-header">
- <button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
- <h4 class="modal-title" id="myModalLabel">OpenCloud</h4>
- </div>-->
- <div class="modal-body" style="overflow-y:hidden; overflow-x:hidden;">
- <div class="chartContainer">
- <div class="row">
- <div class=" padding">
- </div>
- </div>
+ <div class="row">
+ <div class=" heading">
+ <p id="chartHeading" class="heading">OpenCloud</p>
+ </div>
+ </div>
+ <div class="row">
+ <div class="padding"></div>
+ <div class="padding"></div>
+ </div>
+ <div class="row">
+ <div id="graph" class="graph"></div>
+ </div>
+ </div>
+ <div id="graph_work" style="display:none"></div>
+ </div>
+ <!--<div class="modal-footer">
+ <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
+ </div>
+ -->
+ </div>
+ <!-- /.modal-content -->
+ </div>
+ <!-- /.modal-dialog -->
+ </div>
+ <!-- /.modal -->
- <div class="row">
- <div class=" heading">
- <p id="chartHeading" class="heading">OpenCloud</p>
- </div>
- </div>
- <div class="row">
- <div class="padding"></div>
- <div class="padding"></div>
- </div>
- <div class="row">
- <div id="graph" class="graph">
- </div>
- </div>
- </div>
- <div id="graph_work" style="display:none"></div>
- </div>
- <!--<div class="modal-footer">
- <button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
- </div>-->
- </div><!-- /.modal-content -->
- </div><!-- /.modal-dialog -->
- </div><!-- /.modal -->
-
-
-<script>
-
-
-
-
-</script>
+ <!-- Menu Toggle Script -->
+ <script>
+ $(".navbar-toggle").click(function(e) {
+ e.preventDefault();
+ $("#wrapper").toggleClass("toggled");
+ });
+ </script>
</body>
-</html>
+</html>
\ No newline at end of file
diff --git a/xos/templates/admin/dashboard/customize.html b/xos/templates/admin/dashboard/customize.html
index e9ef6d8..e74d6f0 100644
--- a/xos/templates/admin/dashboard/customize.html
+++ b/xos/templates/admin/dashboard/customize.html
@@ -9,8 +9,8 @@
</div>
</div>
<form>
- <div class="customize_row">
- <div class="customize_column">
+ <div class="row">
+ <div class="col-xs-4">
<div>Available Dashboard Views</div>
<select name="selectfrom" id="select-from" multiple size="5">
{% for cp in unusedDashboards %}
@@ -18,14 +18,14 @@
{% endfor %}
</select>
</div>
- <div class="customize_column">
+ <div class="col-xs-2">
<br>
<div class="btn btn-success" id="customize-btn-add">Add »</div>
<br>
<br>
<div class="btn btn-success" id="customize-btn-remove">« Remove</div>
</div>
- <div class="customize_column">
+ <div class="col-xs-4">
<div>Selected Dashboard Views</div>
<select name="selectto" id="select-to" multiple size="5">
{% for cp in dashboards %}
@@ -36,7 +36,7 @@
<div class="btn btn-high btn-info" id="customize-btn-save">Save</div>
<div style="display: none" id="customize-msg-saving">Saving...</div>
</div>
- <div class="customize_column">
+ <div class="col-xs-2">
<br>
<div class="btn btn-success" id="customize-btn-up">Up</div>
<br>
diff --git a/xos/templates/admin/login.html b/xos/templates/admin/login.html
index da44628..5b7ec4d 100644
--- a/xos/templates/admin/login.html
+++ b/xos/templates/admin/login.html
@@ -2,7 +2,6 @@
{% load i18n admin_static %}
{% block extrastyle %}{{ block.super }}
-<link rel="stylesheet" type="text/css" href="/static/suit/bootstrap/css/bootstrap.min.css" media="all"/>
<link rel="stylesheet" type="text/css" href="{% static "xos.css" %}" />
{% if XOS_BRANDING_CSS %}
<link rel="stylesheet" type="text/css" href="{{ XOS_BRANDING_CSS }}">
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index a661af7..4f726fc 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -47,6 +47,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -136,6 +140,9 @@
dependencies:
type: string
required: false
+ config_network-cfg.json:
+ type: string
+ required: false
tosca.nodes.VCPEService:
description: >
@@ -233,10 +240,14 @@
specific vlan_id.
properties:
xos_base_tenant_props
- vlan_id:
+ s_tag:
type: string
required: false
- description: vlan_id for connection to subscriber household.
+                description: s_tag; identifies which vOLT port
+ c_tag:
+ type: string
+ required: false
+                description: c_tag; identifies which subscriber within the s_tag
tosca.nodes.User:
derived_from: tosca.nodes.Root
@@ -285,6 +296,17 @@
required: false
description: Indicates what page the user should go to on login.
+ tosca.nodes.NetworkParameterType:
+ derived_from: tosca.nodes.Root
+
+ description: >
+ An XOS network parameter type. May be applied to Networks and/or
+ Ports.
+
+ capabilities:
+ network_parameter_type:
+ type: tosca.capabilities.xos.NetworkParameterType
+
tosca.nodes.NetworkTemplate:
derived_from: tosca.nodes.Root
@@ -440,6 +462,10 @@
image:
type: tosca.capabilities.xos.Image
properties:
+ kind:
+ type: string
+ required: false
+ description: Type of image (container | VM)
disk_format:
type: string
required: false
@@ -562,6 +588,10 @@
type: integer
default: 10
description: Quota of instances that this slice may create.
+ default_isolation:
+ type: string
+ required: false
+            description: default isolation to use when bringing up instances (defaults to 'vm')
tosca.nodes.Node:
derived_from: tosca.nodes.Root
@@ -591,6 +621,31 @@
required: false
description: URL to the dashboard
+ tosca.nodes.Compute.Container:
+ derived_from: tosca.nodes.Compute
+ description: >
+ The TOSCA Compute node represents a container on bare metal.
+ attributes:
+ private_address:
+ type: string
+ public_address:
+ type: string
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ binding:
+ type: tosca.capabilities.network.Bindable
+ os:
+ type: tosca.capabilities.OperatingSystem
+ scalable:
+ type: tosca.capabilities.Scalable
+ requirements:
+ - local_storage:
+ capability: tosca.capabilities.Attachment
+ node: tosca.nodes.BlockStorage
+ relationship: tosca.relationships.AttachesTo
+ occurrences: [0, UNBOUNDED]
+
tosca.relationships.MemberOfSlice:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Slice ]
@@ -631,6 +686,10 @@
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Network ]
+ tosca.relationships.UsesImage:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.Image ]
+
tosca.relationships.SupportsImage:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
@@ -726,3 +785,7 @@
tosca.capabilities.xos.DashboardView:
derived_from: tosca.capabilities.Root
description: An XOS DashboardView
+
+ tosca.capabilities.xos.NetworkParameterType:
+ derived_from: tosca.capabilities.Root
+ description: An XOS NetworkParameterType
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 9170ecf..246c922 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -52,6 +52,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -90,6 +94,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -154,6 +162,9 @@
dependencies:
type: string
required: false
+ config_network-cfg.json:
+ type: string
+ required: false
tosca.nodes.VCPEService:
description: >
@@ -188,6 +199,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -230,6 +245,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -272,6 +291,10 @@
type: string
required: false
description: Public key to install into Instances to allows Services to SSH into them.
+ private_key_fn:
+ type: string
+ required: false
+ description: Location of private key file
versionNumber:
type: string
required: false
@@ -361,10 +384,14 @@
type: string
required: false
description: Service specific ID opaque to XOS but meaningful to service
- vlan_id:
+ s_tag:
type: string
required: false
- description: vlan_id for connection to subscriber household.
+                description: s_tag; identifies which vOLT port
+ c_tag:
+ type: string
+ required: false
+                description: c_tag; identifies which subscriber within the s_tag
tosca.nodes.User:
derived_from: tosca.nodes.Root
@@ -413,6 +440,17 @@
required: false
description: Indicates what page the user should go to on login.
+ tosca.nodes.NetworkParameterType:
+ derived_from: tosca.nodes.Root
+
+ description: >
+ An XOS network parameter type. May be applied to Networks and/or
+ Ports.
+
+ capabilities:
+ network_parameter_type:
+ type: tosca.capabilities.xos.NetworkParameterType
+
tosca.nodes.NetworkTemplate:
derived_from: tosca.nodes.Root
@@ -579,6 +617,10 @@
image:
type: tosca.capabilities.xos.Image
properties:
+ kind:
+ type: string
+ required: false
+ description: Type of image (container | VM)
disk_format:
type: string
required: false
@@ -734,6 +776,10 @@
type: integer
default: 10
description: Quota of instances that this slice may create.
+ default_isolation:
+ type: string
+ required: false
+            description: default isolation to use when bringing up instances (defaults to 'vm')
tosca.nodes.Node:
derived_from: tosca.nodes.Root
@@ -785,6 +831,31 @@
required: false
description: URL to the dashboard
+ tosca.nodes.Compute.Container:
+ derived_from: tosca.nodes.Compute
+ description: >
+ The TOSCA Compute node represents a container on bare metal.
+ attributes:
+ private_address:
+ type: string
+ public_address:
+ type: string
+ capabilities:
+ host:
+ type: tosca.capabilities.Container
+ binding:
+ type: tosca.capabilities.network.Bindable
+ os:
+ type: tosca.capabilities.OperatingSystem
+ scalable:
+ type: tosca.capabilities.Scalable
+ requirements:
+ - local_storage:
+ capability: tosca.capabilities.Attachment
+ node: tosca.nodes.BlockStorage
+ relationship: tosca.relationships.AttachesTo
+ occurrences: [0, UNBOUNDED]
+
tosca.relationships.MemberOfSlice:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Slice ]
@@ -825,6 +896,10 @@
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Network ]
+ tosca.relationships.UsesImage:
+ derived_from: tosca.relationships.Root
+ valid_target_types: [ tosca.capabilities.xos.Image ]
+
tosca.relationships.SupportsImage:
derived_from: tosca.relationships.Root
valid_target_types: [ tosca.capabilities.xos.Image ]
@@ -920,3 +995,7 @@
tosca.capabilities.xos.DashboardView:
derived_from: tosca.capabilities.Root
description: An XOS DashboardView
+
+ tosca.capabilities.xos.NetworkParameterType:
+ derived_from: tosca.capabilities.Root
+ description: An XOS NetworkParameterType
diff --git a/xos/tosca/resources/VOLTTenant.py b/xos/tosca/resources/VOLTTenant.py
index f00b515..20c4dfb 100644
--- a/xos/tosca/resources/VOLTTenant.py
+++ b/xos/tosca/resources/VOLTTenant.py
@@ -7,14 +7,14 @@
import pdb
from core.models import User
-from cord.models import VOLTTenant, VOLTService, CordSubscriberRoot
+from cord.models import VOLTTenant, VOLTService, CordSubscriberRoot, VOLT_KIND
from xosresource import XOSResource
class XOSVOLTTenant(XOSResource):
provides = "tosca.nodes.VOLTTenant"
xos_model = VOLTTenant
- copyin_props = ["service_specific_id", "vlan_id"]
+ copyin_props = ["service_specific_id", "s_tag", "c_tag"]
name_field = None
def get_xos_args(self, throw_exception=True):
@@ -32,10 +32,12 @@
def get_existing_objs(self):
args = self.get_xos_args(throw_exception=False)
- provider_service = args.get("provider", None)
+ provider_service = args.get("provider_service", None)
service_specific_id = args.get("service_specific_id", None)
if (provider_service) and (service_specific_id):
- return [ self.get_xos_object(provider_service=provider_service, service_specific_id=service_specific_id) ]
+ existing_obj = self.get_xos_object(VOLTTenant, kind=VOLT_KIND, provider_service=provider_service, service_specific_id=service_specific_id, throw_exception=False)
+ if existing_obj:
+ return [ existing_obj ]
return []
def postprocess(self, obj):
diff --git a/xos/tosca/resources/compute.py b/xos/tosca/resources/compute.py
index f01a401..37ba390 100644
--- a/xos/tosca/resources/compute.py
+++ b/xos/tosca/resources/compute.py
@@ -13,7 +13,7 @@
from xosresource import XOSResource
class XOSCompute(XOSResource):
- provides = "tosca.nodes.Compute"
+ provides = ["tosca.nodes.Compute", "tosca.nodes.Compute.Container"]
xos_model = Instance
def select_compute_node(self, user, v, hostname=None):
@@ -60,11 +60,15 @@
colocate_host = colocate_instances[0].node.name
self.info("colocating on %s" % colocate_host)
+ imageName = self.get_requirement("tosca.relationships.UsesImage", throw_exception=False)
+ if imageName:
+ image = self.get_xos_object(Image, name=imageName)
+
capabilities = nodetemplate.get_capabilities()
for (k,v) in capabilities.items():
- if (k=="host"):
+ if (k=="host") and (not host):
(compute_node, flavor) = self.select_compute_node(self.user, v, hostname=colocate_host)
- elif (k=="os"):
+ elif (k=="os") and (not image):
image = self.select_image(self.user, v)
if not compute_node:
@@ -80,6 +84,9 @@
args["node"] = compute_node
args["deployment"] = compute_node.site_deployment.deployment
+ if nodetemplate.type == "tosca.nodes.Compute.Container":
+ args["isolation"] = "container"
+
return args
def create(self, name = None, index = None):
@@ -120,3 +127,4 @@
else:
return super(XOSCompute,self).get_existing_objs()
+
diff --git a/xos/tosca/resources/image.py b/xos/tosca/resources/image.py
index bdc66b6..938c5cd 100644
--- a/xos/tosca/resources/image.py
+++ b/xos/tosca/resources/image.py
@@ -15,7 +15,7 @@
class XOSImage(XOSResource):
provides = "tosca.nodes.Image"
xos_model = Image
- copyin_props = ["disk_format", "container_format", "path"]
+ copyin_props = ["disk_format", "container_format", "path", "kind"]
def get_xos_args(self):
args = super(XOSImage, self).get_xos_args()
diff --git a/xos/tosca/resources/networkparametertype.py b/xos/tosca/resources/networkparametertype.py
new file mode 100644
index 0000000..e0cc93e
--- /dev/null
+++ b/xos/tosca/resources/networkparametertype.py
@@ -0,0 +1,38 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+
+from core.models import Slice,User,Network,NetworkParameterType
+
+from xosresource import XOSResource
+
+class XOSNetworkParameterType(XOSResource):
+ provides = "tosca.nodes.NetworkParameterType"
+ xos_model = NetworkParameterType
+ copyin_props = []
+
+ def get_xos_args(self):
+ args = super(XOSNetworkParameterType, self).get_xos_args()
+
+ return args
+
+ def create(self):
+ xos_args = self.get_xos_args()
+
+ networkParameterType = NetworkParameterType(**xos_args)
+ networkParameterType.caller = self.user
+ networkParameterType.save()
+
+ self.info("Created NetworkParameterType '%s' " % (str(networkParameterType), ))
+
+ def delete(self, obj):
+ if obj.networkparameters.exists():
+ return
+
+ super(XOSNetworkParameterType, self).delete(obj)
+
+
+
diff --git a/xos/tosca/resources/onosapp.py b/xos/tosca/resources/onosapp.py
index 111cf9a..648bb09 100644
--- a/xos/tosca/resources/onosapp.py
+++ b/xos/tosca/resources/onosapp.py
@@ -43,9 +43,9 @@
if attrs:
attr = attrs[0]
if attr.value != value:
- self.info("updating attribute %s" % k)
- attrs.value = value
- attrs.save()
+ self.info("updating attribute %s" % prop_name)
+ attr.value = value
+ attr.save()
else:
self.info("adding attribute %s" % prop_name)
ta = TenantAttribute(tenant=obj, name=prop_name, value=value)
diff --git a/xos/tosca/resources/service.py b/xos/tosca/resources/service.py
index 884c6db..247be08 100644
--- a/xos/tosca/resources/service.py
+++ b/xos/tosca/resources/service.py
@@ -13,7 +13,7 @@
class XOSService(XOSResource):
provides = "tosca.nodes.Service"
xos_model = Service
- copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "versionNumber"]
+ copyin_props = ["view_url", "icon_url", "kind", "enabled", "published", "public_key", "private_key_fn", "versionNumber"]
def postprocess(self, obj):
for provider_service_name in self.get_requirements("tosca.relationships.TenantOfService"):
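
Note: no sample in this change sets the new private_key_fn property on a Service
(presumably the path to the private key matching public_key). A hedged example of a
Service node template that sets it; the kind, key, and path are illustrative, not taken
from this change:

    example_service:
      type: tosca.nodes.Service
      properties:
        kind: example
        public_key: ssh-rsa AAAAB3... admin@example.org
        private_key_fn: /opt/xos/services/example/private_key
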
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 2c02365..e37bfc8 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -12,7 +12,7 @@
class XOSSlice(XOSResource):
provides = "tosca.nodes.Slice"
xos_model = Slice
- copyin_props = ["enabled", "description", "slice_url", "max_instances"]
+ copyin_props = ["enabled", "description", "slice_url", "max_instances", "default_isolation"]
def get_xos_args(self):
args = super(XOSSlice, self).get_xos_args()
diff --git a/xos/tosca/resources/vcpeservice.py b/xos/tosca/resources/vcpeservice.py
index 6cc7390..8df7231 100644
--- a/xos/tosca/resources/vcpeservice.py
+++ b/xos/tosca/resources/vcpeservice.py
@@ -12,5 +12,5 @@
class XOSVcpeService(XOSService):
provides = "tosca.nodes.VCPEService"
xos_model = VCPEService
- copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "versionNumber", "backend_network_label"]
+ copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key", "private_key_fn", "versionNumber", "backend_network_label"]
diff --git a/xos/tosca/resources/xosresource.py b/xos/tosca/resources/xosresource.py
index 3553ab1..9c4f479 100644
--- a/xos/tosca/resources/xosresource.py
+++ b/xos/tosca/resources/xosresource.py
@@ -77,9 +77,6 @@
def get_existing_objs(self):
return self.xos_model.objects.filter(**{self.name_field: self.nodetemplate.name})
- def get_xos_args(self):
- return {}
-
def get_model_class_name(self):
return self.xos_model.__name__
diff --git a/xos/tosca/samples/container.yaml b/xos/tosca/samples/container.yaml
new file mode 100644
index 0000000..bd69fbe
--- /dev/null
+++ b/xos/tosca/samples/container.yaml
@@ -0,0 +1,42 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Template for deploying a single container with predefined properties.
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ mysite:
+ type: tosca.nodes.Site
+
+ mysite_contest:
+ type: tosca.nodes.Slice
+ requirements:
+ - slice:
+ node: mysite
+ relationship: tosca.relationships.MemberOfSite
+
+ andybavier/docker-vcpe:
+ type: tosca.nodes.Image
+ properties:
+ kind: container
+ container_format: na
+ disk_format: na
+
+ my_container:
+ type: tosca.nodes.Compute.Container
+ capabilities:
+ # Host container properties
+ host:
+ properties:
+ num_cpus: 1
+ disk_size: 10 GB
+ mem_size: 4 MB
+ requirements:
+ - slice:
+ node: mysite_contest
+ relationship: tosca.relationships.MemberOfSlice
+ - image:
+ node: andybavier/docker-vcpe
+ relationship: tosca.relationships.UsesImage
diff --git a/xos/tosca/samples/container_slice.yaml b/xos/tosca/samples/container_slice.yaml
new file mode 100644
index 0000000..520bec0
--- /dev/null
+++ b/xos/tosca/samples/container_slice.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Create a Site.
+    * Create a Slice in the Site with default_isolation set to 'container'.
+
+imports:
+ - custom_types/xos.yaml
+
+topology_template:
+ node_templates:
+ mysite:
+ type: tosca.nodes.Site
+
+ mysite_containers:
+ type: tosca.nodes.Slice
+ requirements:
+ - slice:
+ node: mysite
+ relationship: tosca.relationships.MemberOfSite
+ properties:
+ default_isolation: container
+
diff --git a/xos/tosca/samples/cord.yaml b/xos/tosca/samples/cord.yaml
index 477be2f..567ced0 100644
--- a/xos/tosca/samples/cord.yaml
+++ b/xos/tosca/samples/cord.yaml
@@ -125,7 +125,8 @@
type: tosca.nodes.VOLTTenant
properties:
service_specific_id: 1234
- vlan_id: 4321
+ s_tag: 222
+ c_tag: 432
requirements:
- provider_service:
node: service_volt
diff --git a/xos/tosca/tests/allObserverTests.py b/xos/tosca/tests/allObserverTests.py
index d06daba..6a566a9 100644
--- a/xos/tosca/tests/allObserverTests.py
+++ b/xos/tosca/tests/allObserverTests.py
@@ -1,11 +1,13 @@
-from observerComputeTest import ObserverComputeTest
+from observerVMTest import ObserverVMTest
+from observerContainerTest import ObserverContainerTest
from observerImageTest import ObserverImageTest
from observerUserTest import ObserverUserTest
from observerSiteTest import ObserverSiteTest
from observerSliceTest import ObserverSliceTest
if __name__ == "__main__":
- ObserverComputeTest()
+ ObserverVMTest()
+ ObserverContainerTest()
ObserverImageTest()
ObserverSiteTest()
ObserverUserTest()
diff --git a/xos/tosca/tests/basetest.py b/xos/tosca/tests/basetest.py
index f7f04eb..d9701d7 100644
--- a/xos/tosca/tests/basetest.py
+++ b/xos/tosca/tests/basetest.py
@@ -67,15 +67,23 @@
return yml
- def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB"):
+ def make_compute(self, slice, name, caps={}, props={}, reqs=[], num_cpus="1", disk_size="10 GB", mem_size="4 MB", isolation="vm"):
reqs = reqs[:]
+ props = props.copy()
caps = caps.copy()
+ if isolation=="container":
+ type = "tosca.nodes.Compute.Container"
+ elif isolation=="container_vm":
+ type = "tosca.nodes.Compute.ContainerVM"
+ else:
+ type = "tosca.nodes.Compute"
+
caps.update( {"host": {"num_cpus": num_cpus, "disk_size": disk_size, "mem_size": mem_size},
"os": {"architecture": "x86_64", "type": "linux", "distribution": "rhel", "version": "6.5"}} )
reqs.append( (slice, "tosca.relationships.MemberOfSlice") )
- return self.make_nodetemplate(name, "tosca.nodes.Compute",
+ return self.make_nodetemplate(name, type,
caps= caps,
props = props,
reqs= reqs)
diff --git a/xos/tosca/tests/observerContainerTest.py b/xos/tosca/tests/observerContainerTest.py
new file mode 100644
index 0000000..a31b866
--- /dev/null
+++ b/xos/tosca/tests/observerContainerTest.py
@@ -0,0 +1,95 @@
+from observertest import BaseObserverToscaTest
+
+from core.models import Instance, Site
+
+# Note that as a side effect, these tests will also create a Site
+
+class ObserverContainerTest(BaseObserverToscaTest):
+ tests = ["create_container"]
+ # hide_observer_output = False # uncomment to display lots of stuff to screen
+
+ def cleanup(self):
+ # We don't want to leak resources, so we make sure to let the observer
+ # attempt to delete these objects.
+ self.try_to_delete(Instance, purge=False, name="test_compute1")
+ self.try_to_delete(Site, purge=False, name="testsite")
+ self.run_observer()
+ # The site objects don't seem to go away nicely; they linger and
+ # cause an IntegrityError due to a duplicate login_base
+ self.try_to_delete(Site, purge=True, name="testsite")
+
+ def get_base_templates(self):
+ return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
+ self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")]) + \
+ self.make_nodetemplate("andybavier/docker-vcpe", "tosca.nodes.Image", props={"kind": "container", "container_format": "na", "disk_format": "na"})
+
+ def create_container(self):
+ self.assert_noobj(Instance, "test_compute1")
+ self.execute(self.get_base_templates() +
+ self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB", isolation="container",
+ reqs=[("andybavier/docker-vcpe", "tosca.relationships.UsesImage")],
+ ))
+ instance = self.assert_obj(Instance, "test_compute1")
+ assert(instance.flavor.name == "m1.small")
+
+ # first pass makes the Networks
+ self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_first")
+
+ # XXX deal with bug where
+ instance = self.assert_obj(Instance, "test_compute1")
+ instance.save()
+
+ # second pass makes the NetworkControllers
+ self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_second")
+
+ # first observer pass should make any necessary networks or ports
+ self.run_observer(save_output="/tmp/instancetest:create_container:observer_first")
+
+ # reset the exponential backoff
+ instance = self.assert_obj(Instance, "test_compute1")
+ instance.backend_register="{}"
+ instance.save()
+
+ # we need to reset the companion instance's exponential backoff too
+ companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+ assert(companion_instance)
+ companion_instance = companion_instance[0]
+ companion_instance.backend_register="{}"
+ companion_instance.save()
+
+ # third pass reset lazy_blocked
+ self.run_model_policy(save_output="/tmp/instancetest:create_container:model_policy_third")
+
+ # second observer pass should instantiate the controller networks
+ # (might instantiate the instance, too)
+ self.run_observer(save_output="/tmp/instancetest:create_container:observer_second")
+
+ # reset the exponential backoff
+ instance = self.assert_obj(Instance, "test_compute1")
+ instance.backend_register="{}"
+ instance.save()
+
+ # we need to reset the companion instance's exponential backoff too
+ companion_instance = Instance.objects.filter(slice=instance.slice, isolation="vm")
+ assert(companion_instance)
+ companion_instance = companion_instance[0]
+ companion_instance.backend_register="{}"
+ companion_instance.save()
+
+ # third observer pass should instantiate the companion instance
+ self.run_observer(save_output="/tmp/instancetest:create_container:observer_third")
+
+ # fourth observer pass should instantiate the instance
+ self.run_observer(save_output="/tmp/instancetest:create_container:observer_fourth")
+
+ instance = self.assert_obj(Instance, "test_compute1")
+
+ assert(instance.instance_id is not None)
+ assert(instance.instance_name is not None)
+
+ # there should be one port on the private network
+ assert(instance.ports.count() == 1)
+
+if __name__ == "__main__":
+ ObserverContainerTest()
+
diff --git a/xos/tosca/tests/observerComputeTest.py b/xos/tosca/tests/observerVMTest.py
similarity index 86%
rename from xos/tosca/tests/observerComputeTest.py
rename to xos/tosca/tests/observerVMTest.py
index 972b62c..65cbde5 100644
--- a/xos/tosca/tests/observerComputeTest.py
+++ b/xos/tosca/tests/observerVMTest.py
@@ -4,8 +4,8 @@
# Note that as a side effect, these tests will also create a Site
-class ObserverComputeTest(BaseObserverToscaTest):
- tests = ["create_instance"]
+class ObserverVMTest(BaseObserverToscaTest):
+ tests = ["create_vm"]
# hide_observer_output = False # uncomment to display lots of stuff to screen
def cleanup(self):
@@ -22,7 +22,7 @@
return self.make_nodetemplate("testsite", "tosca.nodes.Site") + \
self.make_nodetemplate("testsite_slice1", "tosca.nodes.Slice", reqs=[("testsite", "tosca.relationships.MemberOfSite")])
- def create_instance(self):
+ def create_vm(self):
self.assert_noobj(Instance, "test_compute1")
self.execute(self.get_base_templates() +
self.make_compute("testsite_slice1", "test_compute1", disk_size="1 GB", mem_size="513 MB"))
@@ -30,13 +30,13 @@
assert(instance.flavor.name == "m1.small")
# first pass makes the Networks
- self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_first")
+ self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_first")
# second pass makes the NetworkControllers
- self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_second")
+ self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_second")
# first observer pass should make any necessary networks or ports
- self.run_observer(save_output="/tmp/instancetest:create_instance:observer_first")
+ self.run_observer(save_output="/tmp/instancetest:create_vm:observer_first")
# reset the exponential backoff
instance = self.assert_obj(Instance, "test_compute1")
@@ -44,11 +44,11 @@
instance.save()
# third pass reset lazy_blocked
- self.run_model_policy(save_output="/tmp/instancetest:create_instance:model_policy_third")
+ self.run_model_policy(save_output="/tmp/instancetest:create_vm:model_policy_third")
# second observer pass should instantiate the controller networks
# (might instantiate the instance, too)
- self.run_observer(save_output="/tmp/instancetest:create_instance:observer_second")
+ self.run_observer(save_output="/tmp/instancetest:create_vm:observer_second")
# reset the exponential backoff
instance = self.assert_obj(Instance, "test_compute1")
@@ -56,13 +56,16 @@
instance.save()
# third observer pass should instantiate the instance
- self.run_observer(save_output="/tmp/instancetest:create_instance:observer_third")
+ self.run_observer(save_output="/tmp/instancetest:create_vm:observer_third")
instance = self.assert_obj(Instance, "test_compute1")
assert(instance.instance_id is not None)
assert(instance.instance_name is not None)
+ # there should be a port on the private network and a port on nat-net
+ assert(instance.ports.count() == 2)
+
if __name__ == "__main__":
- ObserverComputeTest()
+ ObserverVMTest()
diff --git a/xos/xos/apps.py b/xos/xos/apps.py
new file mode 100644
index 0000000..3462990
--- /dev/null
+++ b/xos/xos/apps.py
@@ -0,0 +1,13 @@
+from suit.apps import DjangoSuitConfig
+
+class MyDjangoSuitConfig(DjangoSuitConfig):
+ admin_name = 'XOS'
+ menu_position = 'vertical'
+ menu_open_first_child = False
+ menu = (
+ {'label': 'Deployments', 'icon':'icon-deployment', 'url': '/admin/core/deployment/'},
+ {'label': 'Sites', 'icon':'icon-site', 'url': '/admin/core/site/'},
+ {'label': 'Slices', 'icon':'icon-slice', 'url': '/admin/core/slice/'},
+ {'label': 'Users', 'icon':'icon-user', 'url': '/admin/core/user/'},
+ {'label': 'Services', 'icon':'icon-cog', 'url': '/serviceGrid/'},
+ )
\ No newline at end of file
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index 3e64d15..5942d2c 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -143,14 +143,14 @@
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
- 'suit',
+ # 'suit',
+ 'xos.apps.MyDjangoSuitConfig',
'admin_customize',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'rest_framework',
'django_extensions',
- 'django_evolution',
'core',
'hpc',
'cord',
@@ -165,11 +165,9 @@
)
if DJANGO_VERSION[1]>=7:
- # if django >= 1.7, then remove evolution and change the admin module
+ # if django >= 1.7, then change the admin module
INSTALLED_APPS = list(INSTALLED_APPS)
INSTALLED_APPS[INSTALLED_APPS.index('django.contrib.admin')] = 'django.contrib.admin.apps.SimpleAdminConfig'
- INSTALLED_APPS.remove('django_evolution')
- INSTALLED_APPS = tuple(INSTALLED_APPS)
# Added for django-suit form
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
@@ -177,68 +175,6 @@
'core.context_processors.xos',
)
-# Django Suit configuration example
-SUIT_CONFIG = {
- # header
- 'ADMIN_NAME': getattr(config, "gui_branding_name", "OpenCloud"),
- # 'HEADER_DATE_FORMAT': 'l, j. F Y',
- # 'HEADER_TIME_FORMAT': 'H:i',
-
- # forms
- #'SHOW_REQUIRED_ASTERISK': True, # Default True
- 'CONFIRM_UNSAVED_CHANGES': True, # Default True
-
- # menu
- # 'SEARCH_URL': '/admin/auth/user/',
- # 'MENU_ICONS': {
- # 'sites': 'icon-leaf',
- # 'auth': 'icon-lock',
- # },
- # 'MENU_OPEN_FIRST_CHILD': True, # Default True
- 'MENU_EXCLUDE': (
- 'auth.group',
- 'auth',
- 'core.network',
- 'core.instance',
- 'core.node',
- 'core.image',
- 'core.deploymentrole',
- 'core.siterole',
- 'core.slicerole',
- 'core.networktemplate',
- 'core.networkparametertype',
- 'core.router',
- 'core.tag',
- 'core.account',
- 'core.invoice',
- 'core.serviceclass',
- ),
- 'MENU': (
- #{'app': 'core', 'icon':'icon-lock'},
- #{'app': 'core', 'icon': 'icon-lock', 'models': ('core.site', 'core.deployment', 'core.service', 'core.slice', 'core.user', 'core.reservation', 'core.account', 'core.invoice', 'core.payment', 'core.usableobject')},
- {'label': 'Deployments', 'icon':'icon-deployment', 'url': '/admin/core/deployment/'},
- {'label': 'Sites', 'icon':'icon-site', 'url': '/admin/core/site/'},
- {'label': 'Slices', 'icon':'icon-slice', 'url': '/admin/core/slice/'},
- {'label': 'Users', 'icon':'icon-user', 'url': '/admin/core/user/'},
- {'label': 'Services', 'icon':'icon-cog', 'url': '/serviceGrid/'},
-# {'label': 'RequestRouter', 'icon':'icon-cog', 'app': 'requestrouter'},
-# {'label': 'HyperCache', 'icon':'icon-cog', 'app': 'hpc'},
-# {'label': 'Syndicate', 'icon':'icon-cog', 'app': 'syndicate_storage'},
-# {'label': 'URL Filter', 'icon': 'icon-cog', 'app': 'urlfilter'},
-# {'label': 'Service Comp', 'icon': 'icon-cog', 'app': 'servcomp'},
-
- #{'label': 'Configured Services', 'icon':'icon-cog', 'models': [{'label': 'Content Delivery Network', 'app':'hpc'}]},
- # 'sites',
- # {'app': 'auth', 'icon':'icon-lock', 'models': ('user', 'group')},
- # {'label': 'Support', 'icon':'icon-question-sign', 'url': '/support/'},
- # {'label': 'Settings', 'icon':'icon-cog', 'models': ('core.user', 'core.site')},
- # ),
- ),
-
- # misc
- # 'LIST_PER_PAGE': 15
-}
-
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.