Merge branch 'master' of github.com:open-cloud/xos
diff --git a/xos/configurations/cord-pod/cdn/README.md b/xos/configurations/cord-pod/cdn/README.md
new file mode 100644
index 0000000..a7730f9
--- /dev/null
+++ b/xos/configurations/cord-pod/cdn/README.md
@@ -0,0 +1,67 @@
+## Set up a new CDN
+
+### CDN on VTN - headnode
+
+1. nova flavor-create --is-public true m1.cdnnode auto 8192 110 4
+2. in XOS create flavor m1.cdnnode and add to deployment
+
+### CDN on VTN - CMI
+
+1. Make sure plenty of glance space on ctl node
+2. Make sure plenty of instance space on compute nodes
+3. Install cmi-0.3.img into XOS images/ directory
+4. Install CentOS-6-cdnnode-0.3.img into XOS images/ directory
+5. Wait for these two images to be loaded into glance (check glance image-list for status)
+6. XOS UI: Add cmi and CentOS images to MyDeployment
+7. Run recipe xos/configurations/cord-pod/pod-cdn.yaml
+ * this will create mysite_cdn slice, cdn-public network, and add management and cdn-public networks to slice
+8. Instantiate CMI instance in mysite_cdn
+ * flavor: m1.large
+ * image: cmi-0.3.img
+9. edit configurations/cord-pod/cdn/cmi-settings.sh
+ * update COMPUTE_NODE and MGMT_IP to match CMI instance
+ * update NODE_KEY to match ssh key for root @ the compute node
+ * do not change VM_KEY; the pubkey is baked into the instance
+10. edit configurations/cord-pod/cdn/cmi.yaml
+ * update gateway_ip and gateway_mac to reflect public internet gateway CMI will use
+11. copy the keygen and allkeys.template to the private/ directory
+12. copy cmi_id_rsa
+13. run setup-cmi.sh
+ * this will SSH into the CMI and run setup, then modify some settings.
+ * it may take a long time, 10-20 minutes or more
+ * takeover script will be saved to takeovers/. Takeover script will be used in the next phase.
+
+### CDN on VTN - cdnnode
+
+1. Instantiate cdnnode instance in mysite_cdn
+ * flavor: m1.cdnnode
+    * image: CentOS-6-cdnnode-0.3.img
+2. Log into compute node and Attach disk
+    * `virsh attach-disk <instance_name> /dev/sdc vdc --cache none`
+ * (make sure this disk wasn't used anywhere else!)
+3. log into cdnnode VM
+ * make sure default gateway is good (check public connectivity)
+ * make sure arp table is good
+ * make sure CMI is reachable from cdnnode
+ * run takeover script that was created by the CMI
+ * (I suggest commenting out the final reboot -f, and make sure the rest of it worked right before rebooting)
+ * Node will take a long time to install
+4. log into cdnnode
+ * to SSH into cdnnode, go into CMI, vserver coplc, cd /etc/planetlab, and use debug_ssh_key.rsa w/ root user
+ * check default gateway
+ * fix arp entry for default gateway
+
+### CDN on VTN - cmi part 2
+
+1. run setup-logicalinterfaces.sh
+
+### Test Commands
+
+* First, make sure the vSG is the only DNS server available to the test client.
+* Second, make sure cdn_enable bit is set in CordSubscriber object for your vSG.
+* curl -L -vvvv http://downloads.onosproject.org/vm/onos-tutorial-1.1.0r220-ovf.zip > /dev/null
+* curl -L -vvvv http://onlab.vicci.org/onos-videos/Nov-planning-day1/Day1+00+Bill+-+Community+Growth.mp4 > /dev/null
+
+## Restart CDN after power-down
+
+To do...
diff --git a/xos/synchronizers/vcpe/steps/sync_vcpetenant_vtn.yaml b/xos/synchronizers/vcpe/steps/sync_vcpetenant_vtn.yaml
index f29f04c..f042e5d 100644
--- a/xos/synchronizers/vcpe/steps/sync_vcpetenant_vtn.yaml
+++ b/xos/synchronizers/vcpe/steps/sync_vcpetenant_vtn.yaml
@@ -122,41 +122,60 @@
shell: touch /root/network_is_setup
{% if full_setup %}
+ - name: Check to see if environment is setup
+ stat: path=/root/environment_is_setup
+ register: environment_is_setup
+
- name: Docker repository
copy: src=/opt/xos/synchronizers/vcpe/files/docker.list
dest=/etc/apt/sources.list.d/docker.list
+ when: environment_is_setup.stat.exists == False
- name: Import the repository key
apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+ when: environment_is_setup.stat.exists == False
+
+ - name: Update cache
+ apt: update_cache=yes
+ when: environment_is_setup.stat.exists == False
- name: install Docker
- apt: name=lxc-docker state=present update_cache=yes
+ apt: name=lxc-docker state=present
+ when: environment_is_setup.stat.exists == False
- name: install python-setuptools
apt: name=python-setuptools state=present
+ when: environment_is_setup.stat.exists == False
- name: install pip
easy_install: name=pip
+ when: environment_is_setup.stat.exists == False
- name: install docker-py
pip: name=docker-py version=0.5.3
+ when: environment_is_setup.stat.exists == False
- name: install Pipework
get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
dest=/usr/local/bin/pipework
mode=0755
+ when: environment_is_setup.stat.exists == False
- - name: make sure /etc/dnsmasq.d exists
- file: path=/etc/dnsmasq.d state=directory owner=root group=root
+ - name: Stop resolvconf service
+ service: name=resolvconf state=stopped
+ when: environment_is_setup.stat.exists == False
- name: Disable resolvconf service
- shell: service resolvconf stop
- shell: echo manual > /etc/init/resolvconf.override
- shell: rm -f /etc/resolv.conf
+ copy: dest=/etc/init/resolvconf.override content="manual"
+ when: environment_is_setup.stat.exists == False
- name: Install resolv.conf
copy: src=/opt/xos/synchronizers/vcpe/files/vm-resolv.conf
dest=/etc/resolv.conf
+ when: environment_is_setup.stat.exists == False
+
+ - name: Remember that the environment is setup, so we never do the above again
+ shell: touch /root/environment_is_setup
- name: Verify if vcpe_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
shell: pgrep -f [v]cpe_stats_notifier | wc -l
@@ -196,8 +215,8 @@
- remove container
- start vcpe
- - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d
- file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d state=directory owner=root group=root
+ - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
+ file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
- name: vCPE basic dnsmasq config
copy: src=/opt/xos/synchronizers/vcpe/files/vcpe.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/vcpe.conf owner=root group=root
@@ -209,28 +228,22 @@
notify:
- restart dnsmasq
- - name: create directory for "safe" config
- file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
-
- name: dnsmasq "safe" config
template: src=/opt/xos/synchronizers/vcpe/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
notify:
- restart dnsmasq
- - name: copy base ufw files
- copy: src=/opt/xos/synchronizers/vcpe/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
- notify:
- - reload ufw
+ - name: create /var/container_volumes/{{ container_name }}/mount/
+ file: path=/var/container_volumes/{{ container_name }}/mount state=directory owner=root group=root
- name: redirection rules for safe DNS
- template: src=/opt/xos/synchronizers/vcpe/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root mode=0644
+ template: src=/opt/xos/synchronizers/vcpe/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/mount/before.rules owner=root group=root mode=0644
notify:
- reload ufw
- name: base ufw setup uses /etc/rc.local
- template: src=/opt/xos/synchronizers/vcpe/templates/rc.local.j2 dest=/var/container_volumes/{{ container_name }}/etc/rc.local owner=root group=root mode=0755
+ template: src=/opt/xos/synchronizers/vcpe/templates/rc.local.j2 dest=/var/container_volumes/{{ container_name }}/mount/rc.local owner=root group=root mode=0755
notify:
- - copy in /etc/rc.local
- rerun /etc/rc.local
- name: create directory for local programs
@@ -241,6 +254,9 @@
notify:
- reset bwlimits
+ - name: create directory for simple webserver
+ file: path=/var/container_volumes/{{ container_name }}/etc/service/message state=directory
+
- name: copy simple webserver
copy: src=/opt/xos/synchronizers/vcpe/files/etc/service/ dest=/var/container_volumes/{{ container_name }}/etc/service/ owner=root group=root
when: status != "enabled"
@@ -252,24 +268,17 @@
- name: generate the message page
template: src=/opt/xos/synchronizers/vcpe/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
when: status != "enabled"
- notify: restart vcpe
+ #notify: restart vcpe
- name: remove simple webserver
file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
when: status == "enabled"
- notify: restart vcpe
+ #notify: restart vcpe
- name: Make sure vCPE service is running
service: name={{ container_name }} state=started
handlers:
- # Use docker cp instead of single-file volume
- # The reason is that changes to external file volume don't show up inside the container
- # Probably Ansible deletes and then recreates the external file, and container has old version
- # Do this handler first, e.g., before restarting the container
- - name: copy in /etc/rc.local
- shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
-
# Dnsmasq is automatically restarted in the container
- name: restart dnsmasq
shell: docker exec {{ container_name }} killall dnsmasq
diff --git a/xos/synchronizers/vcpe/templates/start-vcpe-vtn.sh.j2 b/xos/synchronizers/vcpe/templates/start-vcpe-vtn.sh.j2
index 62c504e..dfdce0a 100644
--- a/xos/synchronizers/vcpe/templates/start-vcpe-vtn.sh.j2
+++ b/xos/synchronizers/vcpe/templates/start-vcpe-vtn.sh.j2
@@ -17,10 +17,10 @@
then
docker pull andybavier/docker-vcpe
docker run -d --name=$VCPE --privileged=true --net=none \
+ -v /var/container_volumes/$VCPE/mount:/mount:ro \
-v /var/container_volumes/$VCPE/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
- -v /var/container_volumes/$VCPE/usr/local/sbin:/usr/local/sbin:ro \
- -v /var/container_volumes/$VCPE/etc/ufw:/etc/ufw \
-v /var/container_volumes/$VCPE/etc/service/message:/etc/service/message \
+ -v /var/container_volumes/$VCPE/usr/local/sbin:/usr/local/sbin:ro \
andybavier/docker-vcpe
else
docker start $VCPE