Merge branch 'master' of github.com:open-cloud/xos
diff --git a/xos/configurations/cord/cord.yaml b/xos/configurations/cord/cord.yaml
index 344cf40..46acde9 100644
--- a/xos/configurations/cord/cord.yaml
+++ b/xos/configurations/cord/cord.yaml
@@ -127,14 +127,15 @@
               node: service_volt
               relationship: tosca.relationships.UsedByService
       properties:
-          dependencies: org.onosproject.olt
+          dependencies: org.onosproject.openflow, org.onosproject.olt
           config_network-cfg.json: >
             {
               "devices" : {
                 "of:0000000000000001" : {
                   "accessDevice" : {
                     "uplink" : "2",
-                    "vlan"   : "400"
+                    "vlan"   : "222",
+                    "defaultVlan" : "1"
                   },
                   "basic" : {
                     "driver" : "default"
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index 4799515..026ec89 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -58,6 +58,7 @@
   vars:
     controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
     controller_port: 6653
+    vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
   tags:
   - volt
   tasks:
@@ -73,6 +74,7 @@
     with_items:
     - git
     - python-netifaces
+    - openvswitch-switch
 
   - name: Checkout the Mininet repo
     git: repo=https://github.com/mininet/mininet.git
@@ -87,14 +89,151 @@
     script: scripts/if_from_ip.py {{ subscriber_ip }}
     register: subscriber_net
 
-  - name: Find lan_network interface
-    script: scripts/if_from_ip.py {{ lan_ip }}
-    register: lan_net
+  - name: Create bridge br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Add subscriber_net to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port={{ subscriber_net.stdout }}
+      state=present
+
+  # The CPqD switch expects packets coming from the client to carry
+  # VLAN tag 1.  However, Neutron's OvS configuration eats VLAN-tagged
+  # packets, so tag them with VLAN 1 here before sending them to CPqD.
+  #
+  # Note that the VLAN tag is 0 (priority-tagged) in the real-world setup,
+  # but the CPqD switch seems to have a problem with such packets.
+
+  # Using OvS itself to tag packets with VLAN ID 1 does not work reliably:
+  # packets from the client get tagged correctly, but only the first packet
+  # from the vCPE has its tag stripped.  That's why we use veth devices
+  # instead.
+  #- name: Add tag 1 to br-sub port
+  #  shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
+  - name: Create a pair of veth devices
+    shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+  - name: Create veth0.1
+    shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+  - name: Bring the interfaces up
+    shell: ip link set {{ item }} up
+    with_items:
+    - veth0
+    - veth1
+    - veth0.1
+
+  - name: Add veth0.1 to br-sub
+    openvswitch_port:
+      bridge=br-sub
+      port=veth0.1
+      state=present
+
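A quick spot-check that the veth workaround behaves as intended is to watch the untagged veth peer; a hedged sketch, assuming the device names created above:

    # veth1 is the raw peer handed to ofdatapath below; frames that entered
    # br-sub via veth0.1 should appear here carrying 802.1Q tag 1.
    tcpdump -e -n -c 5 -i veth1 vlan 1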
+  - name: Create bridge br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port on br-lan
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Set up GRE tunnel to vCPE
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
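OvS can echo the tunnel settings back for verification; a sketch, assuming the port name above:

    # Should report type=gre and the remote_ip set in the previous task.
    ovs-vsctl --columns=name,type,options list Interface gre0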
+  - name: Check if br-lan has an IPv6 address
+    shell: ip addr show br-lan|grep inet6|awk '{print $2}'
+    register: ipv6
+
+  - name: Remove br-lan IPv6 address if present
+    shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+    when: ipv6.stdout != ""
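The kernel auto-assigns a link-local IPv6 address to br-lan, and the two tasks above strip it, presumably so the host stack does not inject ICMPv6 traffic into the datapath. An alternative sketch (an assumption, not what the playbook does) is to keep the address from appearing at all:

    # Prevent the kernel from assigning any IPv6 address to br-lan.
    sysctl -w net.ipv6.conf.br-lan.disable_ipv6=1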
 
   - name: Run the datapath
-    command: /usr/local/bin/ofdatapath -i {{ subscriber_net.stdout_lines[0] }},{{ lan_net.stdout_lines[0] }} punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+    command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
       creates=/usr/local/var/run/ofdatapath.pid
 
   - name: Run the control program
     command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
       creates=/usr/local/var/run/ofprotocol.pid
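ofdatapath is started with dpid 000000000001, which ONOS lists as of:0000000000000001 (the device id used in cord.yaml above). A hedged way to confirm the switch actually connected, assuming default ONOS REST credentials:

    # $CONTROLLER_IP is the controller_ip from the play vars above.
    curl -u onos:rocks http://$CONTROLLER_IP:8181/onos/v1/devices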
+
+- hosts: client
+  sudo: yes
+  tags:
+  - client
+  tasks:
+
+  - name: Fix /etc/hosts
+    lineinfile:
+      dest=/etc/hosts
+      regexp="127.0.0.1 localhost"
+      line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+    - python-netifaces
+
+  - name: Create br-sub
+    openvswitch_bridge:
+      bridge=br-sub
+      state=present
+
+  - name: Find subscriber_network interface
+    script: scripts/if_from_ip.py {{ subscriber_ip }}
+    register: client_net
+
+  - name: Hook up subscriber-network to OvS
+    openvswitch_port:
+      bridge=br-sub
+      port={{ client_net.stdout }}
+      state=present
+
+  # TODO: run dhclient on a br-sub internal interface to issue a DHCP
+  # request to the vCPE (see the sketch after this play)
+
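A minimal sketch of the step the TODO above leaves open, assuming a hypothetical internal port named sub0:

    # Create an OvS internal port on br-sub and request an address from the
    # vCPE's dnsmasq through it.
    ovs-vsctl -- --may-exist add-port br-sub sub0 -- set Interface sub0 type=internal
    ip link set sub0 up
    dhclient sub0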
+#
+# This play is just for testing.  The vCPE configuration below will be
+# integrated with the vCPE Synchronizer.
+#
+# Need to change the data model to store both s-tag and c-tag
+#
+- hosts: vcpe
+  sudo: yes
+  vars:
+    volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+  tags:
+  - vcpe
+  tasks:
+
+  - name: Install packages
+    apt: name={{ item }}
+      state=latest
+      update_cache=yes
+    with_items:
+    - openvswitch-switch
+
+  - name: Create br-lan
+    openvswitch_bridge:
+      bridge=br-lan
+      state=present
+
+  - name: Create tunnel port
+    openvswitch_port:
+      bridge=br-lan
+      port=gre0
+      state=present
+
+  - name: Configure GRE tunnel to vOLT switch
+    shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+  - name: Restart vCPEs
+    script: scripts/restart-vcpes.sh
diff --git a/xos/configurations/cord/dataplane/scripts/if_from_ip.py b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
index be1da48..28524fe 100644
--- a/xos/configurations/cord/dataplane/scripts/if_from_ip.py
+++ b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
@@ -8,7 +8,7 @@
     for iface in netifaces.interfaces():
         addrs = netifaces.ifaddresses(iface)
         if 2 in addrs and addrs[2][0]['addr'] == addr:
-            print iface
-    
+            sys.stdout.write(iface)
+
 if __name__ == "__main__":
     main(sys.argv[1:])
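sys.stdout.write avoids the trailing newline that print appends, presumably so the value Ansible registers is exactly the interface name. A usage sketch with a hypothetical address:

    # The output now substitutes cleanly into other commands.
    IFACE=$(python if_from_ip.py 10.0.1.5)
    ovs-vsctl add-port br-sub "$IFACE"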
diff --git a/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
new file mode 100644
index 0000000..d1c9fce
--- /dev/null
+++ b/xos/configurations/cord/dataplane/scripts/restart-vcpes.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+for VCPE in $( docker ps|grep vcpe|awk '{print $NF}' )
+do
+  service $VCPE stop
+  sleep 1
+  service $VCPE start
+done
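Since the loop restarts whatever matches, the same pipeline makes a useful dry run; a sketch:

    # List the services the loop above would restart, without touching them.
    docker ps | grep vcpe | awk '{print $NF}'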
diff --git a/xos/observers/vcpe/steps/sync_vcpetenant.yaml b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
index e4d3167..c3b7246 100644
--- a/xos/observers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/observers/vcpe/steps/sync_vcpetenant.yaml
@@ -117,27 +117,27 @@
 {% endif %}
 
   - name: vCPE upstart
-    template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ vlan_ids[0] }}.conf
+    template: src=/opt/xos/observers/vcpe/templates/vcpe.conf.j2 dest=/etc/init/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
 
   - name: vCPE startup script
-    template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh mode=0755
+    template: src=/opt/xos/observers/vcpe/templates/start-vcpe.sh.j2 dest=/usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
     notify:
 #    - restart vcpe
      - stop vcpe
      - remove container
      - start vcpe
 
-  - name: create /etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d
-    file: path=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d state=directory owner=root group=root
+  - name: create /etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
+    file: path=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
 
   - name: vCPE basic dnsmasq config
-    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
+    copy: src=/opt/xos/observers/vcpe/files/vcpe.dnsmasq dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/vcpe.conf owner=root group=root
     notify:
     - stop dnsmasq
     - start dnsmasq
 
   - name: dnsmasq config
-    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ vlan_ids[0] }}/dnsmasq.d/servers.conf owner=root group=root
+    template: src=/opt/xos/observers/vcpe/templates/dnsmasq_servers.j2 dest=/etc/vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
     notify:
     - stop dnsmasq
     - start dnsmasq
@@ -151,23 +151,23 @@
 #    template: src=/opt/xos/observers/vcpe/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
 
   - name: Make sure vCPE service is running
-    service: name=vcpe-{{ vlan_ids[0] }} state=started
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
 
   handlers:
   - name: stop dnsmasq
-    shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/bin/killall dnsmasq
+    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/bin/killall dnsmasq
 
   - name: start dnsmasq
-    shell: docker exec vcpe-{{ vlan_ids[0] }} /usr/sbin/service dnsmasq start
+    shell: docker exec vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} /usr/sbin/service dnsmasq start
 
   - name: restart vcpe
-    shell: service vcpe-{{ vlan_ids[0] }} stop; sleep 1; service vcpe-{{ vlan_ids[0] }} start
+    shell: service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} start
 
   - name: stop vcpe
-    service: name=vcpe-{{ vlan_ids[0] }} state=stopped
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
 
   - name: remove container
-    docker: name=vcpe-{{ vlan_ids[0] }} state=absent image=docker-vcpe
+    docker: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-vcpe
 
   - name: start vcpe
-    service: name=vcpe-{{ vlan_ids[0] }} state=started
+    service: name=vcpe-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
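The rename switches the per-subscriber identifier from a single VLAN id to an (s-tag, c-tag) pair, so containers and services are now named vcpe-<s-tag>-<c-tag>. With hypothetical tags 222 and 111, the handlers above reduce to commands like:

    # Hypothetical tag values, for illustration only.
    service vcpe-222-111 status
    docker exec vcpe-222-111 /usr/sbin/service dnsmasq status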
diff --git a/xos/observers/vcpe/templates/start-vcpe.sh.j2 b/xos/observers/vcpe/templates/start-vcpe.sh.j2
index a3533fa..c4128f3 100755
--- a/xos/observers/vcpe/templates/start-vcpe.sh.j2
+++ b/xos/observers/vcpe/templates/start-vcpe.sh.j2
@@ -8,7 +8,9 @@
 iptables -L > /dev/null
 ip6tables -L > /dev/null
 
-VCPE=vcpe-{{ vlan_ids[0] }}
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VCPE=vcpe-$STAG-$CTAG
 
 docker inspect $VCPE > /dev/null 2>&1
 if [ "$?" == 1 ]
@@ -23,14 +25,23 @@
 WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
 docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
 
-LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE -i eth1 $VCPE 192.168.0.1/24 @{{ vlan_ids[0] }}
+# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
+# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
+# Assumes that br-lan has been set up appropriately by a previous step
+LAN_IFACE=br-lan
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+    ifconfig $LAN_IFACE.$STAG up
+    docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+fi
 
-HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
 
 # Make sure VM's eth0 (hpc_client) has no IP address
-ifconfig $HPC_IFACE 0.0.0.0
+#ifconfig $HPC_IFACE 0.0.0.0
 
 # Now can start up dnsmasq
 docker exec $VCPE service dnsmasq start
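The script now places the outer s-tag on a host-side VLAN subinterface of br-lan, while pipework applies the inner c-tag inside the container (the @$CTAG argument), giving QinQ-style double tagging over the GRE-fed bridge. A hedged check, assuming s-tag 222:

    # -d prints VLAN details; the reported id should equal $STAG.
    ip -d link show br-lan.222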
diff --git a/xos/observers/vcpe/templates/vcpe.conf.j2 b/xos/observers/vcpe/templates/vcpe.conf.j2
index 1951322..fa7885e 100644
--- a/xos/observers/vcpe/templates/vcpe.conf.j2
+++ b/xos/observers/vcpe/templates/vcpe.conf.j2
@@ -6,5 +6,5 @@
 respawn
 
 script
-  /usr/local/sbin/start-vcpe-{{ vlan_ids[0] }}.sh
+  /usr/local/sbin/start-vcpe-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
 end script
diff --git a/xos/openstack_observer/event_loop.py b/xos/openstack_observer/event_loop.py
index db7459a..0ac626b 100644
--- a/xos/openstack_observer/event_loop.py
+++ b/xos/openstack_observer/event_loop.py
@@ -32,7 +32,7 @@
 # Load app models
 
 try:
-    app_module_names = Config().observer_applist
+    app_module_names = Config().observer_applist.split(',')
 except AttributeError:
     app_module_names = []
 
diff --git a/xos/xos_config b/xos/xos_config
index c47fac6..b86d618 100644
--- a/xos/xos_config
+++ b/xos/xos_config
@@ -37,7 +37,6 @@
 images_directory=/opt/xos/images
 dependency_graph=/opt/xos/model-deps
 logfile=/var/log/xos_backend.log
-applist=helloworld
 
 [gui]
 disable_minidashboard=True
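With applist removed, Config().observer_applist raises AttributeError and event_loop.py (patched above) falls back to an empty list. If observer apps are re-enabled, the value is now split on commas; a hypothetical example of the line that would return to this section:

    applist=helloworld,monitoring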