Datapath changes for OLT
diff --git a/xos/configurations/cord/cord.yaml b/xos/configurations/cord/cord.yaml
index 344cf40..46acde9 100644
--- a/xos/configurations/cord/cord.yaml
+++ b/xos/configurations/cord/cord.yaml
@@ -127,14 +127,15 @@
node: service_volt
relationship: tosca.relationships.UsedByService
properties:
- dependencies: org.onosproject.olt
+ dependencies: org.onosproject.openflow, org.onosproject.olt
config_network-cfg.json: >
{
"devices" : {
"of:0000000000000001" : {
"accessDevice" : {
"uplink" : "2",
- "vlan" : "400"
+ "vlan" : "222",
+ "defaultVlan" : "1"
},
"basic" : {
"driver" : "default"
diff --git a/xos/configurations/cord/dataplane/dataplane.yaml b/xos/configurations/cord/dataplane/dataplane.yaml
index 4799515..052abda 100644
--- a/xos/configurations/cord/dataplane/dataplane.yaml
+++ b/xos/configurations/cord/dataplane/dataplane.yaml
@@ -58,6 +58,7 @@
vars:
controller_ip: "{{ hostvars['onos_volt']['ansible_ssh_host'] }}"
controller_port: 6653
+ vcpe_lan_ip: "{{ hostvars['vcpe']['lan_ip'] }}"
tags:
- volt
tasks:
@@ -73,6 +74,7 @@
with_items:
- git
- python-netifaces
+ - openvswitch-switch
- name: Checkout the Mininet repo
git: repo=https://github.com/mininet/mininet.git
@@ -87,14 +89,151 @@
script: scripts/if_from_ip.py {{ subscriber_ip }}
register: subscriber_net
- - name: Find lan_network interface
- script: scripts/if_from_ip.py {{ lan_ip }}
- register: lan_net
+ - name: Create bridge br-sub
+ openvswitch_bridge:
+ bridge=br-sub
+ state=present
+
+ - name: Add subscriber_net to br-sub
+ openvswitch_port:
+ bridge=br-sub
+ port={{ subscriber_net.stdout }}
+ state=present
+
+ # The CPqD switch expects packets coming from the client to carry
+ # VLAN tag 1. However Neutron's OvS configuration eats VLAN-tagged packets.
+ # So tag them with VLAN 1 here before sending them to CPqD.
+ #
+ # Note that the VLAN tag would be 0 in the real-world setup, but the CPqD
+ # switch seems to have a problem with those packets.
+
+ # Using OvS to tag packets with VLAN ID 1 does not work reliably for some
+ # reason: packets from the client get tagged fine, but only the first packet
+ # from the vCPE gets its tag stripped off. That's why we are using veth
+ # devices instead.
+ #- name: Add tag 1 to br-sub port
+ # shell: ovs-vsctl set port {{ subscriber_net.stdout }} tag=1
+
+ - name: Create a pair of veth devices
+ shell: ifconfig veth0 >> /dev/null || ip link add veth0 type veth peer name veth1
+
+ - name: Create veth0.1
+ shell: ifconfig veth0.1 >> /dev/null || ip link add link veth0 name veth0.1 type vlan id 1
+
+ - name: Bring the interfaces up
+ shell: ip link set {{ item }} up
+ with_items:
+ - veth0
+ - veth1
+ - veth0.1
+
+ - name: Add veth0.1 to br-sub
+ openvswitch_port:
+ bridge=br-sub
+ port=veth0.1
+ state=present
+
+ - name: Create bridge br-lan
+ openvswitch_bridge:
+ bridge=br-lan
+ state=present
+
+ - name: Create tunnel port on br-lan
+ openvswitch_port:
+ bridge=br-lan
+ port=gre0
+ state=present
+
+ - name: Set up GRE tunnel to vCPE
+ shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ vcpe_lan_ip }}
+
+ - name: Check if br-lan has an IPv6 address
+ shell: ip addr show br-lan | grep inet6 | awk '{print $2}'
+ register: ipv6
+
+ - name: Remove br-lan IPv6 address if present
+ shell: ifconfig br-lan inet6 del {{ ipv6.stdout }}
+ when: ipv6.stdout != ""
- name: Run the datapath
- command: /usr/local/bin/ofdatapath -i {{ subscriber_net.stdout_lines[0] }},{{ lan_net.stdout_lines[0] }} punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
+ command: /usr/local/bin/ofdatapath -i veth1,br-lan punix:/tmp/s1 -d 000000000001 --no-slicing -D -P
creates=/usr/local/var/run/ofdatapath.pid
- name: Run the control program
command: /usr/local/bin/ofprotocol unix:/tmp/s1 tcp:{{ controller_ip }}:{{ controller_port }} --fail=closed --listen=punix:/tmp/s1.listen -D -P
creates=/usr/local/var/run/ofprotocol.pid
+
+- hosts: client
+ sudo: yes
+ tags:
+ - client
+ tasks:
+
+ - name: Fix /etc/hosts
+ lineinfile:
+ dest=/etc/hosts
+ regexp="127.0.0.1 localhost"
+ line="127.0.0.1 localhost {{ ansible_hostname }}"
+
+ - name: Install packages
+ apt: name={{ item }}
+ state=latest
+ update_cache=yes
+ with_items:
+ - openvswitch-switch
+ - python-netifaces
+
+ - name: Create br-sub
+ openvswitch_bridge:
+ bridge=br-sub
+ state=present
+
+ - name: Find subscriber_network interface
+ script: scripts/if_from_ip.py {{ subscriber_ip }}
+ register: client_net
+
+ - name: Hook up subscriber-network to OvS
+ openvswitch_port:
+ bridge=br-sub
+ port={{ client_net.stdout }}
+ state=present
+
+ # TODO: run dhclient on a br-sub internal port to issue a DHCP request
+ # to the vCPE (a commented-out sketch of this step follows)
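+
+ # Hypothetical sketch of the step above; not part of this change. The
+ # internal port name "dhcp0" is an assumption.
+ #- name: Issue a DHCP request to the vCPE
+ #  shell: ovs-vsctl --may-exist add-port br-sub dhcp0 -- set Interface dhcp0 type=internal && ip link set dhcp0 up && dhclient -1 dhcp0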
+
+#
+# This play is just for testing. The vCPE configuration below will be
+# integrated with the vCPE Synchronizer.
+#
+# Need to change the data model to store both s-tag and c-tag
+#
+- hosts: vcpe
+ sudo: yes
+ vars:
+ volt_lan_ip: "{{ hostvars['switch_volt']['lan_ip'] }}"
+ tags:
+ - vcpe
+ tasks:
+
+ - name: Install packages
+ apt: name={{ item }}
+ state=latest
+ update_cache=yes
+ with_items:
+ - openvswitch-switch
+
+ - name: Create br-lan
+ openvswitch_bridge:
+ bridge=br-lan
+ state=present
+
+ - name: Create tunnel port
+ openvswitch_port:
+ bridge=br-lan
+ port=gre0
+ state=present
+
+ - name: Configure GRE tunnel to vOLT switch
+ shell: ovs-vsctl set Interface gre0 type=gre options:remote_ip={{ volt_lan_ip }}
+
+ - name: Restart vCPEs
+ debug: msg="Need to write a script that restarts all vCPEs"
diff --git a/xos/configurations/cord/dataplane/scripts/if_from_ip.py b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
index be1da48..28524fe 100644
--- a/xos/configurations/cord/dataplane/scripts/if_from_ip.py
+++ b/xos/configurations/cord/dataplane/scripts/if_from_ip.py
@@ -8,7 +8,7 @@
for iface in netifaces.interfaces():
addrs = netifaces.ifaddresses(iface)
if 2 in addrs and addrs[2][0]['addr'] == addr:
- print iface
-
+ sys.stdout.write(iface)
+
if __name__ == "__main__":
main(sys.argv[1:])
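Switching from print to sys.stdout.write drops the trailing newline, so the
registered result can be interpolated directly as an OvS port name (the plays
above now use subscriber_net.stdout rather than stdout_lines[0]). Example run,
with a hypothetical address:

    $ python if_from_ip.py 10.0.5.3
    eth1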
diff --git a/xos/observers/vcpe/templates/start-vcpe.sh.j2 b/xos/observers/vcpe/templates/start-vcpe.sh.j2
index a3533fa..c4128f3 100755
--- a/xos/observers/vcpe/templates/start-vcpe.sh.j2
+++ b/xos/observers/vcpe/templates/start-vcpe.sh.j2
@@ -8,7 +8,9 @@
iptables -L > /dev/null
ip6tables -L > /dev/null
-VCPE=vcpe-{{ vlan_ids[0] }}
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VCPE=vcpe-$STAG-$CTAG
docker inspect $VCPE > /dev/null 2>&1
if [ "$?" == 1 ]
@@ -23,14 +25,23 @@
WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
docker exec $VCPE ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VCPE {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
-LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
-docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE -i eth1 $VCPE 192.168.0.1/24 @{{ vlan_ids[0] }}
+# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
+# The VLAN traffic must be encapsulated (here, over br-lan's GRE tunnel) so
+# that Neutron doesn't eat it.
+# Assumes that br-lan has already been set up by a previous step (see
+# dataplane.yaml).
+LAN_IFACE=br-lan
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+ ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+ ifconfig $LAN_IFACE.$STAG up
+ docker exec $VCPE ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG
+fi
-HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
-docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VCPE ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VCPE {{ hpc_client_ip }}/24
# Make sure VM's eth0 (hpc_client) has no IP address
-ifconfig $HPC_IFACE 0.0.0.0
+#ifconfig $HPC_IFACE 0.0.0.0
# Now can start up dnsmasq
docker exec $VCPE service dnsmasq start
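Each vCPE container is now keyed by its s-tag/c-tag pair instead of a single
VLAN id. On the LAN side the script stacks a VLAN device carrying the s-tag on
top of br-lan and lets pipework tag the container's eth1 traffic with the
c-tag, so double-tagged frames ride the GRE tunnel to the vOLT switch. The
manual equivalent, with hypothetical tag values:

    # Sketch only: 222/111 are example tags, not taken from this change.
    STAG=222; CTAG=111; VCPE=vcpe-$STAG-$CTAG
    ip link add link br-lan name br-lan.$STAG type vlan id $STAG
    ip link set br-lan.$STAG up
    pipework br-lan.$STAG -i eth1 $VCPE 192.168.0.1/24 @$CTAG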