Move the vTR service over from the XOS repo
diff --git a/xos/synchronizer/files/run_tcpdump.sh b/xos/synchronizer/files/run_tcpdump.sh
new file mode 100644
index 0000000..ed75bf0
--- /dev/null
+++ b/xos/synchronizer/files/run_tcpdump.sh
@@ -0,0 +1,9 @@
+#! /bin/bash
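+# Capture up to 100 packets on the given interface while a background curl
+# request generates traffic, then stop both after 30 seconds.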
+INTERFACE=$1
+tcpdump -n -e -i "$INTERFACE" -c 100 &
+PID_TCPDUMP=$!
+curl http://www.xosproject.org/ &> /dev/null &
+PID_CURL=$!
+sleep 30s
+kill $PID_TCPDUMP
+kill $PID_CURL
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..61ffb39
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,10 @@
+manifest
+vtr-synchronizer.py
+vtn_vtr_synchronizer_config
+steps/sync_vtrtenant.py
+steps/sync_vtrtenant.yaml
+vtr_synchronizer_config
+files/run_tcpdump.sh
+run-vtn.sh
+model-deps
+run.sh
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/run-vtn.sh b/xos/synchronizer/run-vtn.sh
new file mode 100755
index 0000000..b2f9518
--- /dev/null
+++ b/xos/synchronizer/run-vtn.sh
@@ -0,0 +1,4 @@
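+# Run the VTR synchronizer in VTN mode: install the node key used for proxy SSH,
+# then start the synchronizer with the VTN-enabled config.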
+export XOS_DIR=/opt/xos
+cp /root/setup/node_key $XOS_DIR/synchronizers/vtr/node_key
+chmod 0600 $XOS_DIR/synchronizers/vtr/node_key
+python vtr-synchronizer.py -C $XOS_DIR/synchronizers/vtr/vtn_vtr_synchronizer_config
diff --git a/xos/synchronizer/run.sh b/xos/synchronizer/run.sh
new file mode 100755
index 0000000..388fdf9
--- /dev/null
+++ b/xos/synchronizer/run.sh
@@ -0,0 +1,2 @@
+export XOS_DIR=/opt/xos
+python vtr-synchronizer.py -C $XOS_DIR/synchronizers/vtr/vtr_synchronizer_config
diff --git a/xos/synchronizer/steps/sync_vtrtenant.py b/xos/synchronizer/steps/sync_vtrtenant.py
new file mode 100644
index 0000000..f0f7ef3
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vtrtenant.py
@@ -0,0 +1,147 @@
+import os
+import socket
+import sys
+import base64
+import time
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice, Tag
+from services.vsg.models import VSGService, VCPE_KIND
+from services.vtr.models import VTRService, VTRTenant
+from services.hpc.models import HpcService, CDNPrefix
+from xos.logger import Logger, logging
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+logger = Logger(level=logging.INFO)
+
+CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+
+class SyncVTRTenant(SyncInstanceUsingAnsible):
+    provides=[VTRTenant]
+    observes=VTRTenant
+    requested_interval=0
+    template_name = "sync_vtrtenant.yaml"
+    #service_key_name = "/opt/xos/services/vtr/vcpe_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncVTRTenant, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
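+        # Return tenants that are new or updated since they were last enacted,
+        # or the deleted tenants when deleted=True.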
+        if not deleted:
+            objs = VTRTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VTRTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_vtr_service(self, o):
+        if not o.provider_service:
+            return None
+
+        vtrs = VTRService.get_service_objects().filter(id=o.provider_service.id)
+        if not vtrs:
+            return None
+
+        return vtrs[0]
+
+    def get_vcpe_service(self, o):
+        if o.target:
+            # o.target is a CordSubscriberRoot
+            if o.target.volt and o.target.volt.vcpe:
+                vcpes = VSGService.get_service_objects().filter(id=o.target.volt.vcpe.provider_service.id)
+                if not vcpes:
+                    return None
+                return vcpes[0]
+        return None
+
+    def get_instance(self, o):
+        if o.target and o.target.volt and o.target.volt.vcpe:
+            return o.target.volt.vcpe.instance
+        else:
+            return None
+
+    def get_key_name(self, instance):
+        if instance.slice.service and (instance.slice.service.kind==VCPE_KIND):
+            # We need to use the vsg service's private key. Onboarding won't
+            # by default give us another service's private key, so let's assume
+            # onboarding has been configured to add vsg_rsa to the vtr service.
+            return "/opt/xos/services/vtr/keys/vsg_rsa"
+        else:
+            raise Exception("VTR doesn't know how to get the private key for this instance")
+
+    def get_extra_attributes(self, o):
+        vtr_service = self.get_vtr_service(o)
+        vcpe_service = self.get_vcpe_service(o)
+
+        if not vcpe_service:
+            raise Exception("No vcpeservice")
+
+        instance = self.get_instance(o)
+
+        if not instance:
+            raise Exception("No instance")
+
+        s_tags = []
+        c_tags = []
+        if o.target and o.target.volt:
+            s_tags.append(o.target.volt.s_tag)
+            c_tags.append(o.target.volt.c_tag)
+
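+        # The result and resultcode filenames encode the test name and the
+        # subscriber's s-tag/c-tag so each test run writes to a unique pair of files.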
+        fields = {"s_tags": s_tags,
+                "c_tags": c_tags,
+                "isolation": instance.isolation,
+                "container_name": "vcpe-%s-%s" % (s_tags[0], c_tags[0]),
+                "dns_servers": [x.strip() for x in vcpe_service.dns_servers.split(",")],
+
+                "result_fn": "%s-vcpe-%s-%s" % (o.test, s_tags[0], c_tags[0]),
+                "resultcode_fn": "code-%s-vcpe-%s-%s" % (o.test, s_tags[0], c_tags[0]) }
+
+        # add in the sync_attributes that come from the vSG object
+        # this will be wan_ip, wan_mac, wan_container_ip, wan_container_mac, ...
+        if o.target and o.target.volt and o.target.volt.vcpe:
+            for attribute_name in o.target.volt.vcpe.sync_attributes:
+                fields[attribute_name] = getattr(o.target.volt.vcpe, attribute_name)
+
+        # add in the sync_attributes that come from the SubscriberRoot object
+        if o.target and hasattr(o.target, "sync_attributes"):
+            for attribute_name in o.target.sync_attributes:
+                fields[attribute_name] = getattr(o.target, attribute_name)
+
+        for attribute_name in o.sync_attributes:
+            fields[attribute_name] = getattr(o,attribute_name)
+
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+
+        super(SyncVTRTenant, self).sync_fields(o, fields)
+
+    def run_playbook(self, o, fields):
+        o.result = ""
+
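+        # Remove any stale result files so that only output from this run is read back.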
+        result_fn = os.path.join("/opt/xos/synchronizers/vtr/result", fields["result_fn"])
+        if os.path.exists(result_fn):
+            os.remove(result_fn)
+
+        resultcode_fn = os.path.join("/opt/xos/synchronizers/vtr/result", fields["resultcode_fn"])
+        if os.path.exists(resultcode_fn):
+            os.remove(resultcode_fn)
+
+        super(SyncVTRTenant, self).run_playbook(o, fields)
+
+        if os.path.exists(result_fn):
+            o.result = open(result_fn).read()
+
+        if os.path.exists(resultcode_fn):
+            o.result_code = open(resultcode_fn).read()
+
+
+    def delete_record(self, m):
+        pass
diff --git a/xos/synchronizer/steps/sync_vtrtenant.yaml b/xos/synchronizer/steps/sync_vtrtenant.yaml
new file mode 100644
index 0000000..35d9032
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vtrtenant.yaml
@@ -0,0 +1,123 @@
+---
+- hosts: {{ instance_name }}
+  #gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      wan_container_ip: {{ wan_container_ip }}
+      wan_container_netbits: {{ wan_container_netbits }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_container_gateway_ip: {{ wan_container_gateway_ip }}
+      wan_vm_ip: {{ wan_vm_ip }}
+      wan_vm_mac: {{ wan_vm_mac }}
+
+      scope: {{ scope }}
+      test: {{ test }}
+      argument: {{ argument }}
+      result_fn: {{ result_fn }}
+      resultcode_fn: {{ resultcode_fn }}
+
+
+  tasks:
+  - name: Remove any old result files
+    shell: rm -f /tmp/{{ result_fn }} /tmp/{{ resultcode_fn }}
+
+  - name: Copy run_tcpdump.sh to VM
+    copy: src=/opt/xos/synchronizers/vtr/files/run_tcpdump.sh dest=/root/run_tcpdump.sh mode=0755
+    when: (test=="tcpdump")
+
+
+# -----------------
+# scope == VM
+# -----------------
+
+  - name: Send the pings from VM
+    shell: ping -c 10 {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: vm_ping_result
+    when: (scope=="vm") and (test=="ping")
+
+  - name: Store VM ping resultcode to file
+    shell: echo "{{ '{{' }} vm_ping_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="vm") and (test=="ping")
+
+  - name: Install traceroute
+    apt: name=traceroute state=present
+    when: (scope=="vm") and (test=="traceroute")
+
+  - name: Send traceroute from VM
+    shell: traceroute {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: vm_traceroute_result
+    when: (scope=="vm") and (test=="traceroute")
+
+  - name: Store VM traceroute resultcode to file
+    shell: echo "{{ '{{' }} vm_traceroute_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="vm") and (test=="traceroute")
+
+  - name: Run tcpdump for 30 seconds on VM
+    shell: /root/run_tcpdump.sh {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: vm_tcpdump_result
+    when: (scope=="vm") and (test=="tcpdump")
+
+  - name: Store VM tcpdump resultcode to file
+    shell: echo "{{ '{{' }} vm_tcpdump_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="vm") and (test=="tcpdump")
+
+# ------------------
+# scope == container
+# ------------------
+
+  - name: Send the pings from Container
+    shell: docker exec {{ container_name }} ping -c 10 {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: ctr_ping_result
+    when: (scope=="container") and (test=="ping")
+
+  - name: Store ctr ping resultcode to file
+    shell: echo "{{ '{{' }} ctr_ping_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="container") and (test=="ping")
+
+  - name: Install traceroute into Container
+    shell: docker exec {{ container_name }} apt-get -y install traceroute
+    when: (scope=="container") and (test=="traceroute")
+
+  - name: Send traceroute from Container
+    shell: docker exec {{ container_name }} traceroute {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: ctr_traceroute_result
+    when: (scope=="container") and (test=="traceroute")
+
+  - name: Store ctr traceroute resultcode to file
+    shell: echo "{{ '{{' }} ctr_traceroute_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="container") and (test=="traceroute")
+
+  - name: Copy run_tcpdump.sh to container
+    command: docker cp /root/run_tcpdump.sh {{ container_name }}:/root/run_tcpdump.sh
+    when: (scope=="container") and (test=="tcpdump")
+
+  - name: Run tcpdump for 30 seconds from Container
+    shell: docker exec {{ container_name }} /root/run_tcpdump.sh {{ argument }} > /tmp/{{ result_fn }} 2>&1
+    ignore_errors: yes
+    register: ctr_tcpdump_result
+    when: (scope=="container") and (test=="tcpdump")
+
+  - name: Store ctr tcpdump resultcode to file
+    shell: echo "{{ '{{' }} ctr_tcpdump_result.rc {{ '}}' }}" > /tmp/{{ resultcode_fn }}
+    when: (scope=="container") and (test=="tcpdump")
+
+# ------------------
+# scope == *
+# ------------------
+  - name: Fetch the result
+    fetch: src=/tmp/{{ result_fn }} dest=/opt/xos/synchronizers/vtr/result/{{ result_fn }} flat=yes
+
+  - name: Fetch the resultcode
+    fetch: src=/tmp/{{ resultcode_fn }} dest=/opt/xos/synchronizers/vtr/result/{{ resultcode_fn }} flat=yes
+
+
+
+
diff --git a/xos/synchronizer/vtn_vtr_synchronizer_config b/xos/synchronizer/vtn_vtr_synchronizer_config
new file mode 100644
index 0000000..2c9140a
--- /dev/null
+++ b/xos/synchronizer/vtn_vtr_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vtr
+dependency_graph=/opt/xos/synchronizers/vtr/model-deps
+steps_dir=/opt/xos/synchronizers/vtr/steps
+sys_dir=/opt/xos/synchronizers/vtr/sys
+deleters_dir=/opt/xos/synchronizers/vtr/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+full_setup=True
+proxy_ssh=True
+proxy_ssh_key=/opt/xos/synchronizers/vtr/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/synchronizer/vtr-synchronizer.py b/xos/synchronizer/vtr-synchronizer.py
new file mode 100755
index 0000000..84bec4f
--- /dev/null
+++ b/xos/synchronizer/vtr-synchronizer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs ../../synchronizers/base/xos-synchronizer.py
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizer/vtr_synchronizer_config b/xos/synchronizer/vtr_synchronizer_config
new file mode 100644
index 0000000..51bf25a
--- /dev/null
+++ b/xos/synchronizer/vtr_synchronizer_config
@@ -0,0 +1,41 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=vtr
+dependency_graph=/opt/xos/synchronizers/vtr/model-deps
+steps_dir=/opt/xos/synchronizers/vtr/steps
+sys_dir=/opt/xos/synchronizers/vtr/sys
+deleters_dir=/opt/xos/synchronizers/vtr/deleters
+log_file=console
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
+full_setup=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'