Add first implementation of vEG service

Change-Id: I2feeccd98e6e8932fe4552c575915d2f20fe5176
diff --git a/xos/synchronizer/steps/ansible_test/README b/xos/synchronizer/steps/ansible_test/README
new file mode 100644
index 0000000..d3b2c54
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/README
@@ -0,0 +1,4 @@
+Some scripts used while testing the Ansible instance configuration observer
+
+xos.py was probably the prototype of an XOS SSH connection plugin for Ansible that understood how to SSH into the
+instances without needing to play config file and environment tricks.
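+
+To try it, one approach (untested here) is to copy xos.py into a
+connection_plugins directory registered in ansible.cfg and then select it
+with "connection: xos" in a play, as test.yaml below does.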
diff --git a/xos/synchronizer/steps/ansible_test/inventory.txt b/xos/synchronizer/steps/ansible_test/inventory.txt
new file mode 100644
index 0000000..bd5b542
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/inventory.txt
@@ -0,0 +1,16 @@
+[onlab_hpc-355]
+node67.washington.vicci.org instance_id=instance-00000045 instance_name=onlab_hpc-355
+
+[onlab_test405-372]
+node67.washington.vicci.org instance_id=instance-0000004c instance_name=onlab_test405-372
+
+[onlab_test405-376]
+node1.cs.arizona.edu
+
+[onlab_test405-378]
+node67.washington.vicci.org ansible_ssh_private_key_file=/home/smbaker/.ssh/id_rsa
+#/home/smbaker/projects/vicci/keys/test_service_key_rsa
+
+[mysite_test2-48]
+cordcompute02.onlab.us ansible_ssh_private_key_file=/home/smbaker/projects/vicci/keys/demo_admin.rsa
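+
+# instance_id and instance_name are required by the xos connection plugin
+# (see xos.py); groups that use plain ssh omit them.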
+
diff --git a/xos/synchronizer/steps/ansible_test/test.sh b/xos/synchronizer/steps/ansible_test/test.sh
new file mode 100755
index 0000000..157ba9c
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/test.sh
@@ -0,0 +1,2 @@
+#! /bin/bash
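+# Run test.yaml (which selects the xos connection) against inventory.txt.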
+ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
diff --git a/xos/synchronizer/steps/ansible_test/test.yaml b/xos/synchronizer/steps/ansible_test/test.yaml
new file mode 100644
index 0000000..6a29d56
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/test.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: onlab_test405-372
+  connection: xos
+  user: ubuntu
+  vars:
+     foo: 25
+#  instance_name: instance-00000045
+#  slice_name: onlab_hpc-355
+
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar
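+
+# With connection: xos, the task runs inside the instance named by the
+# inventory's instance_name, reached by proxying ssh through the node.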
diff --git a/xos/synchronizer/steps/ansible_test/xos.py b/xos/synchronizer/steps/ansible_test/xos.py
new file mode 100755
index 0000000..eb4f3eb
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/xos.py
@@ -0,0 +1,444 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import re
+import subprocess
+import shlex
+import pipes
+import random
+import select
+import fcntl
+import hmac
+import pwd
+import gettext
+import pty
+from hashlib import sha1
+import ansible.constants as C
+from ansible.callbacks import vvv
+from ansible import errors
+from ansible import utils
+
+class Connection(object):
+    ''' ssh based connections '''
+
+    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
+        self.runner = runner
+        self.host = host
+        self.ipv6 = ':' in self.host
+        self.port = port
+        self.user = str(user)
+        self.password = password
+        self.private_key_file = private_key_file
+        self.HASHED_KEY_MAGIC = "|1|"
+        self.has_pipelining = True
+        #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
+        #self.instance_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "instance_name", "SLIVER_NAME", None)
+
+        inject={}
+        inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
+
+        self.instance_id = inject["instance_id"]
+        self.instance_name = inject["instance_name"]
+
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+        self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+
+    def connect(self):
+        ''' connect to the remote host '''
+
+        vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
+
+        self.common_args = []
+        extra_args = C.ANSIBLE_SSH_ARGS
+        if extra_args is not None:
+            # make sure there is no empty string added as this can produce weird errors
+            self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
+        else:
+            self.common_args += ["-o", "ControlMaster=auto",
+                                 "-o", "ControlPersist=60s",
+                                 "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
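+        # Reach the instance by proxying through the compute node: the outer
+        # ssh logs into the node as a user named after the instance_id, which
+        # the node is presumably configured to forward into the instance.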
+        self.common_args += ["-o", "ProxyCommand ssh -q -i %s %s@%s" % (self.private_key_file, self.instance_id, self.host)]
+
+        cp_in_use = False
+        cp_path_set = False
+        for arg in self.common_args:
+            if "ControlPersist" in arg:
+                cp_in_use = True
+            if "ControlPath" in arg:
+                cp_path_set = True
+
+        if cp_in_use and not cp_path_set:
+            self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
+        if not C.HOST_KEY_CHECKING:
+            self.common_args += ["-o", "StrictHostKeyChecking=no"]
+
+        if self.port is not None:
+            self.common_args += ["-o", "Port=%d" % (self.port)]
+        if self.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
+        elif self.runner.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
+        if self.password:
+            self.common_args += ["-o", "GSSAPIAuthentication=no",
+                                 "-o", "PubkeyAuthentication=no"]
+        else:
+            self.common_args += ["-o", "KbdInteractiveAuthentication=no",
+                                 "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+                                 "-o", "PasswordAuthentication=no"]
+        if self.user != pwd.getpwuid(os.geteuid())[0]:
+            self.common_args += ["-o", "User="+self.user]
+        self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
+
+        return self
+
+    def _run(self, cmd, indata):
+        if indata:
+            # do not use pseudo-pty
+            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            stdin = p.stdin
+        else:
+            # try to use a pseudo-pty
+            try:
+                # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
+                master, slave = pty.openpty()
+                p = subprocess.Popen(cmd, stdin=slave,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = os.fdopen(master, 'w', 0)
+                os.close(slave)
+            except:
+                p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = p.stdin
+
+        return (p, stdin)
+
+    def _password_cmd(self):
+        if self.password:
+            try:
+                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                p.communicate()
+            except OSError:
+                raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
+            (self.rfd, self.wfd) = os.pipe()
+            return ["sshpass", "-d%d" % self.rfd]
+        return []
+
+    def _send_password(self):
+        if self.password:
+            os.close(self.rfd)
+            os.write(self.wfd, "%s\n" % self.password)
+            os.close(self.wfd)
+
+    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
+        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        # We can't use p.communicate here because the ControlMaster may have stdout open as well
+        stdout = ''
+        stderr = ''
+        rpipes = [p.stdout, p.stderr]
+        if indata:
+            try:
+                stdin.write(indata)
+                stdin.close()
+            except:
+                raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+        # Read stdout/stderr from process
+        while True:
+            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+            # fail early if the sudo/su password is wrong
+            if self.runner.sudo and sudoable:
+                if self.runner.sudo_pass:
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if stdout.endswith("%s\r\n%s" % (incorrect_password,
+                                                     prompt)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+
+                if stdout.endswith(prompt):
+                    raise errors.AnsibleError('Missing sudo password')
+
+            if self.runner.su and su and self.runner.su_pass:
+                incorrect_password = gettext.dgettext(
+                    "su", "Sorry")
+                if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
+                    raise errors.AnsibleError('Incorrect su password')
+
+            if p.stdout in rfd:
+                dat = os.read(p.stdout.fileno(), 9000)
+                stdout += dat
+                if dat == '':
+                    rpipes.remove(p.stdout)
+            if p.stderr in rfd:
+                dat = os.read(p.stderr.fileno(), 9000)
+                stderr += dat
+                if dat == '':
+                    rpipes.remove(p.stderr)
+            # only break out if no pipes are left to read or
+            # the pipes are completely read and
+            # the process is terminated
+            if (not rpipes or not rfd) and p.poll() is not None:
+                break
+            # No pipes are left to read but process is not yet terminated
+            # Only then it is safe to wait for the process to be finished
+            # NOTE: Actually p.poll() is always None here if rpipes is empty
+            elif not rpipes and p.poll() is None:
+                p.wait()
+                # The process is terminated. Since no pipes to read from are
+                # left, there is no need to call select() again.
+                break
+        # close stdin after process is terminated and stdout/stderr are read
+        # completely (see also issue #848)
+        stdin.close()
+        return (p.returncode, stdout, stderr)
+
+    def not_in_host_file(self, host):
+        if 'USER' in os.environ:
+            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+        else:
+            user_host_file = "~/.ssh/known_hosts"
+        user_host_file = os.path.expanduser(user_host_file)
+        
+        host_file_list = []
+        host_file_list.append(user_host_file)
+        host_file_list.append("/etc/ssh/ssh_known_hosts")
+        host_file_list.append("/etc/ssh/ssh_known_hosts2")
+        
+        hfiles_not_found = 0
+        for hf in host_file_list:
+            if not os.path.exists(hf):
+                hfiles_not_found += 1
+                continue
+            try:
+                host_fh = open(hf)
+            except IOError, e:
+                hfiles_not_found += 1
+                continue
+            else:
+                data = host_fh.read()
+                host_fh.close()
+                
+            for line in data.split("\n"):
+                if line is None or " " not in line:
+                    continue
+                tokens = line.split()
+                if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
+                    # this is a hashed known host entry
+                    try:
+                        (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
+                        hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+                        hash.update(host)
+                        if hash.digest() == kn_host.decode('base64'):
+                            return False
+                    except:
+                        # invalid hashed host key, skip it
+                        continue
+                else:
+                    # standard host file entry
+                    if host in tokens[0]:
+                        return False
+
+        if (hfiles_not_found == len(host_file_list)):
+            vvv("EXEC previous known host file not found for %s" % host)
+        return True
+
+    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
+        ''' run a command on the remote host '''
+
+        ssh_cmd = self._password_cmd()
+        ssh_cmd += ["ssh", "-C"]
+        if not in_data:
+            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
+            # inside a tty automatically invokes the python interactive-mode but the modules are not
+            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
+            ssh_cmd += ["-tt"]
+        if utils.VERBOSITY > 3:
+            ssh_cmd += ["-vvv"]
+        else:
+            ssh_cmd += ["-q"]
+        ssh_cmd += self.common_args
+
+        if self.ipv6:
+            ssh_cmd += ['-6']
+        #ssh_cmd += [self.host]
+        ssh_cmd += [self.instance_name]
+
+        if su and su_user:
+            sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
+            prompt_re = re.compile(prompt)
+            ssh_cmd.append(sudocmd)
+        elif not self.runner.sudo or not sudoable:
+            prompt = None
+            if executable:
+                ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
+            else:
+                ssh_cmd.append(cmd)
+        else:
+            sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
+            ssh_cmd.append(sudocmd)
+
+        vvv("EXEC %s" % ssh_cmd, host=self.host)
+
+        not_in_host_file = self.not_in_host_file(self.host)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+
+        # create process
+        (p, stdin) = self._run(ssh_cmd, in_data)
+
+        self._send_password()
+
+        if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
+                (self.runner.su and su and self.runner.su_pass):
+            # several cases are handled for sudo privileges with password
+            # * NOPASSWD (tty & no-tty): detect success_key on stdout
+            # * without NOPASSWD:
+            #   * detect prompt on stdout (tty)
+            #   * detect prompt on stderr (no-tty)
+            fcntl.fcntl(p.stdout, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+            fcntl.fcntl(p.stderr, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+            sudo_output = ''
+            sudo_errput = ''
+
+            while True:
+                if success_key in sudo_output or \
+                    (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
+                    (self.runner.su_pass and prompt_re.match(sudo_output)):
+                    break
+
+                rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
+                                              [p.stdout], self.runner.timeout)
+                if p.stderr in rfd:
+                    chunk = p.stderr.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_errput += chunk
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+                    elif sudo_errput.endswith(prompt):
+                        stdin.write(self.runner.sudo_pass + '\n')
+
+                if p.stdout in rfd:
+                    chunk = p.stdout.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_output += chunk
+
+                if not rfd:
+                    # timeout. wrap up process communication
+                    stdout = p.communicate()
+                    raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
+
+            if success_key not in sudo_output:
+                if sudoable:
+                    stdin.write(self.runner.sudo_pass + '\n')
+                elif su:
+                    stdin.write(self.runner.su_pass + '\n')
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
+                              'unknown configuration option: ControlPersist' in stderr
+
+        if C.HOST_KEY_CHECKING:
+            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
+                raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+
+        if p.returncode != 0 and controlpersisterror:
+            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
+        if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
+            raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+
+        return (p.returncode, '', stdout, stderr)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to remote '''
+        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+        if not os.path.exists(in_path):
+            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [in_path,host + ":" + pipes.quote(out_path)]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
+
+        (p, stdin) = self._run(cmd, indata)
+
+        self._send_password()
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
+
+        if returncode != 0:
+            raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from remote to local '''
+        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [host + ":" + in_path, out_path]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "get %s %s\n" % (in_path, out_path)
+
+        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self._send_password()
+        stdout, stderr = p.communicate(indata)
+
+        if p.returncode != 0:
+            raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
+
+    def close(self):
+        ''' not applicable since we're executing openssh binaries '''
+        pass
+
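
For reference, the ssh invocation the plugin assembles in exec_command is
roughly equivalent to the following sketch (key path, node, and instance
names taken from the test inventory above):

    import pipes

    private_key = "/home/smbaker/.ssh/id_rsa"
    node = "node67.washington.vicci.org"
    instance_id = "instance-00000045"
    instance_name = "onlab_hpc-355"

    # -tt forces a tty (no pipelined data); the ProxyCommand hop carries the
    # connection through the node; the "host" ssh connects to is instance_name.
    cmd = ["ssh", "-C", "-tt", "-q",
           "-o", "ProxyCommand ssh -q -i %s %s@%s" % (private_key, instance_id, node),
           "-o", "StrictHostKeyChecking=no",
           instance_name,
           "/bin/sh -c " + pipes.quote("echo foo > /tmp/foobar")]
    print " ".join(pipes.quote(c) for c in cmd)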
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
new file mode 100644
index 0000000..36b7221
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -0,0 +1,43 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  vars:
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+
+  tasks:
+  - name: Verify if veg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
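+  # cron_job_pids_count.stdout is the match count as a string; the tasks
+  # below run only when it is "0", i.e. the notifier is not yet running.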
+
+  - name: DEBUG
+    debug: var=cron_job_pids_count.stdout
+
+  - name: make sure /usr/local/share/veg_monitoring_agent exists
+    file: path=/usr/local/share/veg_monitoring_agent state=directory owner=root group=root
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=/usr/local/share/veg_monitoring_agent/veg_stats_notifier.py
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: sudo python /usr/local/share/veg_monitoring_agent/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
diff --git a/xos/synchronizer/steps/sync_vegtenant.py b/xos/synchronizer/steps/sync_vegtenant.py
new file mode 100644
index 0000000..2b64bb1
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant.py
@@ -0,0 +1,308 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from urlparse import urlparse
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible_helper import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice, Tag, ModelLink, CoarseTenant, Tenant, ServiceMonitoringAgentInfo
+from services.veg.models import VEGService, VEGTenant
+from xos.logger import Logger, logging
+
+# Deal with configurations where the hpc service is not onboarded
+try:
+    from services.hpc.models import HpcService, CDNPrefix
+    hpc_service_onboarded=True
+except:
+    hpc_service_onboarded=False
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+from broadbandshield import BBS
+
+logger = Logger(level=logging.INFO)
+
+ENABLE_QUICK_UPDATE=False
+
+CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+
+class SyncVEGTenant(SyncInstanceUsingAnsible):
+    provides=[VEGTenant]
+    observes=VEGTenant
+    requested_interval=0
+    template_name = "sync_vegtenant.yaml"
+    watches = [ModelLink(CoarseTenant,via='coarsetenant'), ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
+
+    def __init__(self, *args, **kwargs):
+        super(SyncVEGTenant, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = VEGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VEGTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_veg_service(self, o):
+        if not o.provider_service:
+            return None
+
+        vegs = VEGService.get_service_objects().filter(id=o.provider_service.id)
+        if not vegs:
+            return None
+
+        return vegs[0]
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes that aren't part of the
+        # object itself. In the case of vEG, we need to know:
+        #   1) the addresses of dnsdemux, to setup dnsmasq in the vEG
+        #   2) CDN prefixes, so we know what URLs to send to dnsdemux
+        #   3) BroadBandShield server addresses, for parental filtering
+        #   4) vlan_ids, for setting up networking in the vEG VM
+
+        veg_service = self.get_veg_service(o)
+
+        dnsdemux_ip = None
+        cdn_prefixes = []
+        # FIXME: this will probably break since no such folder is under synchronizers
+        cdn_config_fn = "/opt/xos/synchronizers/veg/cdn_config"
+        if os.path.exists(cdn_config_fn):
+            # manual CDN configuration
+            #   the first line is the address of dnsredir
+            #   the remaining lines are domain names, one per line
+            lines = file(cdn_config_fn).readlines()
+            if len(lines)>=2:
+                dnsdemux_ip = lines[0].strip()
+                cdn_prefixes = [x.strip() for x in lines[1:] if x.strip()]
+        elif hpc_service_onboarded:
+            # automatic CDN configuration
+            #    it learns everything from CDN objects in XOS
+            #    not tested on pod.
+            if veg_service.backend_network_label:
+                # Connect to dnsdemux using the network specified by
+                #     veg_service.backend_network_label
+                for service in HpcService.objects.all():
+                    for slice in service.slices.all():
+                        if "dnsdemux" in slice.name:
+                            for instance in slice.instances.all():
+                                for ns in instance.ports.all():
+                                    if ns.ip and ns.network.labels and (veg_service.backend_network_label in ns.network.labels):
+                                        dnsdemux_ip = ns.ip
+                if not dnsdemux_ip:
+                    logger.info("failed to find a dnsdemux on network %s" % veg_service.backend_network_label,extra=o.tologdict())
+            else:
+                # Connect to dnsdemux using the instance's public address
+                for service in HpcService.objects.all():
+                    for slice in service.slices.all():
+                        if "dnsdemux" in slice.name:
+                            for instance in slice.instances.all():
+                                if not dnsdemux_ip:
+                                    try:
+                                        dnsdemux_ip = socket.gethostbyname(instance.node.name)
+                                    except:
+                                        pass
+                if not dnsdemux_ip:
+                    logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())
+
+            for prefix in CDNPrefix.objects.all():
+                cdn_prefixes.append(prefix.prefix)
+
+        dnsdemux_ip = dnsdemux_ip or "none"
+
+        # Broadbandshield can either be set up internally, using veg_service.bbs_slice,
+        # or it can be setup externally using veg_service.bbs_server.
+
+        bbs_addrs = []
+        if veg_service.bbs_slice:
+            if veg_service.backend_network_label:
+                for bbs_instance in veg_service.bbs_slice.instances.all():
+                    for ns in bbs_instance.ports.all():
+                        if ns.ip and ns.network.labels and (veg_service.backend_network_label in ns.network.labels):
+                            bbs_addrs.append(ns.ip)
+            else:
+                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
+            if not bbs_addrs:
+                logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
+        elif veg_service.bbs_server:
+            bbs_addrs.append(veg_service.bbs_server)
+        else:
+            logger.info("neither bbs_slice nor bbs_server is configured in the vEG",extra=o.tologdict())
+
+        s_tags = []
+        c_tags = []
+        if o.volt:
+            s_tags.append(o.volt.s_tag)
+            c_tags.append(o.volt.c_tag)
+
+        try:
+            full_setup = Config().observer_full_setup
+        except:
+            full_setup = True
+
+        safe_macs=[]
+        if veg_service.url_filter_kind == "safebrowsing":
+            if o.volt and o.volt.subscriber:
+                for user in o.volt.subscriber.devices:
+                    level = user.get("level",None)
+                    mac = user.get("mac",None)
+                    if level in ["G", "PG"]:
+                        if mac:
+                            safe_macs.append(mac)
+
+
+        docker_opts = []
+        if veg_service.docker_insecure_registry:
+            reg_name = veg_service.docker_image_name.split("/",1)[0]
+            docker_opts.append("--insecure-registry " + reg_name)
+
+        fields = {"s_tags": s_tags,
+                "c_tags": c_tags,
+                "docker_remote_image_name": veg_service.docker_image_name,
+                "docker_local_image_name": veg_service.docker_image_name, # veg_service.docker_image_name.split("/",1)[1].split(":",1)[0],
+                "docker_opts": " ".join(docker_opts),
+                "dnsdemux_ip": dnsdemux_ip,
+                "cdn_prefixes": cdn_prefixes,
+                "bbs_addrs": bbs_addrs,
+                "full_setup": full_setup,
+                "isolation": o.instance.isolation,
+                "safe_browsing_macs": safe_macs,
+                "container_name": "veg-%s-%s" % (s_tags[0], c_tags[0]),
+                "dns_servers": [x.strip() for x in veg_service.dns_servers.split(",")],
+                "url_filter_kind": veg_service.url_filter_kind }
+
+        # add in the sync_attributes that come from the SubscriberRoot object
+
+        if o.volt and o.volt.subscriber and hasattr(o.volt.subscriber, "sync_attributes"):
+            for attribute_name in o.volt.subscriber.sync_attributes:
+                fields[attribute_name] = getattr(o.volt.subscriber, attribute_name)
+
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+
+        super(SyncVEGTenant, self).sync_fields(o, fields)
+
+        # now do all of our broadbandshield stuff...
+
+        service = self.get_veg_service(o)
+        if not service:
+            # Ansible uses the service's keypair in order to SSH into the
+            # instance. It would be bad if the slice had no service.
+
+            raise Exception("Slice %s is not associated with a service" % instance.slice.name)
+
+        # Make sure the slice is configured properly
+        if (service != o.instance.slice.service):
+            raise Exception("Slice %s is associated with some service that is not %s" % (str(instance.slice), str(service)))
+
+        # only enable filtering if we have a subscriber object (see below)
+        url_filter_enable = False
+
+        # for attributes that come from CordSubscriberRoot
+        if o.volt and o.volt.subscriber:
+            url_filter_enable = o.volt.subscriber.url_filter_enable
+            url_filter_level = o.volt.subscriber.url_filter_level
+            url_filter_users = o.volt.subscriber.devices
+
+        if service.url_filter_kind == "broadbandshield":
+            # disable url_filter if there are no bbs_addrs
+            if url_filter_enable and (not fields.get("bbs_addrs",[])):
+                logger.info("disabling url_filter because there are no bbs_addrs",extra=o.tologdict())
+                url_filter_enable = False
+
+            if url_filter_enable:
+                bbs_hostname = None
+                if service.bbs_api_hostname and service.bbs_api_port:
+                    bbs_hostname = service.bbs_api_hostname
+                else:
+                    # TODO: extract from slice
+                    bbs_hostname = "cordcompute01.onlab.us"
+
+                if service.bbs_api_port:
+                    bbs_port = service.bbs_api_port
+                else:
+                    bbs_port = 8018
+
+                if not bbs_hostname:
+                    logger.info("broadbandshield is not configured",extra=o.tologdict())
+                else:
+                    tStart = time.time()
+                    bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
+                    bbs.sync(url_filter_level, url_filter_users)
+
+                    if o.hpc_client_ip:
+                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip),extra=o.tologdict())
+                        bbs.associate(o.hpc_client_ip)
+                    else:
+                        logger.info("no hpc_client_ip to associate",extra=o.tologdict())
+
+                    logger.info("bbs update time %d" % int(time.time()-tStart),extra=o.tologdict())
+
+
+    def run_playbook(self, o, fields):
+        ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
+        quick_update = (o.last_ansible_hash == ansible_hash)
+
+        if ENABLE_QUICK_UPDATE and quick_update:
+            logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
+        else:
+            if o.instance.isolation in ["container", "container_vm"]:
+                super(SyncVEGTenant, self).run_playbook(o, fields, "sync_vegtenant_new.yaml")
+            else:
+                if CORD_USE_VTN:
+                    super(SyncVEGTenant, self).run_playbook(o, fields, template_name="sync_vegtenant_vtn.yaml")
+                else:
+                    super(SyncVEGTenant, self).run_playbook(o, fields)
+
+        o.last_ansible_hash = ansible_hash
+
+    def delete_record(self, m):
+        pass
+
+    def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
+        if not monitoring_agent_info.service:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        if not monitoring_agent_info.target_uri:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        objs = VEGTenant.get_tenant_objects().all()
+        for obj in objs:
+            if obj.provider_service.id != monitoring_agent_info.service.id:
+                logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
+                continue
+
+            instance = self.get_instance(obj)
+            if not instance:
+                logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
+                continue
+
+            logger.info("handling watch notification for monitoring agent info:%s for VEGTenant object:%s" % (monitoring_agent_info, obj))
+
+            #Run ansible playbook to update the routing table entries in the instance
+            fields = self.get_ansible_fields(instance)
+            fields["ansible_tag"] =  obj.__class__.__name__ + "_" + str(obj.id) + "_service_monitoring"
+            
+            #Parse the monitoring agent target_uri
+            url = urlparse(monitoring_agent_info.target_uri)
+
+            #Assuming target_uri is rabbitmq URI
+            fields["rabbit_user"] = url.username
+            fields["rabbit_password"] = url.password
+            fields["rabbit_host"] = url.hostname
+
+            template_name = "sync_monitoring_agent.yaml"
+            super(SyncVEGTenant, self).run_playbook(obj, fields, template_name)
+        pass
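
The watch handler above assumes target_uri is a RabbitMQ URI; a sketch of how
urlparse pulls the credentials out of one (the URI itself is made up):

    from urlparse import urlparse  # Python 2, as in the synchronizer

    url = urlparse("rabbit://openstack:secret@10.11.10.1:5672/")
    print url.username, url.password, url.hostname
    # -> openstack secret 10.11.10.1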
diff --git a/xos/synchronizer/steps/sync_vegtenant.yaml b/xos/synchronizer/steps/sync_vegtenant.yaml
new file mode 100644
index 0000000..eba2a97
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant.yaml
@@ -0,0 +1,179 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+  tasks:
+{% if full_setup %}
+  - name: Docker repository
+    copy: src=/opt/xos/synchronizers/veg/files/docker.list
+      dest=/etc/apt/sources.list.d/docker.list
+
+  - name: Import the repository key
+    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+  - name: install Docker
+    apt: name=lxc-docker state=present update_cache=yes
+
+  - name: install python-setuptools
+    apt: name=python-setuptools state=present
+
+  - name: install pip
+    easy_install: name=pip
+
+  - name: install docker-py
+    pip: name=docker-py version=0.5.3
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+  - name: make sure /etc/dnsmasq.d exists
+    file: path=/etc/dnsmasq.d state=directory owner=root group=root
+
+  - name: Disable resolvconf service
+    shell: "{{ '{{' }} item {{ '}}' }}"
+    with_items:
+       - service resolvconf stop
+       - echo manual > /etc/init/resolvconf.override
+       - rm -f /etc/resolv.conf
+
+  - name: Install resolv.conf
+    copy: src=/opt/xos/synchronizers/veg/files/vm-resolv.conf
+      dest=/etc/resolv.conf
+
+  - name: Verify if veg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+#  - name: make sure ~/bin exists
+#    file: path=~/bin state=directory owner=root group=root
+#    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=/usr/local/sbin/veg_stats_notifier.py
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: sudo python /usr/local/sbin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    when: cron_job_pids_count.stdout == "0"
+{% endif %}
+
+  - name: vEG upstart
+    template: src=/opt/xos/synchronizers/veg/templates/veg.conf.j2 dest=/etc/init/veg-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
+
+  - name: vEG startup script
+    template: src=/opt/xos/synchronizers/veg/templates/start-veg.sh.j2 dest=/usr/local/sbin/start-veg-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
+    notify:
+#    - restart veg
+     - stop veg
+     - remove container
+     - start veg
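+  # stop/remove/start (rather than a plain restart) makes Docker recreate the
+  # container, so it picks up the regenerated startup script.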
+
+  - name: create /etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
+    file: path=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+# These are samples, not necessary for correct function of demo
+
+#  - name: networking info
+#    template: src=/opt/xos/synchronizers/veg/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
+
+#  - name: firewall info
+#    template: src=/opt/xos/synchronizers/veg/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
+
+  - name: Make sure vEG service is running
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec veg-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
+
+  - name: restart veg
+    shell: service veg-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service veg-{{ s_tags[0] }}-{{ c_tags[0] }} start
+
+  - name: stop veg
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
+
+  - name: remove container
+    docker: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-veg
+
+  - name: start veg
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
+
diff --git a/xos/synchronizer/steps/sync_vegtenant_new.yaml b/xos/synchronizer/steps/sync_vegtenant_new.yaml
new file mode 100644
index 0000000..daa30f8
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant_new.yaml
@@ -0,0 +1,136 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+  tasks:
+  - name: Verify if veg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+  - name: make sure ~/bin exists
+    file: path=~/bin state=directory owner=root group=root
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=~/bin/veg_stats_notifier.py
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: python ~/bin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: create directory for "safe" config
+    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
+
+  - name: dnsmasq "safe" config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: copy base ufw files
+    synchronize: src=/opt/xos/synchronizers/veg/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
+    notify:
+    - reload ufw
+
+  - name: redirection rules for safe DNS
+    template: src=/opt/xos/synchronizers/veg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
+    notify:
+    - reload ufw
+
+  - name: base ufw setup uses /etc/rc.local
+    copy: src=/opt/xos/synchronizers/veg/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
+    notify:
+    - copy in /etc/rc.local
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
+
+  - name: reload ufw
+    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
+
+  # Use docker cp instead of single-file volume
+  # The reason is that changes to external file volume don't show up inside the container
+  # Probably Ansible deletes and then recreates the external file, and container has old version
+  - name: copy in /etc/rc.local
+    shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
diff --git a/xos/synchronizer/steps/sync_vegtenant_vtn.yaml b/xos/synchronizer/steps/sync_vegtenant_vtn.yaml
new file mode 100644
index 0000000..fed64ab
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant_vtn.yaml
@@ -0,0 +1,255 @@
+---
+- hosts: {{ instance_name }}
+  #gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      docker_remote_image_name: {{ docker_remote_image_name }}
+      docker_local_image_name: {{ docker_local_image_name }}
+      docker_opts: {{ docker_opts }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_ip: {{ wan_container_ip }}
+      wan_container_netbits: {{ wan_container_netbits }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_container_gateway_ip: {{ wan_container_gateway_ip }}
+      wan_vm_ip: {{ wan_vm_ip }}
+      wan_vm_mac: {{ wan_vm_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+
+  tasks:
+  - name: Add hostname to /etc/hosts
+    lineinfile: dest=/etc/hosts
+      regexp='^127\.0\.0\.1'
+      line="127.0.0.1 localhost {{ '{{' }} ansible_hostname {{ '}}' }}"
+      owner=root group=root mode=0644
+
+  - name: Verify that bridge-utils is installed
+    shell: stat /sbin/brctl
+
+  - name: Verify that docker is installed
+    shell: stat /usr/bin/docker
+
+  - name: Check to see if network is setup
+    stat: path=/root/network_is_setup
+    register: network_is_setup
+
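+  # This file is first rendered by XOS's own templating, so expressions meant
+  # for Ansible itself must be escaped as {{ '{{' }} ... {{ '}}' }}.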
+  - name: set up the network
+    shell: "{{ '{{' }} item {{ '}}' }}"
+    with_items:
+       - ip link del link eth0 eth0.500 || true
+       - ip link add link eth0 eth0.500 type vlan id 500
+       - ip link set eth0.500 up
+       - ifconfig br-wan down || true
+       - brctl delbr br-wan || true
+       - brctl addbr br-wan
+       - brctl addif br-wan eth0.500
+       - ifconfig br-wan hw ether {{ wan_vm_mac }}
+       - ip addr add {{ wan_vm_ip }}/{{ wan_container_netbits }} dev br-wan
+       - ip link set br-wan up
+       - ip route del default || true
+       - ip route add default via {{ wan_container_gateway_ip }}
+       - ip link set dev br-wan promisc on
+    when: network_is_setup.stat.exists == False
+
+  - name: Remember that the network is setup, so we never do the above again
+    shell: touch /root/network_is_setup
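+  # The /root/*_is_setup marker files keep these one-time setup blocks
+  # idempotent across repeated synchronizer runs.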
+
+{% if full_setup %}
+  - name: Check to see if environment is setup
+    stat: path=/root/environment_is_setup
+    register: environment_is_setup
+
+# Everything here is now baked into the vEG image
+# Leave this spot in place for future temporary setup stuff
+
+  - name: Remember that the environment is setup, so we never do the above again
+    shell: touch /root/environment_is_setup
+
+  - name: Verify if veg_stats_notifier ([] is to avoid capturing the shell process) cron job is already running
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+#  - name: make sure ~/bin exists
+#    file: path=~/bin state=directory owner=root group=root
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: Copy cron job to destination
+#    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+#      dest=/usr/local/sbin/veg_stats_notifier.py
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: install python-kombu
+#    apt: name=python-kombu state=present
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: Initiate veg_stats_notifier cron job
+#    command: sudo python /usr/local/sbin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+#    async: 9999999999999999
+#    poll: 0
+#    when: cron_job_pids_count.stdout == "0"
+{% endif %}
+
+  - name: Set docker options
+    template: src=/opt/xos/synchronizers/veg/templates/docker.j2 dest=/etc/default/docker
+    notify:
+     - restart docker
+
+  - name: vEG upstart
+    template: src=/opt/xos/synchronizers/veg/templates/veg.conf.j2 dest=/etc/init/{{ container_name }}.conf
+
+  - name: vEG startup script
+    template: src=/opt/xos/synchronizers/veg/templates/start-veg-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
+    notify:
+#    - restart veg
+     - stop veg
+     - remove container
+     - start veg
+
+  - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
+    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq "safe" config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: create /var/container_volumes/{{ container_name }}/mount/
+    file: path=/var/container_volumes/{{ container_name }}/mount state=directory owner=root group=root
+
+  - name: redirection rules for safe DNS
+    template: src=/opt/xos/synchronizers/veg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/mount/before.rules owner=root group=root mode=0644
+    notify:
+    - reload ufw
+
+  - name: base ufw setup uses /etc/rc.local
+    template: src=/opt/xos/synchronizers/veg/templates/rc.local.j2 dest=/var/container_volumes/{{ container_name }}/mount/rc.local owner=root group=root mode=0755
+    notify:
+    - rerun /etc/rc.local
+
+  - name: create directory for local programs
+    file: path=/var/container_volumes/{{ container_name }}/usr/local/sbin state=directory
+
+  - name: bandwidth limit script
+    template: src=/opt/xos/synchronizers/veg/templates/bwlimit.sh.j2 dest=/var/container_volumes/{{ container_name }}/usr/local/sbin/bwlimit.sh owner=root group=root mode=0755
+    notify:
+    - reset bwlimits
+
+  - name: create directory for simple webserver
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message state=directory
+
+  - name: copy simple webserver
+    copy: src=/opt/xos/synchronizers/veg/files/etc/service/ dest=/var/container_volumes/{{ container_name }}/etc/service/ owner=root group=root
+    when: status != "enabled"
+
+  - name: make webserver script executable
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run mode=0755
+    when: status != "enabled"
+
+  - name: generate the message page
+    template: src=/opt/xos/synchronizers/veg/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
+    when: status != "enabled"
+    #notify: restart veg
+
+  - name: remove simple webserver
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
+    when: status == "enabled"
+    #notify: restart veg
+
+  - name: Make sure vEG service is running
+    service: name={{ container_name }} state=started
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec {{ container_name }} killall dnsmasq
+
+  - name: stop veg
+    service: name={{ container_name }} state=stopped
+
+  - name: remove container
+    docker: name={{ container_name }} state=absent image=docker-veg
+
+  - name: start veg
+    service: name={{ container_name }} state=started
+
+  - name: reload ufw
+    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /sbin/iptables -t nat -F POSTROUTING; /usr/sbin/ufw reload"
+
+  - name: rerun /etc/rc.local
+    shell: docker exec {{ container_name }} bash -c "/etc/rc.local"
+
+  - name: reset bwlimits
+    shell: docker exec {{ container_name }} bash -c "/usr/local/sbin/bwlimit.sh restart"
+
+  - name: restart veg
+    shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
+
+  - name: restart docker
+    shell: service docker restart
diff --git a/xos/synchronizer/steps/test.yaml b/xos/synchronizer/steps/test.yaml
new file mode 100644
index 0000000..fc8251d
--- /dev/null
+++ b/xos/synchronizer/steps/test.yaml
@@ -0,0 +1,7 @@
+---
+- hosts: {{ instance_name }}
+  connection: ssh
+  user: ubuntu
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar