Add first implementation of the vEG service

Change-Id: I2feeccd98e6e8932fe4552c575915d2f20fe5176
diff --git a/xos/admin.py b/xos/admin.py
new file mode 100644
index 0000000..ebe6353
--- /dev/null
+++ b/xos/admin.py
@@ -0,0 +1,158 @@
+from django.contrib import admin
+
+from services.veg.models import *
+from django import forms
+from django.utils.safestring import mark_safe
+from django.contrib.auth.admin import UserAdmin
+from django.contrib.admin.widgets import FilteredSelectMultiple
+from django.contrib.auth.forms import ReadOnlyPasswordHashField
+from django.contrib.auth.signals import user_logged_in
+from django.utils import timezone
+from django.contrib.contenttypes import generic
+from suit.widgets import LinkedSelect
+from core.admin import ServiceAppAdmin,SliceInline,ServiceAttrAsTabInline, ReadOnlyAwareAdmin, XOSTabularInline, ServicePrivilegeInline, TenantRootTenantInline, TenantRootPrivilegeInline
+from core.middleware import get_request
+
+from functools import update_wrapper
+from django.contrib.admin.views.main import ChangeList
+from django.core.urlresolvers import reverse
+from django.contrib.admin.utils import quote
+
+#-----------------------------------------------------------------------------
+# vEG
+#-----------------------------------------------------------------------------
+
+class VEGServiceForm(forms.ModelForm):
+    bbs_api_hostname = forms.CharField(required=False)
+    bbs_api_port = forms.IntegerField(required=False)
+    bbs_server = forms.CharField(required=False)
+    backend_network_label = forms.CharField(required=False)
+    bbs_slice = forms.ModelChoiceField(queryset=Slice.objects.all(), required=False)
+    dns_servers = forms.CharField(required=False)
+    url_filter_kind = forms.ChoiceField(choices=VEGService.URL_FILTER_KIND_CHOICES, required=False)
+    node_label = forms.CharField(required=False)
+    docker_image_name = forms.CharField(required=False)
+    docker_insecure_registry = forms.BooleanField(required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(VEGServiceForm, self).__init__(*args, **kwargs)
+        if self.instance:
+            self.fields['bbs_api_hostname'].initial = self.instance.bbs_api_hostname
+            self.fields['bbs_api_port'].initial = self.instance.bbs_api_port
+            self.fields['bbs_server'].initial = self.instance.bbs_server
+            self.fields['backend_network_label'].initial = self.instance.backend_network_label
+            self.fields['bbs_slice'].initial = self.instance.bbs_slice
+            self.fields['dns_servers'].initial = self.instance.dns_servers
+            self.fields['url_filter_kind'].initial = self.instance.url_filter_kind
+            self.fields['node_label'].initial = self.instance.node_label
+            self.fields['docker_image_name'].initial = self.instance.docker_image_name
+            self.fields['docker_insecure_registry'].initial = self.instance.docker_insecure_registry
+
+    def save(self, commit=True):
+        self.instance.bbs_api_hostname = self.cleaned_data.get("bbs_api_hostname")
+        self.instance.bbs_api_port = self.cleaned_data.get("bbs_api_port")
+        self.instance.bbs_server = self.cleaned_data.get("bbs_server")
+        self.instance.backend_network_label = self.cleaned_data.get("backend_network_label")
+        self.instance.bbs_slice = self.cleaned_data.get("bbs_slice")
+        self.instance.dns_servers = self.cleaned_data.get("dns_servers")
+        self.instance.url_filter_kind = self.cleaned_data.get("url_filter_kind")
+        self.instance.node_label = self.cleaned_data.get("node_label")
+        self.instance.docker_image_name = self.cleaned_data.get("docker_image_name")
+        self.instance.docker_insecure_registry = self.cleaned_data.get("docker_insecure_registry")
+        return super(VEGServiceForm, self).save(commit=commit)
+
+    class Meta:
+        model = VEGService
+        fields = '__all__'
+
+class VEGServiceAdmin(ReadOnlyAwareAdmin):
+    model = VEGService
+    verbose_name = "vEG Service"
+    verbose_name_plural = "vEG Services"
+    list_display = ("backend_status_icon", "name", "enabled")
+    list_display_links = ('backend_status_icon', 'name', )
+    fieldsets = [(None,             {'fields': ['backend_status_text', 'name','enabled','versionNumber', 'description', "view_url", "icon_url", "service_specific_attribute", "node_label"],
+                                     'classes':['suit-tab suit-tab-general']}),
+                 ("backend config", {'fields': [ "backend_network_label", "url_filter_kind", "bbs_api_hostname", "bbs_api_port", "bbs_server", "bbs_slice"],
+                                     'classes':['suit-tab suit-tab-backend']}),
+                 ("vEG config", {'fields': ["dns_servers", "docker_image_name", "docker_insecure_registry"],
+                                     'classes':['suit-tab suit-tab-veg']}) ]
+    readonly_fields = ('backend_status_text', "service_specific_attribute")
+    inlines = [SliceInline,ServiceAttrAsTabInline,ServicePrivilegeInline]
+    form = VEGServiceForm
+
+    extracontext_registered_admins = True
+
+    user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
+
+    suit_form_tabs =(('general', 'Service Details'),
+        ('backend', 'Backend Config'),
+        ('veg', 'vEG Config'),
+        ('administration', 'Administration'),
+        #('tools', 'Tools'),
+        ('slices','Slices'),
+        ('serviceattrs','Additional Attributes'),
+        ('serviceprivileges','Privileges') ,
+    )
+
+    suit_form_includes = (('vegadmin.html', 'top', 'administration'),
+                           ) #('hpctools.html', 'top', 'tools') )
+
+    def get_queryset(self, request):
+        return VEGService.get_service_objects_by_user(request.user)
+
+class VEGTenantForm(forms.ModelForm):
+    bbs_account = forms.CharField(required=False)
+    creator = forms.ModelChoiceField(queryset=User.objects.all())
+    instance = forms.ModelChoiceField(queryset=Instance.objects.all(),required=False)
+    last_ansible_hash = forms.CharField(required=False)
+    wan_container_ip = forms.CharField(required=False)
+    wan_container_mac = forms.CharField(required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(VEGTenantForm, self).__init__(*args, **kwargs)
+        self.fields['kind'].widget.attrs['readonly'] = True
+        self.fields['provider_service'].queryset = VEGService.get_service_objects().all()
+        if self.instance:
+            # fields for the attributes
+            self.fields['bbs_account'].initial = self.instance.bbs_account
+            self.fields['creator'].initial = self.instance.creator
+            self.fields['instance'].initial = self.instance.instance
+            self.fields['last_ansible_hash'].initial = self.instance.last_ansible_hash
+            self.fields['wan_container_ip'].initial = self.instance.wan_container_ip
+            self.fields['wan_container_mac'].initial = self.instance.wan_container_mac
+        if (not self.instance) or (not self.instance.pk):
+            # default fields for an 'add' form
+            self.fields['kind'].initial = VEG_KIND
+            self.fields['creator'].initial = get_request().user
+            if VEGService.get_service_objects().exists():
+               self.fields["provider_service"].initial = VEGService.get_service_objects().all()[0]
+
+    def save(self, commit=True):
+        self.instance.creator = self.cleaned_data.get("creator")
+        self.instance.instance = self.cleaned_data.get("instance")
+        self.instance.last_ansible_hash = self.cleaned_data.get("last_ansible_hash")
+        return super(VEGTenantForm, self).save(commit=commit)
+
+    class Meta:
+        model = VEGTenant
+        fields = '__all__'
+
+class VEGTenantAdmin(ReadOnlyAwareAdmin):
+    list_display = ('backend_status_icon', 'id', 'subscriber_tenant' )
+    list_display_links = ('backend_status_icon', 'id')
+    fieldsets = [ (None, {'fields': ['backend_status_text', 'kind', 'provider_service', 'subscriber_tenant', 'service_specific_id', # 'service_specific_attribute',
+                                     'wan_container_ip', 'wan_container_mac', 'bbs_account', 'creator', 'instance', 'last_ansible_hash'],
+                          'classes':['suit-tab suit-tab-general']})]
+    readonly_fields = ('backend_status_text', 'service_specific_attribute', 'bbs_account', 'wan_container_ip', 'wan_container_mac')
+    form = VEGTenantForm
+
+    suit_form_tabs = (('general','Details'),)
+
+    def get_queryset(self, request):
+        return VEGTenant.get_tenant_objects_by_user(request.user)
+
+
+admin.site.register(VEGService, VEGServiceAdmin)
+admin.site.register(VEGTenant, VEGTenantAdmin)
+
diff --git a/xos/api/service/veg/vegservice.py b/xos/api/service/veg/vegservice.py
new file mode 100644
index 0000000..4d35de8
--- /dev/null
+++ b/xos/api/service/veg/vegservice.py
@@ -0,0 +1,78 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework import viewsets
+from rest_framework import status
+from rest_framework.decorators import detail_route, list_route
+from rest_framework.views import APIView
+from core.models import *
+from django.forms import widgets
+from django.conf.urls import patterns, url
+from services.veg.models import VEGService
+from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
+from django.shortcuts import get_object_or_404
+from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
+from xos.exceptions import *
+import json
+import subprocess
+from django.views.decorators.csrf import ensure_csrf_cookie
+
+class VEGServiceForApi(VEGService):
+    class Meta:
+        proxy = True
+        app_label = "cord"
+
+    def __init__(self, *args, **kwargs):
+        super(VEGServiceForApi, self).__init__(*args, **kwargs)
+
+    def save(self, *args, **kwargs):
+        super(VEGServiceForApi, self).save(*args, **kwargs)
+
+class VEGServiceSerializer(PlusModelSerializer):
+    id = ReadOnlyField()
+    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+    wan_container_gateway_ip = serializers.CharField(required=False)
+    wan_container_gateway_mac = serializers.CharField(required=False)
+    dns_servers = serializers.CharField(required=False)
+    url_filter_kind = serializers.CharField(required=False)
+    node_label = serializers.CharField(required=False)
+
+    class Meta:
+        model = VEGServiceForApi
+        fields = ('humanReadableName',
+                  'id',
+                  'wan_container_gateway_ip',
+                  'wan_container_gateway_mac',
+                  'dns_servers',
+                  'url_filter_kind',
+                  'node_label')
+
+    def getHumanReadableName(self, obj):
+        return obj.__unicode__()
+
+# @ensure_csrf_cookie
+class VEGServiceViewSet(XOSViewSet):
+    base_name = "VEGservice"
+    method_name = None # use the api endpoint /api/service/veg/
+    method_kind = "viewset"
+    queryset = VEGService.get_service_objects().select_related().all()
+    serializer_class = VEGServiceSerializer
+
+    @classmethod
+    def get_urlpatterns(self, api_path="^"):
+        patterns = super(VEGServiceViewSet, self).get_urlpatterns(api_path=api_path)
+
+        return patterns
+
+    def list(self, request):
+        object_list = self.filter_queryset(self.get_queryset())
+
+        serializer = self.get_serializer(object_list, many=True)
+
+        return Response(serializer.data)
+
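+# Rough usage sketch (host, port and credentials below are placeholders, not part
+# of this change): with method_name=None and method_kind="viewset", this viewset
+# is served at /api/service/veg/, so listing the services looks like:
+#
+#     curl -u <user>:<password> http://<xos-host>:<port>/api/service/veg/
+#
+# which returns the fields declared in VEGServiceSerializer for each VEGService.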
diff --git a/xos/api/tenant/cord/veg.py b/xos/api/tenant/cord/veg.py
new file mode 100644
index 0000000..9415829
--- /dev/null
+++ b/xos/api/tenant/cord/veg.py
@@ -0,0 +1,62 @@
+from rest_framework.decorators import api_view
+from rest_framework.response import Response
+from rest_framework.reverse import reverse
+from rest_framework import serializers
+from rest_framework import generics
+from rest_framework import status
+from core.models import *
+from django.forms import widgets
+from services.veg.models import VEGTenant, VEGService
+from xos.apibase import XOSListCreateAPIView, XOSRetrieveUpdateDestroyAPIView, XOSPermissionDenied
+from api.xosapi_helpers import PlusModelSerializer, XOSViewSet, ReadOnlyField
+
+def get_default_veg_service():
+    VEG_services = VEGService.get_service_objects().all()
+    if VEG_services:
+        return VEG_services[0].id
+    return None
+
+class VEGTenantForAPI(VEGTenant):
+    class Meta:
+        proxy = True
+        app_label = "cord"
+
+    @property
+    def related(self):
+        related = {}
+        if self.instance:
+            related["instance_id"] = self.instance.id
+        return related
+
+class VEGTenantSerializer(PlusModelSerializer):
+    id = ReadOnlyField()
+    wan_container_ip = serializers.CharField()
+    wan_container_mac = ReadOnlyField()
+    related = serializers.DictField(required=False)
+
+    humanReadableName = serializers.SerializerMethodField("getHumanReadableName")
+    class Meta:
+        model = VEGTenantForAPI
+        fields = ('humanReadableName', 'id', 'wan_container_ip', 'wan_container_mac', 'related' )
+
+    def getHumanReadableName(self, obj):
+        return obj.__unicode__()
+
+class VEGTenantViewSet(XOSViewSet):
+    base_name = "veg"
+    method_name = "veg"
+    method_kind = "viewset"
+    queryset = VEGTenantForAPI.get_tenant_objects().all()
+    serializer_class = VEGTenantSerializer
+
+    @classmethod
+    def get_urlpatterns(self, api_path="^"):
+        patterns = super(VEGTenantViewSet, self).get_urlpatterns(api_path=api_path)
+
+        return patterns
+
diff --git a/xos/macros.m4 b/xos/macros.m4
new file mode 100644
index 0000000..1f48f10
--- /dev/null
+++ b/xos/macros.m4
@@ -0,0 +1,84 @@
+# Note: Tosca derived_from isn't working the way I think it should, it's not
+#    inheriting from the parent template. Until we get that figured out, use
+#    m4 macros to do our inheritance.
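+# As a rough illustration only (the node type name below is a placeholder, not
+# defined in this change), these macros are intended to be expanded inside a
+# TOSCA node type in a custom_types *.m4 template, e.g.:
+#
+#    tosca.nodes.VEGService:
+#        derived_from: tosca.nodes.Root
+#        capabilities:
+#            xos_base_service_caps
+#        properties:
+#            xos_base_props
+#            xos_base_service_props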
+
+define(xos_base_props,
+            no-delete:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to delete this object
+            no-create:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to create this object
+            no-update:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to update this object
+            replaces:
+                type: string
+                required: false
+                description: Replaces/renames this object)
+# Service
+define(xos_base_service_caps,
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service)
+define(xos_base_service_props,
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.)
+# Subscriber
+define(xos_base_subscriber_caps,
+            subscriber:
+                type: tosca.capabilities.xos.Subscriber)
+define(xos_base_subscriber_props,
+            kind:
+                type: string
+                default: generic
+                description: Kind of subscriber
+            service_specific_id:
+                type: string
+                required: false
+                description: Service specific ID opaque to XOS but meaningful to service)
+define(xos_base_tenant_props,
+            kind:
+                type: string
+                default: generic
+                description: Kind of tenant
+            service_specific_id:
+                type: string
+                required: false
+                description: Service specific ID opaque to XOS but meaningful to service)
+
+# end m4 macros
+
diff --git a/xos/models.py b/xos/models.py
new file mode 100644
index 0000000..6cb81b1
--- /dev/null
+++ b/xos/models.py
@@ -0,0 +1,450 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+from core.models import Tag
+from core.models.service import LeastLoadedNodeScheduler, ContainerVmScheduler
+from services.vrouter.models import VRouterService, VRouterTenant
+import traceback
+from xos.exceptions import *
+from xos.config import Config
+
+class ConfigurationError(Exception):
+    pass
+
+VEG_KIND = "vEG"
+CORD_SUBSCRIBER_KIND = "CordSubscriberRoot"
+
+CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+
+# -------------------------------------------
+# VEG
+# -------------------------------------------
+
+class VEGService(Service):
+    KIND = VEG_KIND
+
+    URL_FILTER_KIND_CHOICES = ( (None, "None"), ("safebrowsing", "Safe Browsing"), ("answerx", "AnswerX") )
+
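+    # Each simple_attributes entry is a (name, default) pair; the
+    # setup_simple_attributes() call after this class body generates a get/set
+    # property for each one, backed by the service_specific_attribute blob.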
+    simple_attributes = ( ("bbs_api_hostname", None),
+                          ("bbs_api_port", None),
+                          ("bbs_server", None),
+                          ("backend_network_label", "hpc_client"),
+                          ("dns_servers", "8.8.8.8"),
+                          ("url_filter_kind", None),
+                          ("node_label", None),
+                          ("docker_image_name", "docker.io/xosproject/veg"),
+                          ("docker_insecure_registry", False) )
+
+    def __init__(self, *args, **kwargs):
+        super(VEGService, self).__init__(*args, **kwargs)
+
+    class Meta:
+        app_label = "veg"
+        verbose_name = "vEG Service"
+        proxy = True
+
+    def allocate_bbs_account(self):
+        vegs = VEGTenant.get_tenant_objects().all()
+        bbs_accounts = [veg.bbs_account for veg in vegs]
+
+        # There's a bit of a race here; some other user could be trying to
+        # allocate a bbs_account at the same time we are.
+
+        for i in range(2,21):
+             account_name = "bbs%02d@onlab.us" % i
+             if (account_name not in bbs_accounts):
+                 return account_name
+
+        raise XOSConfigurationError("We've run out of available broadbandshield accounts. Delete some veg and try again.")
+
+    @property
+    def bbs_slice(self):
+        bbs_slice_id=self.get_attribute("bbs_slice_id")
+        if not bbs_slice_id:
+            return None
+        bbs_slices=Slice.objects.filter(id=bbs_slice_id)
+        if not bbs_slices:
+            return None
+        return bbs_slices[0]
+
+    @bbs_slice.setter
+    def bbs_slice(self, value):
+        if value:
+            value = value.id
+        self.set_attribute("bbs_slice_id", value)
+
+VEGService.setup_simple_attributes()
+
+class VEGTenant(TenantWithContainer):
+    class Meta:
+        proxy = True
+
+    KIND = VEG_KIND
+
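+    # Fields listed in sync_attributes are intended to be collected by the
+    # synchronizer and passed as variables to the Ansible playbook that
+    # configures the vEG instance.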
+    sync_attributes = ("wan_container_ip", "wan_container_mac", "wan_container_netbits",
+                       "wan_container_gateway_ip", "wan_container_gateway_mac",
+                       "wan_vm_ip", "wan_vm_mac")
+
+    default_attributes = {"instance_id": None,
+                          "container_id": None,
+                          "users": [],
+                          "bbs_account": None,
+                          "last_ansible_hash": None,
+                          "wan_container_ip": None}
+
+    def __init__(self, *args, **kwargs):
+        super(VEGTenant, self).__init__(*args, **kwargs)
+        self.cached_vrouter=None
+
+    @property
+    def vbng(self):
+        # not supported
+        return None
+
+    @vbng.setter
+    def vbng(self, value):
+        raise XOSConfigurationError("vEG.vBNG cannot be set this way -- create a new vBNG object and set it's subscriber_tenant instead")
+
+    @property
+    def vrouter(self):
+        vrouter = self.get_newest_subscribed_tenant(VRouterTenant)
+        if not vrouter:
+            return None
+
+        # always return the same object when possible
+        if (self.cached_vrouter) and (self.cached_vrouter.id == vrouter.id):
+            return self.cached_vrouter
+
+        vrouter.caller = self.creator
+        self.cached_vrouter = vrouter
+        return vrouter
+
+    @vrouter.setter
+    def vrouter(self, value):
+        raise XOSConfigurationError("vEG.vRouter cannot be set this way -- create a new vRuter object and set its subscriber_tenant instead")
+
+    @property
+    def volt(self):
+        from services.volt.models import VOLTTenant
+        if not self.subscriber_tenant:
+            return None
+        volts = VOLTTenant.objects.filter(id=self.subscriber_tenant.id)
+        if not volts:
+            return None
+        return volts[0]
+
+    @property
+    def bbs_account(self):
+        return self.get_attribute("bbs_account", self.default_attributes["bbs_account"])
+
+    @bbs_account.setter
+    def bbs_account(self, value):
+        return self.set_attribute("bbs_account", value)
+
+    @property
+    def last_ansible_hash(self):
+        return self.get_attribute("last_ansible_hash", self.default_attributes["last_ansible_hash"])
+
+    @last_ansible_hash.setter
+    def last_ansible_hash(self, value):
+        return self.set_attribute("last_ansible_hash", value)
+
+    @property
+    def ssh_command(self):
+        if self.instance:
+            return self.instance.get_ssh_command()
+        else:
+            return "no-instance"
+
+    @ssh_command.setter
+    def ssh_command(self, value):
+        pass
+
+    def get_vrouter_field(self, name, default=None):
+        if self.vrouter:
+            return getattr(self.vrouter, name, default)
+        else:
+            return default
+
+    @property
+    def wan_container_ip(self):
+        return self.get_vrouter_field("public_ip", None)
+
+    @property
+    def wan_container_mac(self):
+        return self.get_vrouter_field("public_mac", None)
+
+    @property
+    def wan_container_netbits(self):
+        return self.get_vrouter_field("netbits", None)
+
+    @property
+    def wan_container_gateway_ip(self):
+        return self.get_vrouter_field("gateway_ip", None)
+
+    @property
+    def wan_container_gateway_mac(self):
+        return self.get_vrouter_field("gateway_mac", None)
+
+    @property
+    def wan_vm_ip(self):
+        tags = Tag.select_by_content_object(self.instance).filter(name="vm_vrouter_tenant")
+        if tags:
+            tenant = VRouterTenant.objects.get(id=tags[0].value)
+            return tenant.public_ip
+        else:
+            raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
+
+    @property
+    def wan_vm_mac(self):
+        tags = Tag.select_by_content_object(self.instance).filter(name="vm_vrouter_tenant")
+        if tags:
+            tenant = VRouterTenant.objects.get(id=tags[0].value)
+            return tenant.public_mac
+        else:
+            raise Exception("no vm_vrouter_tenant tag for instance %s" % o.instance)
+
+    @property
+    def is_synced(self):
+        return (self.enacted is not None) and (self.enacted >= self.updated)
+
+    @is_synced.setter
+    def is_synced(self, value):
+        pass
+
+    def get_vrouter_service(self):
+        vrouterServices = VRouterService.get_service_objects().all()
+        if not vrouterServices:
+            raise XOSConfigurationError("No VROUTER Services available")
+        return vrouterServices[0]
+
+    def manage_vrouter(self):
+        # Each vEG object owns exactly one vRouterTenant object
+
+        if self.deleted:
+            return
+
+        if self.vrouter is None:
+            vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_veg", subscriber_tenant = self)
+            vrouter.caller = self.creator
+            vrouter.save()
+
+    def cleanup_vrouter(self):
+        if self.vrouter:
+            # print "XXX cleanup vrouter", self.vrouter
+            self.vrouter.delete()
+
+    def cleanup_orphans(self):
+        # ensure vEG only has one vRouter
+        cur_vrouter = self.vrouter
+        for vrouter in list(self.get_subscribed_tenants(VRouterTenant)):
+            if (not cur_vrouter) or (vrouter.id != cur_vrouter.id):
+                # print "XXX clean up orphaned vrouter", vrouter
+                vrouter.delete()
+
+        if self.orig_instance_id and (self.orig_instance_id != self.get_attribute("instance_id")):
+            instances=Instance.objects.filter(id=self.orig_instance_id)
+            if instances:
+                # print "XXX clean up orphaned instance", instances[0]
+                instances[0].delete()
+
+    def get_slice(self):
+        if not self.provider_service.slices.count():
+            raise XOSConfigurationError("The service has no slices")
+        slice = self.provider_service.slices.all()[0]
+        return slice
+
+    def get_veg_service(self):
+        return VEGService.get_service_objects().get(id=self.provider_service.id)
+
+    def find_instance_for_s_tag(self, s_tag):
+        #s_tags = STagBlock.objects.find(s_s_tag)
+        #if s_tags:
+        #    return s_tags[0].instance
+
+        tags = Tag.objects.filter(name="s_tag", value=s_tag)
+        if tags:
+            return tags[0].content_object
+
+        return None
+
+    def find_or_make_instance_for_s_tag(self, s_tag):
+        instance = self.find_instance_for_s_tag(s_tag)
+        if instance:
+            return instance
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise XOSConfigurationError("No m1.small flavor")
+
+        slice = self.provider_service.slices.all()[0]
+
+        if slice.default_isolation == "container_vm":
+            (node, parent) = ContainerVmScheduler(slice).pick()
+        else:
+            (node, parent) = LeastLoadedNodeScheduler(slice, label=self.get_veg_service().node_label).pick()
+
+        instance = Instance(slice = slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = slice.default_isolation,
+                        parent = parent)
+
+        self.save_instance(instance)
+
+        return instance
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        # For container or container_vm isolation, use what TenantWithContainer
+        # provides us
+        slice = self.get_slice()
+        if slice.default_isolation in ["container_vm", "container"]:
+            super(VEGTenant,self).manage_container()
+            return
+
+        if not self.volt:
+            raise XOSConfigurationError("This vEG container has no volt")
+
+        if self.instance:
+            # We're good.
+            return
+
+        instance = self.find_or_make_instance_for_s_tag(self.volt.s_tag)
+        self.instance = instance
+        super(TenantWithContainer, self).save()
+
+    def cleanup_container(self):
+        if self.get_slice().default_isolation in ["container_vm", "container"]:
+            super(VEGTenant,self).cleanup_container()
+
+        # To-do: cleanup unused instances
+        pass
+
+    def manage_bbs_account(self):
+        if self.deleted:
+            return
+
+        if self.volt and self.volt.subscriber and self.volt.subscriber.url_filter_enable:
+            if not self.bbs_account:
+                # make sure we use the proxied VEGService object, not the generic Service object
+                veg_service = VEGService.objects.get(id=self.provider_service.id)
+                self.bbs_account = veg_service.allocate_bbs_account()
+                super(VEGTenant, self).save()
+        else:
+            if self.bbs_account:
+                self.bbs_account = None
+                super(VEGTenant, self).save()
+
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance=instance, network=network)
+        if port:
+            port = port[0]
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            port.save()
+        return port
+
+    def get_lan_network(self, instance):
+        slice = self.provider_service.slices.all()[0]
+        if CORD_USE_VTN:
+            # there should be only one private network, and its template should not be the management template
+            lan_networks = [x for x in slice.networks.all() if x.template.visibility=="private" and (not "management" in x.template.name)]
+            if len(lan_networks)>1:
+                raise XOSProgrammingError("The vEG slice should only have one non-management private network")
+        else:
+            lan_networks = [x for x in slice.networks.all() if "lan" in x.name]
+        if not lan_networks:
+            raise XOSProgrammingError("No lan_network")
+        return lan_networks[0]
+
+    def save_instance(self, instance):
+        with transaction.atomic():
+            instance.volumes = "/etc/dnsmasq.d,/etc/ufw"
+            super(VEGTenant, self).save_instance(instance)
+
+            if instance.isolation in ["container", "container_vm"]:
+                lan_network = self.get_lan_network(instance)
+                port = self.find_or_make_port(instance, lan_network, ip="192.168.0.1", port_id="unmanaged")
+                port.set_parameter("c_tag", self.volt.c_tag)
+                port.set_parameter("s_tag", self.volt.s_tag)
+                port.set_parameter("device", "eth1")
+                port.set_parameter("bridge", "br-lan")
+
+                wan_networks = [x for x in instance.slice.networks.all() if "wan" in x.name]
+                if not wan_networks:
+                    raise XOSProgrammingError("No wan_network")
+                port = self.find_or_make_port(instance, wan_networks[0])
+                port.set_parameter("next_hop", value="10.0.1.253")   # FIX ME
+                port.set_parameter("device", "eth0")
+
+            if instance.isolation in ["vm"]:
+                lan_network = self.get_lan_network(instance)
+                port = self.find_or_make_port(instance, lan_network)
+                port.set_parameter("c_tag", self.volt.c_tag)
+                port.set_parameter("s_tag", self.volt.s_tag)
+                port.set_parameter("neutron_port_name", "stag-%s" % self.volt.s_tag)
+                port.save()
+
+            # tag the instance with the s-tag, so we can easily find the
+            # instance later
+            if self.volt and self.volt.s_tag:
+                tags = Tag.objects.filter(name="s_tag", value=self.volt.s_tag)
+                if not tags:
+                    tag = Tag(service=self.provider_service, content_object=instance, name="s_tag", value=self.volt.s_tag)
+                    tag.save()
+
+            # VTN-CORD needs a WAN address for the VM, so that the VM can
+            # be configured.
+            if CORD_USE_VTN:
+                tags = Tag.select_by_content_object(instance).filter(name="vm_vrouter_tenant")
+                if not tags:
+                    vrouter = self.get_vrouter_service().get_tenant(address_pool_name="addresses_veg", subscriber_service = self.provider_service)
+                    vrouter.set_attribute("tenant_for_instance_id", instance.id)
+                    vrouter.save()
+                    tag = Tag(service=self.provider_service, content_object=instance, name="vm_vrouter_tenant", value="%d" % vrouter.id)
+                    tag.save()
+
+    def save(self, *args, **kwargs):
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a vEG since it creates a slice
+                raise XOSProgrammingError("VEGTenant's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("VEGTenant's self.creator was not set")
+
+        super(VEGTenant, self).save(*args, **kwargs)
+        model_policy_veg(self.pk)
+
+    def delete(self, *args, **kwargs):
+        self.cleanup_vrouter()
+        self.cleanup_container()
+        super(VEGTenant, self).delete(*args, **kwargs)
+
+def model_policy_veg(pk):
+    # TODO: this should be made in to a real model_policy
+    with transaction.atomic():
+        veg = VEGTenant.objects.select_for_update().filter(pk=pk)
+        if not veg:
+            return
+        veg = veg[0]
+        veg.manage_container()
+        veg.manage_vrouter()
+        veg.manage_bbs_account()
+        veg.cleanup_orphans()
+
+
diff --git a/xos/synchronizer/broadbandshield.py b/xos/synchronizer/broadbandshield.py
new file mode 100644
index 0000000..19ad018
--- /dev/null
+++ b/xos/synchronizer/broadbandshield.py
@@ -0,0 +1,396 @@
+import requests
+import logging
+import json
+import sys
+from rest_framework.exceptions import APIException
+
+""" format of settings
+
+    ["settings"]
+        ["watershed"]
+        ["rating"]
+        ["categories"]
+        ["blocklist"]
+        ["allowlist"]
+
+    ["users"]
+        array
+            ["account_id"] - 58
+            ["reporting"] - False
+            ["name"] - Scott1
+            ["devices"]
+            ["settings"] -
+                ["watershed"]
+                ["rating"]
+                ["categories"]
+                ["blocklist"]
+                ["allowlist"]
+
+    ["devices"]
+        array
+            ["username"] - "Scott1" or "" if whole-house
+            ["uuid"] - empty
+            ["mac_address"] - mac address as hex digits in ascii
+            ["type"] - "laptop"
+            ["name"] - human readable name of device ("Scott's laptop")
+            ["settings"]
+                 ["watershed"]
+                     array
+                         array
+                             ["rating"]
+                             ["category"]
+                 ["rating"] - ["G" | "NONE"]
+                 ["categories"] - list of categories set by rating
+                 ["blocklist"] - []
+                 ["allowlist"] - []
+"""
+
+class BBS_Failure(APIException):
+    status_code=400
+    def __init__(self, why="broadbandshield error", fields={}):
+        APIException.__init__(self, {"error": "BBS_Failure",
+                            "specific_error": why,
+                            "fields": fields})
+
+
+class BBS:
+    level_map = {"PG_13": "PG13",
+                 "NONE": "OFF",
+                 "ALL": "NONE",
+                 None: "NONE"}
+
+    def __init__(self, username, password, bbs_hostname=None, bbs_port=None):
+        self.username = username
+        self.password = password
+
+        # XXX not tested on port 80
+        #self.bbs_hostname = "www.broadbandshield.com"
+        #self.bbs_port = 80
+
+        if not bbs_hostname:
+            bbs_hostname = "cordcompute01.onlab.us"
+        if not bbs_port:
+            bbs_port = 8018
+
+        self.bbs_hostname = bbs_hostname
+        self.bbs_port = int(bbs_port)
+
+        self.api = "http://%s:%d/api" % (self.bbs_hostname, self.bbs_port)
+        self.nic_update = "http://%s:%d/nic/update" % (self.bbs_hostname, self.bbs_port)
+
+        self.session = None
+        self.settings = None
+
+    def login(self):
+        self.session = requests.Session()
+        r = self.session.post(self.api + "/login", data = json.dumps({"email": self.username, "password": self.password}))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to login (%d)" % r.status_code)
+
+    def get_account(self):
+        if not self.session:
+            self.login()
+
+        r = self.session.get(self.api + "/account")
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to get account settings (%d)" % r.status_code)
+        self.settings = r.json()
+
+        return self.settings
+
+    def post_account(self):
+        if not self.settings:
+            raise BBS_Failure("no settings to post")
+
+        r = self.session.post(self.api + "/account/settings", data= json.dumps(self.settings))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to set account settings (%d)" % r.status_code)
+
+    def add_device(self, name, mac, type="tablet", username=""):
+        data = {"name": name, "mac_address": mac, "type": type, "username": username}
+        r = self.session.post(self.api + "/device", data = json.dumps(data))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to add device (%d)" % r.status_code)
+
+    def delete_device(self, data):
+        r = self.session.delete(self.api + "/device", data = json.dumps(data))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to delete device (%d)" % r.status_code)
+
+    def add_user(self, name, rating="NONE", categories=[]):
+        data = {"name": name, "settings": {"rating": rating, "categories": categories}}
+        r = self.session.post(self.api + "/users", data = json.dumps(data))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to add user (%d)" % r.status_code)
+
+    def delete_user(self, data):
+        r = self.session.delete(self.api + "/users", data = json.dumps(data))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to delete user (%d)" % r.status_code)
+
+    def clear_users_and_devices(self):
+        if not self.settings:
+            self.get_account()
+
+        for device in self.settings["devices"]:
+            self.delete_device(device)
+
+        for user in self.settings["users"]:
+            self.delete_user(user)
+
+    def get_whole_home_level(self):
+        if not self.settings:
+            self.get_account()
+
+        return self.settings["settings"]["rating"]
+
+    def sync(self, whole_home_level, users):
+        if not self.settings:
+            self.get_account()
+
+        veg_users = {}
+        for user in users:
+            user = user.copy()
+            user["level"] = self.level_map.get(user["level"], user["level"])
+            user["mac"] = user.get("mac", "")
+            veg_users[user["name"]] = user
+
+        whole_home_level = self.level_map.get(whole_home_level, whole_home_level)
+
+        if (whole_home_level != self.settings["settings"]["rating"]):
+            print "*** set whole_home", whole_home_level, "***"
+            self.settings["settings"]["rating"] = whole_home_level
+            self.post_account()
+
+        bbs_usernames = [bbs_user["name"] for bbs_user in self.settings["users"]]
+        bbs_devicenames = [bbs_device["name"] for bbs_device in self.settings["devices"]]
+
+        add_users = []
+        add_devices = []
+        delete_users = []
+        delete_devices = []
+
+        for bbs_user in self.settings["users"]:
+             bbs_username = bbs_user["name"]
+             if bbs_username in veg_users.keys():
+                 veg_user = veg_users[bbs_username]
+                 if bbs_user["settings"]["rating"] != veg_user["level"]:
+                     print "set user", veg_user["name"], "rating", veg_user["level"]
+                     #bbs_user["settings"]["rating"] = veg_user["level"]
+                     # add can be used as an update
+                     add_users.append(veg_user)
+             else:
+                 delete_users.append(bbs_user)
+
+        for bbs_device in self.settings["devices"]:
+             bbs_devicename = bbs_device["name"]
+             if bbs_devicename in veg_users.keys():
+                 veg_user = veg_users[bbs_devicename]
+                 if bbs_device["mac_address"] != veg_user["mac"]:
+                     print "set device", veg_user["name"], "mac", veg_user["mac"]
+                     #bbs_device["mac_address"] = veg_user["mac"]
+                     # add of a device can't be used as an update, as you'll end
+                     # up with two of them.
+                     delete_devices.append(bbs_device)
+                     add_devices.append(veg_user)
+             else:
+                 delete_devices.append(bbs_device)
+
+        for (username, user) in veg_users.iteritems():
+            if not username in bbs_usernames:
+                add_users.append(user)
+            if not username in bbs_devicenames:
+                add_devices.append(user)
+
+        for bbs_user in delete_users:
+            print "delete user", bbs_user["name"]
+            self.delete_user(bbs_user)
+
+        for bbs_device in delete_devices:
+            print "delete device", bbs_device["name"]
+            self.delete_device(bbs_device)
+
+        for veg_user in add_users:
+            print "add user", veg_user["name"], "level", veg_user["level"]
+            self.add_user(veg_user["name"], veg_user["level"])
+
+        for veg_user in add_devices:
+            print "add device", veg_user["name"], "mac", veg_user["mac"]
+            self.add_device(veg_user["name"], veg_user["mac"], "tablet", veg_user["name"])
+
+    def get_whole_home_rating(self):
+        return self.settings["settings"]["rating"]
+
+    def get_user(self, name):
+        for user in self.settings["users"]:
+            if user["name"]==name:
+                return user
+        return None
+
+    def get_device(self, name):
+        for device in self.settings["devices"]:
+             if device["name"]==name:
+                 return device
+        return None
+
+    def dump(self):
+        if not self.settings:
+            self.get_account()
+
+        print "whole_home_rating:", self.settings["settings"]["rating"]
+        print "users:"
+        for user in self.settings["users"]:
+            print "  user", user["name"], "rating", user["settings"]["rating"]
+
+        print "devices:"
+        for device in self.settings["devices"]:
+            print "  device", device["name"], "user", device["username"], "rating", device["settings"]["rating"], "mac", device["mac_address"]
+
+    def associate(self, ip):
+        bbs_hostname = "cordcompute01.onlab.us"
+        r = requests.get(self.nic_update, params={"hostname": "onlab.us"}, headers={"X-Forwarded-For": ip}, auth=requests.auth.HTTPBasicAuth(self.username,self.password))
+        if (r.status_code != 200):
+            raise BBS_Failure("Failed to associate account with ip (%d)" % r.status_code)
+
+def dump():
+    bbs = BBS(sys.argv[2], sys.argv[3])
+    bbs.dump()
+
+def associate():
+    if len(sys.argv)<5:
+        print "you need to specify IP address"
+        sys.exit(-1)
+
+    bbs = BBS(sys.argv[2], sys.argv[3])
+    bbs.associate(sys.argv[4])
+
+def self_test():
+    bbs = BBS(sys.argv[2], sys.argv[3])
+
+    print "*** initial ***"
+    bbs.dump()
+
+    open("bbs.json","w").write(json.dumps(bbs.settings))
+
+    # a new BBS account will throw a 500 error if it has no rating
+    bbs.settings["settings"]["rating"] = "R"
+    #bbs.settings["settings"]["category"] = [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'CYBERBULLY', u'ANONYMIZERS', u'SUICIDE', u'MALWARE']
+    #bbs.settings["settings"]["blocklist"] = []
+    #bbs.settings["settings"]["allowlist"] = []
+    #for water in bbs.settings["settings"]["watershed"];
+    #    water["categories"]=[]
+    # delete everything
+    bbs.post_account()
+    bbs.clear_users_and_devices()
+
+    print "*** cleared ***"
+    bbs.settings=None
+    bbs.dump()
+
+    users = [{"name": "Moms pc", "level": "R", "mac": "010203040506"},
+             {"name": "Dads pc", "level": "R", "mac": "010203040507"},
+             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
+             {"name": "Jills iphone", "level": "G", "mac": "010203040509"}]
+
+    print "*** syncing mom-R, Dad-R, jack-PG, Jill-G, wholehome-PG-13 ***"
+
+    bbs.settings = None
+    bbs.sync("PG-13", users)
+
+    print "*** after sync ***"
+    bbs.settings=None
+    bbs.dump()
+    assert(bbs.get_whole_home_rating() == "PG-13")
+    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
+    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
+    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
+    assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
+    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
+    assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
+
+    print "*** update whole home level ***"
+    bbs.settings=None
+    bbs.get_account()
+    bbs.settings["settings"]["rating"] = "PG"
+    bbs.post_account()
+
+    print "*** after sync ***"
+    bbs.settings=None
+    bbs.dump()
+    assert(bbs.get_whole_home_rating() == "PG")
+    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Dads pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
+    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "G")
+    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040506")
+    assert(bbs.get_device("Dads pc")["mac_address"] == "010203040507")
+    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
+    assert(bbs.get_device("Jills iphone")["mac_address"] == "010203040509")
+
+    print "*** delete dad, change moms IP, change jills level to PG, change whole home to PG-13 ***"
+    users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
+             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
+             {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
+
+    bbs.settings = None
+    bbs.sync("PG-13", users)
+
+    print "*** after sync ***"
+    bbs.settings=None
+    bbs.dump()
+    assert(bbs.get_whole_home_rating() == "PG-13")
+    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Dads pc") == None)
+    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
+    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
+    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
+    assert(bbs.get_device("Dads pc") == None)
+    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
+
+    print "add dad's laptop"
+    users = [{"name": "Moms pc", "level": "R", "mac": "010203040511"},
+             {"name": "Dads laptop", "level": "PG-13", "mac": "010203040512"},
+             {"name": "Jacks ipad", "level": "PG", "mac": "010203040508"},
+             {"name": "Jills iphone", "level": "PG", "mac": "010203040509"}]
+
+    bbs.settings = None
+    bbs.sync("PG-13", users)
+
+    print "*** after sync ***"
+    bbs.settings=None
+    bbs.dump()
+    assert(bbs.get_whole_home_rating() == "PG-13")
+    assert(bbs.get_user("Moms pc")["settings"]["rating"] == "R")
+    assert(bbs.get_user("Dads pc") == None)
+    assert(bbs.get_user("Dads laptop")["settings"]["rating"] == "PG-13")
+    assert(bbs.get_user("Jacks ipad")["settings"]["rating"] == "PG")
+    assert(bbs.get_user("Jills iphone")["settings"]["rating"] == "PG")
+    assert(bbs.get_device("Moms pc")["mac_address"] == "010203040511")
+    assert(bbs.get_device("Dads pc") == None)
+    assert(bbs.get_device("Dads laptop")["mac_address"] == "010203040512")
+    assert(bbs.get_device("Jacks ipad")["mac_address"] == "010203040508")
+
+    #bbs.add_user("tom", "G", [u'PORNOGRAPHY', u'ADULT', u'ILLEGAL', u'WEAPONS', u'DRUGS', u'GAMBLING', u'SOCIAL', u'CYBERBULLY', u'GAMES', u'ANONYMIZERS', u'SUICIDE', u'MALWARE'])
+    #bbs.add_device(name="tom's iphone", mac="010203040506", type="tablet", username="tom")
+
+def main():
+    if len(sys.argv)<4:
+        print "syntax: broadbandshield.py <operation> <email> <password>"
+        print "        operation = [dump | selftest | assocate"
+        sys.exit(-1)
+
+    operation = sys.argv[1]
+
+    if operation=="dump":
+        dump()
+    elif operation=="selftest":
+        self_test()
+    elif operation=="associate":
+        associate()
+
+if __name__ == "__main__":
+    main()
+
+
diff --git a/xos/synchronizer/files/docker.list b/xos/synchronizer/files/docker.list
new file mode 100644
index 0000000..0ee9ae0
--- /dev/null
+++ b/xos/synchronizer/files/docker.list
@@ -0,0 +1 @@
+deb https://get.docker.com/ubuntu docker main
diff --git a/xos/synchronizer/files/etc/rc.local b/xos/synchronizer/files/etc/rc.local
new file mode 100755
index 0000000..2c7588f
--- /dev/null
+++ b/xos/synchronizer/files/etc/rc.local
@@ -0,0 +1,23 @@
+#!/bin/sh -e
+#
+# rc.local
+#
+# This script is executed at the end of each multiuser runlevel.
+# Make sure that the script will "exit 0" on success or any other
+# value on error.
+#
+# In order to enable or disable this script just change the execution
+# bits.
+#
+# By default this script does nothing.
+
+ufw enable
+ufw allow bootps
+ufw allow from 192.168.0.0/24
+ufw route allow in on eth1 out on eth0
+ufw route allow in on eth1 out on eth2
+
+BWLIMIT=/usr/local/sbin/bwlimit.sh
+[ -e $BWLIMIT ] && $BWLIMIT start || true
+
+exit 0
diff --git a/xos/synchronizer/files/etc/service/message/run b/xos/synchronizer/files/etc/service/message/run
new file mode 100755
index 0000000..7b587d8
--- /dev/null
+++ b/xos/synchronizer/files/etc/service/message/run
@@ -0,0 +1,19 @@
+#!/usr/bin/python
+
+import BaseHTTPServer
+
+
+class HTTPHandlerOne(BaseHTTPServer.BaseHTTPRequestHandler):
+    def do_GET(self):
+        with open('./message.html', 'r') as msgfile:
+            message = msgfile.read()
+        # send a well-formed HTTP response (status line and headers) before the body
+        self.send_response(200)
+        self.send_header('Content-Type', 'text/html')
+        self.end_headers()
+        self.wfile.write(message)
+
+
+def run(server_class=BaseHTTPServer.HTTPServer,
+        handler_class=BaseHTTPServer.BaseHTTPRequestHandler):
+    server_address = ('192.168.0.1', 8000)
+    httpd = server_class(server_address, handler_class)
+    httpd.serve_forever()
+
+run(handler_class=HTTPHandlerOne)
diff --git a/xos/synchronizer/files/etc/ufw/after.init b/xos/synchronizer/files/etc/ufw/after.init
new file mode 100644
index 0000000..e89217d
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/after.init
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# after.init: if executable, called by ufw-init. See 'man ufw-framework' for
+#             details. Note that output from these scripts is not seen via the
+#             ufw command, but instead via ufw-init.
+#
+# Copyright 2013 Canonical Ltd.
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3,
+#    as published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+set -e
+
+case "$1" in
+start)
+    # typically required
+    ;;
+stop)
+    # typically required
+    ;;
+status)
+    # optional
+    ;;
+flush-all)
+    # optional
+    ;;
+*)
+    echo "'$1' not supported"
+    echo "Usage: after.init {start|stop|flush-all|status}"
+    ;;
+esac
diff --git a/xos/synchronizer/files/etc/ufw/after.rules b/xos/synchronizer/files/etc/ufw/after.rules
new file mode 100644
index 0000000..0d6c646
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/after.rules
@@ -0,0 +1,30 @@
+#
+# rules.input-after
+#
+# Rules that should be run after the ufw command line added rules. Custom
+# rules should be added to one of these chains:
+#   ufw-after-input
+#   ufw-after-output
+#   ufw-after-forward
+#
+
+# Don't delete these required lines, otherwise there will be errors
+*filter
+:ufw-after-input - [0:0]
+:ufw-after-output - [0:0]
+:ufw-after-forward - [0:0]
+# End required lines
+
+# don't log noisy services by default
+-A ufw-after-input -p udp --dport 137 -j ufw-skip-to-policy-input
+-A ufw-after-input -p udp --dport 138 -j ufw-skip-to-policy-input
+-A ufw-after-input -p tcp --dport 139 -j ufw-skip-to-policy-input
+-A ufw-after-input -p tcp --dport 445 -j ufw-skip-to-policy-input
+-A ufw-after-input -p udp --dport 67 -j ufw-skip-to-policy-input
+-A ufw-after-input -p udp --dport 68 -j ufw-skip-to-policy-input
+
+# don't log noisy broadcast
+-A ufw-after-input -m addrtype --dst-type BROADCAST -j ufw-skip-to-policy-input
+
+# don't delete the 'COMMIT' line or these rules won't be processed
+COMMIT
diff --git a/xos/synchronizer/files/etc/ufw/after6.rules b/xos/synchronizer/files/etc/ufw/after6.rules
new file mode 100644
index 0000000..0d99672
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/after6.rules
@@ -0,0 +1,27 @@
+#
+# rules.input-after
+#
+# Rules that should be run after the ufw command line added rules. Custom
+# rules should be added to one of these chains:
+#   ufw6-after-input
+#   ufw6-after-output
+#   ufw6-after-forward
+#
+
+# Don't delete these required lines, otherwise there will be errors
+*filter
+:ufw6-after-input - [0:0]
+:ufw6-after-output - [0:0]
+:ufw6-after-forward - [0:0]
+# End required lines
+
+# don't log noisy services by default
+-A ufw6-after-input -p udp --dport 137 -j ufw6-skip-to-policy-input
+-A ufw6-after-input -p udp --dport 138 -j ufw6-skip-to-policy-input
+-A ufw6-after-input -p tcp --dport 139 -j ufw6-skip-to-policy-input
+-A ufw6-after-input -p tcp --dport 445 -j ufw6-skip-to-policy-input
+-A ufw6-after-input -p udp --dport 546 -j ufw6-skip-to-policy-input
+-A ufw6-after-input -p udp --dport 547 -j ufw6-skip-to-policy-input
+
+# don't delete the 'COMMIT' line or these rules won't be processed
+COMMIT
diff --git a/xos/synchronizer/files/etc/ufw/applications.d/openssh-server b/xos/synchronizer/files/etc/ufw/applications.d/openssh-server
new file mode 100644
index 0000000..9bbe906
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/applications.d/openssh-server
@@ -0,0 +1,4 @@
+[OpenSSH]
+title=Secure shell server, an rshd replacement
+description=OpenSSH is a free implementation of the Secure Shell protocol.
+ports=22/tcp
diff --git a/xos/synchronizer/files/etc/ufw/before.init b/xos/synchronizer/files/etc/ufw/before.init
new file mode 100644
index 0000000..1348cb1
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/before.init
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# before.init: if executable, called by ufw-init. See 'man ufw-framework' for
+#              details. Note that output from these scripts is not seen via the
+#              ufw command, but instead via ufw-init.
+#
+# Copyright 2013 Canonical Ltd.
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3,
+#    as published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+set -e
+
+case "$1" in
+start)
+    # typically required
+    ;;
+stop)
+    # typically required
+    ;;
+status)
+    # optional
+    ;;
+flush-all)
+    # optional
+    ;;
+*)
+    echo "'$1' not supported"
+    echo "Usage: before.init {start|stop|flush-all|status}"
+    ;;
+esac
diff --git a/xos/synchronizer/files/etc/ufw/before6.rules b/xos/synchronizer/files/etc/ufw/before6.rules
new file mode 100644
index 0000000..0b26ed8
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/before6.rules
@@ -0,0 +1,73 @@
+#
+# rules.before
+#
+# Rules that should be run before the ufw command line added rules. Custom
+# rules should be added to one of these chains:
+#   ufw6-before-input
+#   ufw6-before-output
+#   ufw6-before-forward
+#
+
+# Don't delete these required lines, otherwise there will be errors
+*filter
+:ufw6-before-input - [0:0]
+:ufw6-before-output - [0:0]
+:ufw6-before-forward - [0:0]
+# End required lines
+
+
+# allow all on loopback
+-A ufw6-before-input -i lo -j ACCEPT
+-A ufw6-before-output -o lo -j ACCEPT
+
+# drop packets with RH0 headers
+-A ufw6-before-input -m rt --rt-type 0 -j DROP
+-A ufw6-before-forward -m rt --rt-type 0 -j DROP
+-A ufw6-before-output -m rt --rt-type 0 -j DROP
+
+# for stateless autoconfiguration (restrict NDP messages to hop limit of 255)
+-A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
+-A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
+-A ufw6-before-output -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type router-solicitation -m hl --hl-eq 255 -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type router-advertisement -m hl --hl-eq 255 -j ACCEPT
+
+# quickly process packets for which we already have a connection
+-A ufw6-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+-A ufw6-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+-A ufw6-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+
+# for multicast ping replies from link-local addresses (these don't have an
+# associated connection and would otherwise be marked INVALID)
+-A ufw6-before-input -p icmpv6 --icmpv6-type echo-reply -s fe80::/10 -j ACCEPT
+
+# drop INVALID packets (logs these in loglevel medium and higher)
+-A ufw6-before-input -m conntrack --ctstate INVALID -j ufw6-logging-deny
+-A ufw6-before-input -m conntrack --ctstate INVALID -j DROP
+
+# ok icmp codes for INPUT
+-A ufw6-before-input -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
+-A ufw6-before-input -p icmpv6 --icmpv6-type echo-request -j ACCEPT
+
+# ok icmp code for FORWARD
+-A ufw6-before-forward -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT
+-A ufw6-before-forward -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT
+-A ufw6-before-forward -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT
+-A ufw6-before-forward -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT
+-A ufw6-before-forward -p icmpv6 --icmpv6-type echo-request -j ACCEPT
+
+# allow dhcp client to work
+-A ufw6-before-input -p udp -s fe80::/10 --sport 547 -d fe80::/10 --dport 546 -j ACCEPT
+
+# allow MULTICAST mDNS for service discovery
+-A ufw6-before-input -p udp -d ff02::fb --dport 5353 -j ACCEPT
+
+# allow MULTICAST UPnP for service discovery
+-A ufw6-before-input -p udp -d ff02::f --dport 1900 -j ACCEPT
+
+# don't delete the 'COMMIT' line or these rules won't be processed
+COMMIT
diff --git a/xos/synchronizer/files/etc/ufw/sysctl.conf b/xos/synchronizer/files/etc/ufw/sysctl.conf
new file mode 100644
index 0000000..8707032
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/sysctl.conf
@@ -0,0 +1,57 @@
+#
+# Configuration file for setting network variables. Please note these settings
+# override /etc/sysctl.conf. If you prefer to use /etc/sysctl.conf, please
+# adjust IPT_SYSCTL in /etc/default/ufw.
+#
+
+# Uncomment this to allow this host to route packets between interfaces
+#net/ipv4/ip_forward=1
+#net/ipv6/conf/default/forwarding=1
+#net/ipv6/conf/all/forwarding=1
+
+# Turn on Source Address Verification in all interfaces to prevent some
+# spoofing attacks
+net/ipv4/conf/default/rp_filter=1
+net/ipv4/conf/all/rp_filter=1
+
+# Do not accept IP source route packets (we are not a router)
+net/ipv4/conf/default/accept_source_route=0
+net/ipv4/conf/all/accept_source_route=0
+net/ipv6/conf/default/accept_source_route=0
+net/ipv6/conf/all/accept_source_route=0
+
+# Disable ICMP redirects. ICMP redirects are rarely used but can be used in
+# MITM (man-in-the-middle) attacks. Disabling ICMP may disrupt legitimate
+# traffic to those sites.
+net/ipv4/conf/default/accept_redirects=0
+net/ipv4/conf/all/accept_redirects=0
+net/ipv6/conf/default/accept_redirects=0
+net/ipv6/conf/all/accept_redirects=0
+
+# Ignore bogus ICMP errors
+net/ipv4/icmp_echo_ignore_broadcasts=1
+net/ipv4/icmp_ignore_bogus_error_responses=1
+net/ipv4/icmp_echo_ignore_all=0
+
+# Don't log Martian Packets (impossible packets)
+net/ipv4/conf/default/log_martians=0
+net/ipv4/conf/all/log_martians=0
+
+# Change to '1' to enable TCP/IP SYN cookies. This disables TCP Window Scaling
+# (http://lkml.org/lkml/2008/2/5/167)
+net/ipv4/tcp_syncookies=0
+
+#net/ipv4/tcp_fin_timeout=30
+#net/ipv4/tcp_keepalive_intvl=1800
+
+# normally allowing tcp_sack is ok, but if going through OpenBSD 3.8 RELEASE or
+# earlier pf firewall, should set this to 0
+net/ipv4/tcp_sack=1
+
+# Uncomment this to turn off ipv6 autoconfiguration
+#net/ipv6/conf/default/autoconf=0
+#net/ipv6/conf/all/autoconf=0
+
+# Uncomment this to enable ipv6 privacy addressing
+#net/ipv6/conf/default/use_tempaddr=2
+#net/ipv6/conf/all/use_tempaddr=2
diff --git a/xos/synchronizer/files/etc/ufw/ufw.conf b/xos/synchronizer/files/etc/ufw/ufw.conf
new file mode 100644
index 0000000..28fe534
--- /dev/null
+++ b/xos/synchronizer/files/etc/ufw/ufw.conf
@@ -0,0 +1,10 @@
+# /etc/ufw/ufw.conf
+#
+
+# Set to yes to start on boot. If setting this remotely, be sure to add a rule
+# to allow your remote connection before starting ufw. Eg: 'ufw allow 22/tcp'
+ENABLED=yes
+
+# Please use the 'ufw' command to set the loglevel. Eg: 'ufw logging medium'.
+# See 'man ufw' for details.
+LOGLEVEL=low
diff --git a/xos/synchronizer/files/veg.conf b/xos/synchronizer/files/veg.conf
new file mode 100644
index 0000000..7372d19
--- /dev/null
+++ b/xos/synchronizer/files/veg.conf
@@ -0,0 +1,10 @@
+# Upstart script for vEG
+description "vEG container"
+author "andy@onlab.us"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-veg.sh
+end script
diff --git a/xos/synchronizer/files/veg.dnsmasq b/xos/synchronizer/files/veg.dnsmasq
new file mode 100644
index 0000000..2b2687b
--- /dev/null
+++ b/xos/synchronizer/files/veg.dnsmasq
@@ -0,0 +1,2 @@
+listen-address=192.168.0.1
+dhcp-range=192.168.0.2,192.168.0.254,6
diff --git a/xos/synchronizer/files/vm-resolv.conf b/xos/synchronizer/files/vm-resolv.conf
new file mode 100644
index 0000000..cae093a
--- /dev/null
+++ b/xos/synchronizer/files/vm-resolv.conf
@@ -0,0 +1 @@
+nameserver 8.8.8.8
diff --git a/xos/synchronizer/manifest b/xos/synchronizer/manifest
new file mode 100644
index 0000000..959c763
--- /dev/null
+++ b/xos/synchronizer/manifest
@@ -0,0 +1,51 @@
+templates/bwlimit.sh.j2
+templates/vlan_sample.j2
+templates/before.rules.j2
+templates/start-veg.sh.j2
+templates/dnsmasq_safe_servers.j2
+templates/docker.j2
+templates/firewall_sample.j2
+templates/rc.local.j2
+templates/veg.conf.j2
+templates/message.html.j2
+templates/dnsmasq_servers.j2
+templates/start-veg-vtn.sh.j2
+manifest
+broadbandshield.py
+observer_ansible_test.py
+veg_synchronizer_config
+start-bbs.sh
+steps/sync_vegtenant.py
+steps/sync_vegtenant_new.yaml
+steps/sync_vegtenant_vtn.yaml
+steps/sync_vegtenant.yaml
+steps/sync_monitoring_agent.yaml
+steps/test.yaml
+steps/ansible_test/README
+steps/ansible_test/test.yaml
+steps/ansible_test/xos.py
+steps/ansible_test/test.sh
+steps/ansible_test/inventory.txt
+start.sh
+files/veg.conf
+files/etc/service/message/run
+files/etc/rc.local
+files/etc/ufw/after6.rules
+files/etc/ufw/applications.d/openssh-server
+files/etc/ufw/sysctl.conf
+files/etc/ufw/ufw.conf
+files/etc/ufw/before6.rules
+files/etc/ufw/after.init
+files/etc/ufw/before.init
+files/etc/ufw/after.rules
+files/vm-resolv.conf
+files/docker.list
+files/veg.dnsmasq
+run-vtn.sh
+stop.sh
+veg-synchronizer.py
+model-deps
+supervisor/veg-observer.conf
+run.sh
+vtn_veg_synchronizer_config
+veg_stats_notifier.py
diff --git a/xos/synchronizer/model-deps b/xos/synchronizer/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizer/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizer/observer_ansible_test.py b/xos/synchronizer/observer_ansible_test.py
new file mode 100644
index 0000000..00fb067
--- /dev/null
+++ b/xos/synchronizer/observer_ansible_test.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+import os
+import sys
+
+sys.path.append("../..")
+import synchronizers.base.ansible_helper
+
+print sys.argv
+
+private_key="""-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAtJiuarud5S4Y2quDeWyaS0UCQGQtfuSzzNhplFwujYnJGL65
+e14REtv+UuHGymyr/SfkTrBd8vH5NI2UZ/4sZW13ieI/1d97OeVe2+ct0Y4BaFEI
+3Hja6DIpsY3Q2cBQsWUwcQzbMIF9jIq8SzwR1zk8UtZi09fNxqjCchRPlBvbiLKX
+g0/yirN237WbaKzK++8EPy3nuv83216MXHFFSjuxfICe/RhjaqMzVp7isSbv1suU
+geyvNNzU71c/K13gTggdcIXeRQBiJYio2Sn3h2nsV6AFqFH4fjERxWG55Q4e3jeE
+tWM/Dw+hqYKg/25UcmM862a6sUmuDCmM5S3VEQIDAQABAoIBACH88iZoNOki6c6N
+pOq/Q7KSxD+2GtHc3PHacNRZHPHKUqxziJjtNS1lddHHaBBEr4GvdkpQ6v2ECLCZ
+TKrdrSFRnsO2bukjbB+TSWz9byQXI7CsP4yuuhQlDK+7zuiMRyN7tcgw8TeJx0Uh
+/xnxrjHhNbcpXeQcoz+WFzI9HFT1MEGmMS4Lyp/zLB/pmfY9h7V9d+EeRZDi78jq
+Vir6MI6iCTa0T02dvHUFOg+wXLb0nb8V1xKDL+6cAJla7LzwoG8lTnvp5DSYCojI
+5JrILYafeO8RbBV2GWmaE5mkHgeBkFZ+qZQ7K0MjR30Yh6tajB7P3+F/Max8FUgW
+xLHr8AECgYEA2+o0ge3HtZcepEFBKKYnLTwoEpPCfLElWZHzUJYDz259s4JLsfak
+tROANFEdsJUjpmWG52MCL+bgKFFOedDkt4p1jgcIneaHk0jvoU11wG7W3jZZVy1q
+WjQNH5vDU+hg5tm/CREwm7lbUxR9Xuj9K63CNAAGp8KO7h2tOH8woIECgYEA0jrb
+LUg30RxO3+vrq9dUYohrDRisk5zKXuRLfxRA+E+ruvZ7CctG2OpM+658/qZM/w95
+7pOj6zz3//w7tAvH9erY+JOISnzaYKx04sYC1MfbFiFkq5j0gpuYm/MULDYNvFqr
+NU2Buj4dW+ZB+SeficsQOqm5QeNxh1kgiDCs7JECgYEAjSLGCAzeesA9vhTTCI95
+3SIaZbHGw9e8rLtqeHGOiHXU3nvksJYmJsAZK3pTn5xXgNbvuVhlcvCtM7LatntG
+DjUiNMB22z+0CuZoRBE+XP3FkF84/yX6d2Goenyw4wzkA8QDQoJxu789yRgBTgQh
+VwLw/AZ4PvoyWMdbAENApgECgYEAvFikosYP09XTyIPaKaOKY5iqqBoSC1GucSOB
+jAG+T3k5dxB6nQS0nYQUomvqak7drqnT6O33Lrr5ySrW5nCjnmvgJZwvv+Rp1bDM
+K5uRT8caPpJ+Wcp4TUdPi3BVA2MOHVDyEJg3AH/D1+DL/IgGQ/JcwOHsKt61iLhO
+EBXj5zECgYEAk+HuwksUPkSxg/AiJGbapGDK6XGymEUzo2duWlnofRqGcZ3NT3bB
+/kDI1KxQdlpODXSi4/BuTpbQiFOrzcEq5e5ytoMxlCHh3Fl3Jxl+JlgO21vAUvP6
+4SET7Q/6LxmfBlCVRg0dXDwcfJLgbnWxyvprIcz4e0FSFVZTBs/6tFk=
+-----END RSA PRIVATE KEY-----
+"""
+
+synchronizers.base.ansible_helper.run_template_ssh("test.yaml",
+                                  {"instance_name": "onlab_test405-378",
+                                   "instance_id": "instance-0000004d",
+                                   "hostname": "node67.washington.vicci.org",
+                                   "private_key": private_key})
+
diff --git a/xos/synchronizer/run-vtn.sh b/xos/synchronizer/run-vtn.sh
new file mode 100755
index 0000000..dab40ba
--- /dev/null
+++ b/xos/synchronizer/run-vtn.sh
@@ -0,0 +1,8 @@
+#if [[ ! -e ./veg-observer.py ]]; then
+#    ln -s ../../xos-observer.py veg-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+cp /root/setup/node_key $XOS_DIR/synchronizers/veg/node_key
+chmod 0600 $XOS_DIR/synchronizers/veg/node_key
+python veg-synchronizer.py  -C $XOS_DIR/synchronizers/veg/vtn_veg_synchronizer_config
diff --git a/xos/synchronizer/run.sh b/xos/synchronizer/run.sh
new file mode 100755
index 0000000..043b1b4
--- /dev/null
+++ b/xos/synchronizer/run.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./veg-observer.py ]]; then
+#    ln -s ../../xos-observer.py veg-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+python veg-synchronizer.py  -C $XOS_DIR/synchronizers/veg/veg_synchronizer_config
diff --git a/xos/synchronizer/start-bbs.sh b/xos/synchronizer/start-bbs.sh
new file mode 100755
index 0000000..c8ee147
--- /dev/null
+++ b/xos/synchronizer/start-bbs.sh
@@ -0,0 +1,14 @@
+#! /bin/bash
+
+# put this in /opt/xerocole/start-bbs.sh
+# make sure it's executable
+# set it up in crontab
+#   @reboot /opt/xerocole/start-bbs.sh
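+#
+# e.g. (one way to wire it up; adjust paths/permissions to taste):
+#   chmod 755 /opt/xerocole/start-bbs.sh
+#   crontab -e        # then add the @reboot line shown above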
+
+ulimit -n 200000
+cd /opt/xerocole/answerx
+/opt/xerocole/answerx/startStop checkconfig answerx
+/opt/xerocole/answerx/startStop start answerx
+cd /opt/xerocole/namecontrols
+nohup /opt/xerocole/namecontrols/broadbandshield &
+nohup socat TCP-LISTEN:80,bind=0.0.0.0,fork TCP4:127.0.0.1:8018 &  
diff --git a/xos/synchronizer/start.sh b/xos/synchronizer/start.sh
new file mode 100755
index 0000000..2f0b881
--- /dev/null
+++ b/xos/synchronizer/start.sh
@@ -0,0 +1,6 @@
+#if [[ ! -e ./veg-observer.py ]]; then
+#    ln -s ../../xos-observer.py veg-observer.py
+#fi
+
+export XOS_DIR=/opt/xos
+nohup python veg-synchronizer.py  -C $XOS_DIR/synchronizers/veg/veg_synchronizer_config > /dev/null 2>&1 &
diff --git a/xos/synchronizer/steps/ansible_test/README b/xos/synchronizer/steps/ansible_test/README
new file mode 100644
index 0000000..d3b2c54
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/README
@@ -0,0 +1,4 @@
+Some scripts used while testing the Ansible instance configuration observer
+
+xos.py was probably the prototype of an XOS SSH connection module for Ansible that understood how to SSH
+into the instances without needing to resort to config-file and environment tricks.
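+
+For reference, test.sh exercises this setup with something like:
+
+    ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
+
+and test.yaml opts into the custom connection plugin via "connection: xos".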
diff --git a/xos/synchronizer/steps/ansible_test/inventory.txt b/xos/synchronizer/steps/ansible_test/inventory.txt
new file mode 100644
index 0000000..bd5b542
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/inventory.txt
@@ -0,0 +1,16 @@
+[onlab_hpc-355]
+node67.washington.vicci.org instance_id=instance-00000045 instance_name=onlab_hpc-355
+
+[onlab_test405-372]
+node67.washington.vicci.org instance_id=instance-0000004c instance_name=onlab_test405-372
+
+[onlab_test405-376]
+node1.cs.arizona.edu
+
+[onlab_test405-378]
+node67.washington.vicci.org ansible_ssh_private_key_file=/home/smbaker/.ssh/id_rsa
+#/home/smbaker/projects/vicci/keys/test_service_key_rsa
+
+[mysite_test2-48]
+cordcompute02.onlab.us ansible_ssh_private_key_file=/home/smbaker/projects/vicci/keys/demo_admin.rsa
+
diff --git a/xos/synchronizer/steps/ansible_test/test.sh b/xos/synchronizer/steps/ansible_test/test.sh
new file mode 100755
index 0000000..157ba9c
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/test.sh
@@ -0,0 +1,2 @@
+#! /bin/bash
+ansible-playbook --private-key /home/smbaker/.ssh/id_rsa -i ./inventory.txt test.yaml
diff --git a/xos/synchronizer/steps/ansible_test/test.yaml b/xos/synchronizer/steps/ansible_test/test.yaml
new file mode 100644
index 0000000..6a29d56
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/test.yaml
@@ -0,0 +1,12 @@
+---
+- hosts: onlab_test405-372
+  connection: xos
+  user: ubuntu
+  vars:
+     foo: 25
+#  instance_name: instance-00000045
+#  slice_name: onlab_hpc-355
+
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar
diff --git a/xos/synchronizer/steps/ansible_test/xos.py b/xos/synchronizer/steps/ansible_test/xos.py
new file mode 100755
index 0000000..eb4f3eb
--- /dev/null
+++ b/xos/synchronizer/steps/ansible_test/xos.py
@@ -0,0 +1,444 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import re
+import subprocess
+import shlex
+import pipes
+import random
+import select
+import fcntl
+import hmac
+import pwd
+import gettext
+import pty
+from hashlib import sha1
+import ansible.constants as C
+from ansible.callbacks import vvv
+from ansible import errors
+from ansible import utils
+
+class Connection(object):
+    ''' ssh based connections '''
+
+    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
+        self.runner = runner
+        self.host = host
+        self.ipv6 = ':' in self.host
+        self.port = port
+        self.user = str(user)
+        self.password = password
+        self.private_key_file = private_key_file
+        self.HASHED_KEY_MAGIC = "|1|"
+        self.has_pipelining = True
+        #self.instance_id = "instance-00000045" # C.get_config(C.p, "xos", "instance_id", "INSTANCE_ID", None)
+        #self.instance_name = "onlab_hpc-355" # C.get_config(C.p, "xos", "instance_name", "SLIVER_NAME", None)
+
+        inject={}
+        inject= utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
+
+        self.instance_id = inject["instance_id"]
+        self.instance_name = inject["instance_name"]
+
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+        self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
+        fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+
+    def connect(self):
+        ''' connect to the remote host '''
+
+        vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
+
+        self.common_args = []
+        extra_args = C.ANSIBLE_SSH_ARGS
+        if extra_args is not None:
+            # make sure there is no empty string added as this can produce weird errors
+            self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
+        else:
+            self.common_args += ["-o", "ControlMaster=auto",
+                                 "-o", "ControlPersist=60s",
+                                 "-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
+        self.common_args += ["-o", "ProxyCommand ssh -q -i %s %s@%s" % (self.private_key_file, self.instance_id, self.host)]
+
+        cp_in_use = False
+        cp_path_set = False
+        for arg in self.common_args:
+            if "ControlPersist" in arg:
+                cp_in_use = True
+            if "ControlPath" in arg:
+                cp_path_set = True
+
+        if cp_in_use and not cp_path_set:
+            self.common_args += ["-o", "ControlPath=%s" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
+
+        if not C.HOST_KEY_CHECKING:
+            self.common_args += ["-o", "StrictHostKeyChecking=no"]
+
+        if self.port is not None:
+            self.common_args += ["-o", "Port=%d" % (self.port)]
+        if self.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
+        elif self.runner.private_key_file is not None:
+            self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
+        if self.password:
+            self.common_args += ["-o", "GSSAPIAuthentication=no",
+                                 "-o", "PubkeyAuthentication=no"]
+        else:
+            self.common_args += ["-o", "KbdInteractiveAuthentication=no",
+                                 "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
+                                 "-o", "PasswordAuthentication=no"]
+        if self.user != pwd.getpwuid(os.geteuid())[0]:
+            self.common_args += ["-o", "User="+self.user]
+        self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
+
+        return self
+
+    def _run(self, cmd, indata):
+        if indata:
+            # do not use pseudo-pty
+            p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            stdin = p.stdin
+        else:
+            # try to use a pseudo-pty
+            try:
+                # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
+                master, slave = pty.openpty()
+                p = subprocess.Popen(cmd, stdin=slave,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = os.fdopen(master, 'w', 0)
+                os.close(slave)
+            except:
+                p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                stdin = p.stdin
+
+        return (p, stdin)
+
+    def _password_cmd(self):
+        if self.password:
+            try:
+                p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+                p.communicate()
+            except OSError:
+                raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
+            (self.rfd, self.wfd) = os.pipe()
+            return ["sshpass", "-d%d" % self.rfd]
+        return []
+
+    def _send_password(self):
+        if self.password:
+            os.close(self.rfd)
+            os.write(self.wfd, "%s\n" % self.password)
+            os.close(self.wfd)
+
+    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
+        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+        # We can't use p.communicate here because the ControlMaster may have stdout open as well
+        stdout = ''
+        stderr = ''
+        rpipes = [p.stdout, p.stderr]
+        if indata:
+            try:
+                stdin.write(indata)
+                stdin.close()
+            except:
+                raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+        # Read stdout/stderr from process
+        while True:
+            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
+
+            # fail early if the sudo/su password is wrong
+            if self.runner.sudo and sudoable:
+                if self.runner.sudo_pass:
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if stdout.endswith("%s\r\n%s" % (incorrect_password,
+                                                     prompt)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+
+                if stdout.endswith(prompt):
+                    raise errors.AnsibleError('Missing sudo password')
+
+            if self.runner.su and su and self.runner.su_pass:
+                incorrect_password = gettext.dgettext(
+                    "su", "Sorry")
+                if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
+                    raise errors.AnsibleError('Incorrect su password')
+
+            if p.stdout in rfd:
+                dat = os.read(p.stdout.fileno(), 9000)
+                stdout += dat
+                if dat == '':
+                    rpipes.remove(p.stdout)
+            if p.stderr in rfd:
+                dat = os.read(p.stderr.fileno(), 9000)
+                stderr += dat
+                if dat == '':
+                    rpipes.remove(p.stderr)
+            # only break out if no pipes are left to read or
+            # the pipes are completely read and
+            # the process is terminated
+            if (not rpipes or not rfd) and p.poll() is not None:
+                break
+            # No pipes are left to read but process is not yet terminated
+            # Only then it is safe to wait for the process to be finished
+            # NOTE: Actually p.poll() is always None here if rpipes is empty
+            elif not rpipes and p.poll() == None:
+                p.wait()
+                # The process is terminated. Since no pipes to read from are
+                # left, there is no need to call select() again.
+                break
+        # close stdin after process is terminated and stdout/stderr are read
+        # completely (see also issue #848)
+        stdin.close()
+        return (p.returncode, stdout, stderr)
+
+    def not_in_host_file(self, host):
+        if 'USER' in os.environ:
+            user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+        else:
+            user_host_file = "~/.ssh/known_hosts"
+        user_host_file = os.path.expanduser(user_host_file)
+        
+        host_file_list = []
+        host_file_list.append(user_host_file)
+        host_file_list.append("/etc/ssh/ssh_known_hosts")
+        host_file_list.append("/etc/ssh/ssh_known_hosts2")
+        
+        hfiles_not_found = 0
+        for hf in host_file_list:
+            if not os.path.exists(hf):
+                hfiles_not_found += 1
+                continue
+            try:
+                host_fh = open(hf)
+            except IOError, e:
+                hfiles_not_found += 1
+                continue
+            else:
+                data = host_fh.read()
+                host_fh.close()
+                
+            for line in data.split("\n"):
+                if line is None or " " not in line:
+                    continue
+                tokens = line.split()
+                if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
+                    # this is a hashed known host entry
+                    try:
+                        (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
+                        hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+                        hash.update(host)
+                        if hash.digest() == kn_host.decode('base64'):
+                            return False
+                    except:
+                        # invalid hashed host key, skip it
+                        continue
+                else:
+                    # standard host file entry
+                    if host in tokens[0]:
+                        return False
+
+        if (hfiles_not_found == len(host_file_list)):
+            vvv("EXEC previous known host file not found for %s" % host)
+        return True
+
+    def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable='/bin/sh', in_data=None, su_user=None, su=False):
+        ''' run a command on the remote host '''
+
+        ssh_cmd = self._password_cmd()
+        ssh_cmd += ["ssh", "-C"]
+        if not in_data:
+            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
+            # inside a tty automatically invokes the python interactive-mode but the modules are not
+            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
+            ssh_cmd += ["-tt"]
+        if utils.VERBOSITY > 3:
+            ssh_cmd += ["-vvv"]
+        else:
+            ssh_cmd += ["-q"]
+        ssh_cmd += self.common_args
+
+        if self.ipv6:
+            ssh_cmd += ['-6']
+        #ssh_cmd += [self.host]
+        ssh_cmd += [self.instance_name]
+
+        if su and su_user:
+            sudocmd, prompt, success_key = utils.make_su_cmd(su_user, executable, cmd)
+            prompt_re = re.compile(prompt)
+            ssh_cmd.append(sudocmd)
+        elif not self.runner.sudo or not sudoable:
+            prompt = None
+            if executable:
+                ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
+            else:
+                ssh_cmd.append(cmd)
+        else:
+            sudocmd, prompt, success_key = utils.make_sudo_cmd(sudo_user, executable, cmd)
+            ssh_cmd.append(sudocmd)
+
+        vvv("EXEC %s" % ssh_cmd, host=self.host)
+
+        not_in_host_file = self.not_in_host_file(self.host)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+
+        # create process
+        (p, stdin) = self._run(ssh_cmd, in_data)
+
+        self._send_password()
+
+        if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
+                (self.runner.su and su and self.runner.su_pass):
+            # several cases are handled for sudo privileges with password
+            # * NOPASSWD (tty & no-tty): detect success_key on stdout
+            # * without NOPASSWD:
+            #   * detect prompt on stdout (tty)
+            #   * detect prompt on stderr (no-tty)
+            fcntl.fcntl(p.stdout, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+            fcntl.fcntl(p.stderr, fcntl.F_SETFL,
+                        fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+            sudo_output = ''
+            sudo_errput = ''
+
+            while True:
+                if success_key in sudo_output or \
+                    (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
+                    (self.runner.su_pass and prompt_re.match(sudo_output)):
+                    break
+
+                rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
+                                              [p.stdout], self.runner.timeout)
+                if p.stderr in rfd:
+                    chunk = p.stderr.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_errput += chunk
+                    incorrect_password = gettext.dgettext(
+                        "sudo", "Sorry, try again.")
+                    if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
+                        raise errors.AnsibleError('Incorrect sudo password')
+                    elif sudo_errput.endswith(prompt):
+                        stdin.write(self.runner.sudo_pass + '\n')
+
+                if p.stdout in rfd:
+                    chunk = p.stdout.read()
+                    if not chunk:
+                        raise errors.AnsibleError('ssh connection closed waiting for sudo or su password prompt')
+                    sudo_output += chunk
+
+                if not rfd:
+                    # timeout. wrap up process communication
+                    stdout = p.communicate()
+                    raise errors.AnsibleError('ssh connection error waiting for sudo or su password prompt')
+
+            if success_key not in sudo_output:
+                if sudoable:
+                    stdin.write(self.runner.sudo_pass + '\n')
+                elif su:
+                    stdin.write(self.runner.su_pass + '\n')
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
+
+        if C.HOST_KEY_CHECKING and not_in_host_file:
+            # lock around the initial SSH connectivity so the user prompt about whether to add 
+            # the host to known hosts is not intermingled with multiprocess output.
+            fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
+            fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
+                              'unknown configuration option: ControlPersist' in stderr
+
+        if C.HOST_KEY_CHECKING:
+            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
+                raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this.  Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+
+        if p.returncode != 0 and controlpersisterror:
+            raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
+        if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
+            raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
+
+        return (p.returncode, '', stdout, stderr)
+
+    def put_file(self, in_path, out_path):
+        ''' transfer a file from local to remote '''
+        vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+        if not os.path.exists(in_path):
+            raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [in_path,host + ":" + pipes.quote(out_path)]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
+
+        (p, stdin) = self._run(cmd, indata)
+
+        self._send_password()
+
+        (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
+
+        if returncode != 0:
+            raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
+
+    def fetch_file(self, in_path, out_path):
+        ''' fetch a file from remote to local '''
+        vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+        cmd = self._password_cmd()
+
+        host = self.host
+        if self.ipv6:
+            host = '[%s]' % host
+
+        if C.DEFAULT_SCP_IF_SSH:
+            cmd += ["scp"] + self.common_args
+            cmd += [host + ":" + in_path, out_path]
+            indata = None
+        else:
+            cmd += ["sftp"] + self.common_args + [host]
+            indata = "get %s %s\n" % (in_path, out_path)
+
+        p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
+                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        self._send_password()
+        stdout, stderr = p.communicate(indata)
+
+        if p.returncode != 0:
+            raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
+
+    def close(self):
+        ''' not applicable since we're executing openssh binaries '''
+        pass
+
diff --git a/xos/synchronizer/steps/sync_monitoring_agent.yaml b/xos/synchronizer/steps/sync_monitoring_agent.yaml
new file mode 100644
index 0000000..36b7221
--- /dev/null
+++ b/xos/synchronizer/steps/sync_monitoring_agent.yaml
@@ -0,0 +1,43 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  vars:
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+
+  tasks:
+  - name: Check whether the veg_stats_notifier cron job is already running ([] avoids matching the pgrep process itself)
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+  - name: DEBUG
+    debug: var=cron_job_pids_count.stdout
+
+  - name: make sure /usr/local/share/veg_monitoring_agent exists
+    file: path=/usr/local/share/veg_monitoring_agent state=directory owner=root group=root
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=/usr/local/share/veg_monitoring_agent/veg_stats_notifier.py
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: sudo python /usr/local/share/veg_monitoring_agent/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    become: yes
+    when: cron_job_pids_count.stdout == "0"
+
diff --git a/xos/synchronizer/steps/sync_vegtenant.py b/xos/synchronizer/steps/sync_vegtenant.py
new file mode 100644
index 0000000..2b64bb1
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant.py
@@ -0,0 +1,308 @@
+import hashlib
+import os
+import socket
+import sys
+import base64
+import time
+from urlparse import urlparse
+from django.db.models import F, Q
+from xos.config import Config
+from synchronizers.base.syncstep import SyncStep
+from synchronizers.base.ansible_helper import run_template_ssh
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+from core.models import Service, Slice, Tag, ModelLink, CoarseTenant, Tenant, ServiceMonitoringAgentInfo
+from services.veg.models import VEGService, VEGTenant
+from xos.logger import Logger, logging
+
+# Deal with configurations where the hpc service is not onboarded
+try:
+    from services.hpc.models import HpcService, CDNPrefix
+    hpc_service_onboarded=True
+except:
+    hpc_service_onboarded=False
+
+# hpclibrary will be in steps/..
+parentdir = os.path.join(os.path.dirname(__file__),"..")
+sys.path.insert(0,parentdir)
+
+from broadbandshield import BBS
+
+logger = Logger(level=logging.INFO)
+
+ENABLE_QUICK_UPDATE=False
+
+CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+
+class SyncVEGTenant(SyncInstanceUsingAnsible):
+    provides=[VEGTenant]
+    observes=VEGTenant
+    requested_interval=0
+    template_name = "sync_vegtenant.yaml"
+    watches = [ModelLink(CoarseTenant,via='coarsetenant'), ModelLink(ServiceMonitoringAgentInfo,via='monitoringagentinfo')]
+
+    def __init__(self, *args, **kwargs):
+        super(SyncVEGTenant, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+        if (not deleted):
+            objs = VEGTenant.get_tenant_objects().filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False))
+        else:
+            objs = VEGTenant.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_veg_service(self, o):
+        if not o.provider_service:
+            return None
+
+        vegs = VEGService.get_service_objects().filter(id=o.provider_service.id)
+        if not vegs:
+            return None
+
+        return vegs[0]
+
+    def get_extra_attributes(self, o):
+        # This is a place to include extra attributes that aren't part of the
+        # object itself. In the case of vEG, we need to know:
+        #   1) the addresses of dnsdemux, to setup dnsmasq in the vEG
+        #   2) CDN prefixes, so we know what URLs to send to dnsdemux
+        #   3) BroadBandShield server addresses, for parental filtering
+        #   4) vlan_ids, for setting up networking in the vEG VM
+
+        veg_service = self.get_veg_service(o)
+
+        dnsdemux_ip = None
+        cdn_prefixes = []
+        # FIXME: this will probably break since there is no such folder under synchronizers
+        cdn_config_fn = "/opt/xos/synchronizers/veg/cdn_config"
+        if os.path.exists(cdn_config_fn):
+            # manual CDN configuration
+            #   the first line is the address of dnsredir
+            #   the remaining lines are domain names, one per line
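+            #   For illustration only (hypothetical address and domains), the file
+            #   might look like:
+            #       10.0.1.100
+            #       downloads.example.com
+            #       images.example.com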
+            lines = file(cdn_config_fn).readlines()
+            if len(lines)>=2:
+                dnsdemux_ip = lines[0].strip()
+                cdn_prefixes = [x.strip() for x in lines[1:] if x.strip()]
+        elif hpc_service_onboarded:
+            # automatic CDN configuration
+            #    it learns everything from CDN objects in XOS
+            #    not tested on pod.
+            if veg_service.backend_network_label:
+                # Connect to dnsdemux using the network specified by
+                #     veg_service.backend_network_label
+                for service in HpcService.objects.all():
+                    for slice in service.slices.all():
+                        if "dnsdemux" in slice.name:
+                            for instance in slice.instances.all():
+                                for ns in instance.ports.all():
+                                    if ns.ip and ns.network.labels and (veg_service.backend_network_label in ns.network.labels):
+                                        dnsdemux_ip = ns.ip
+                if not dnsdemux_ip:
+                    logger.info("failed to find a dnsdemux on network %s" % veg_service.backend_network_label,extra=o.tologdict())
+            else:
+                # Connect to dnsdemux using the instance's public address
+                for service in HpcService.objects.all():
+                    for slice in service.slices.all():
+                        if "dnsdemux" in slice.name:
+                            for instance in slice.instances.all():
+                                if not dnsdemux_ip:
+                                    try:
+                                        dnsdemux_ip = socket.gethostbyname(instance.node.name)
+                                    except:
+                                        pass
+                if not dnsdemux_ip:
+                    logger.info("failed to find a dnsdemux with a public address",extra=o.tologdict())
+
+            for prefix in CDNPrefix.objects.all():
+                cdn_prefixes.append(prefix.prefix)
+
+        dnsdemux_ip = dnsdemux_ip or "none"
+
+        # Broadbandshield can either be set up internally, using veg_service.bbs_slice,
+        # or it can be set up externally using veg_service.bbs_server.
+
+        bbs_addrs = []
+        if veg_service.bbs_slice:
+            if veg_service.backend_network_label:
+                for bbs_instance in veg_service.bbs_slice.instances.all():
+                    for ns in bbs_instance.ports.all():
+                        if ns.ip and ns.network.labels and (veg_service.backend_network_label in ns.network.labels):
+                            bbs_addrs.append(ns.ip)
+            else:
+                logger.info("unsupported configuration -- bbs_slice is set, but backend_network_label is not",extra=o.tologdict())
+            if not bbs_addrs:
+                logger.info("failed to find any usable addresses on bbs_slice",extra=o.tologdict())
+        elif veg_service.bbs_server:
+            bbs_addrs.append(veg_service.bbs_server)
+        else:
+            logger.info("neither bbs_slice nor bbs_server is configured in the vEG",extra=o.tologdict())
+
+        s_tags = []
+        c_tags = []
+        if o.volt:
+            s_tags.append(o.volt.s_tag)
+            c_tags.append(o.volt.c_tag)
+
+        try:
+            full_setup = Config().observer_full_setup
+        except:
+            full_setup = True
+
+        safe_macs=[]
+        if veg_service.url_filter_kind == "safebrowsing":
+            if o.volt and o.volt.subscriber:
+                for user in o.volt.subscriber.devices:
+                    level = user.get("level",None)
+                    mac = user.get("mac",None)
+                    if level in ["G", "PG"]:
+                        if mac:
+                            safe_macs.append(mac)
+
+
+        docker_opts = []
+        if veg_service.docker_insecure_registry:
+            reg_name = veg_service.docker_image_name.split("/",1)[0]
+            docker_opts.append("--insecure-registry " + reg_name)
+
+        fields = {"s_tags": s_tags,
+                "c_tags": c_tags,
+                "docker_remote_image_name": veg_service.docker_image_name,
+                "docker_local_image_name": veg_service.docker_image_name, # veg_service.docker_image_name.split("/",1)[1].split(":",1)[0],
+                "docker_opts": " ".join(docker_opts),
+                "dnsdemux_ip": dnsdemux_ip,
+                "cdn_prefixes": cdn_prefixes,
+                "bbs_addrs": bbs_addrs,
+                "full_setup": full_setup,
+                "isolation": o.instance.isolation,
+                "safe_browsing_macs": safe_macs,
+                "container_name": "veg-%s-%s" % (s_tags[0], c_tags[0]),
+                "dns_servers": [x.strip() for x in veg_service.dns_servers.split(",")],
+                "url_filter_kind": veg_service.url_filter_kind }
+
+        # add in the sync_attributes that come from the SubscriberRoot object
+
+        if o.volt and o.volt.subscriber and hasattr(o.volt.subscriber, "sync_attributes"):
+            for attribute_name in o.volt.subscriber.sync_attributes:
+                fields[attribute_name] = getattr(o.volt.subscriber, attribute_name)
+
+        return fields
+
+    def sync_fields(self, o, fields):
+        # the super causes the playbook to be run
+
+        super(SyncVEGTenant, self).sync_fields(o, fields)
+
+        # now do all of our broadbandshield stuff...
+
+        service = self.get_veg_service(o)
+        if not service:
+            # Ansible uses the service's keypair in order to SSH into the
+            # instance. It would be bad if the slice had no service.
+
+            raise Exception("Slice %s is not associated with a service" % o.instance.slice.name)
+
+        # Make sure the slice is configured properly
+        if (service != o.instance.slice.service):
+            raise Exception("Slice %s is associated with some service that is not %s" % (str(o.instance.slice), str(service)))
+
+        # only enable filtering if we have a subscriber object (see below)
+        url_filter_enable = False
+
+        # for attributes that come from CordSubscriberRoot
+        if o.volt and o.volt.subscriber:
+            url_filter_enable = o.volt.subscriber.url_filter_enable
+            url_filter_level = o.volt.subscriber.url_filter_level
+            url_filter_users = o.volt.subscriber.devices
+
+        if service.url_filter_kind == "broadbandshield":
+            # disable url_filter if there are no bbs_addrs
+            if url_filter_enable and (not fields.get("bbs_addrs",[])):
+                logger.info("disabling url_filter because there are no bbs_addrs",extra=o.tologdict())
+                url_filter_enable = False
+
+            if url_filter_enable:
+                bbs_hostname = None
+                if service.bbs_api_hostname and service.bbs_api_port:
+                    bbs_hostname = service.bbs_api_hostname
+                else:
+                    # TODO: extract from slice
+                    bbs_hostname = "cordcompute01.onlab.us"
+
+                if service.bbs_api_port:
+                    bbs_port = service.bbs_api_port
+                else:
+                    bbs_port = 8018
+
+                if not bbs_hostname:
+                    logger.info("broadbandshield is not configured",extra=o.tologdict())
+                else:
+                    tStart = time.time()
+                    bbs = BBS(o.bbs_account, "123", bbs_hostname, bbs_port)
+                    bbs.sync(url_filter_level, url_filter_users)
+
+                    if o.hpc_client_ip:
+                        logger.info("associate account %s with ip %s" % (o.bbs_account, o.hpc_client_ip),extra=o.tologdict())
+                        bbs.associate(o.hpc_client_ip)
+                    else:
+                        logger.info("no hpc_client_ip to associate",extra=o.tologdict())
+
+                    logger.info("bbs update time %d" % int(time.time()-tStart),extra=o.tologdict())
+
+
+    def run_playbook(self, o, fields):
+        ansible_hash = hashlib.md5(repr(sorted(fields.items()))).hexdigest()
+        quick_update = (o.last_ansible_hash == ansible_hash)
+
+        if ENABLE_QUICK_UPDATE and quick_update:
+            logger.info("quick_update triggered; skipping ansible recipe",extra=o.tologdict())
+        else:
+            if o.instance.isolation in ["container", "container_vm"]:
+                super(SyncVEGTenant, self).run_playbook(o, fields, "sync_vegtenant_new.yaml")
+            else:
+                if CORD_USE_VTN:
+                    super(SyncVEGTenant, self).run_playbook(o, fields, template_name="sync_vegtenant_vtn.yaml")
+                else:
+                    super(SyncVEGTenant, self).run_playbook(o, fields)
+
+        o.last_ansible_hash = ansible_hash
+
+    def delete_record(self, m):
+        pass
+
+    def handle_service_monitoringagentinfo_watch_notification(self, monitoring_agent_info):
+        if not monitoring_agent_info.service:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        if not monitoring_agent_info.target_uri:
+            logger.info("handle watch notifications for service monitoring agent info...ignoring because target_uri attribute in monitoring agent info:%s is null" % (monitoring_agent_info))
+            return
+
+        objs = VEGTenant.get_tenant_objects().all()
+        for obj in objs:
+            if obj.provider_service.id != monitoring_agent_info.service.id:
+                logger.info("handle watch notifications for service monitoring agent info...ignoring because service attribute in monitoring agent info:%s is not matching" % (monitoring_agent_info))
+                continue
+
+            instance = self.get_instance(obj)
+            if not instance:
+               logger.warn("handle watch notifications for service monitoring agent info...: No valid instance found for object %s" % (str(obj)))
+               continue
+
+            logger.info("handling watch notification for monitoring agent info:%s for VEGTenant object:%s" % (monitoring_agent_info, obj))
+
+            #Run ansible playbook to deploy and start the monitoring agent on the instance
+            fields = self.get_ansible_fields(instance)
+            fields["ansible_tag"] =  obj.__class__.__name__ + "_" + str(obj.id) + "_service_monitoring"
+            
+            #Parse the monitoring agent target_uri
+            url = urlparse(monitoring_agent_info.target_uri)
+
+            #Assuming target_uri is rabbitmq URI
+            fields["rabbit_user"] = url.username
+            fields["rabbit_password"] = url.password
+            fields["rabbit_host"] = url.hostname
+
+            template_name = "sync_monitoring_agent.yaml"
+            super(SyncVEGTenant, self).run_playbook(obj, fields, template_name)
+        pass
diff --git a/xos/synchronizer/steps/sync_vegtenant.yaml b/xos/synchronizer/steps/sync_vegtenant.yaml
new file mode 100644
index 0000000..eba2a97
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant.yaml
@@ -0,0 +1,179 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+  tasks:
+{% if full_setup %}
+  - name: Docker repository
+    copy: src=/opt/xos/synchronizers/veg/files/docker.list
+      dest=/etc/apt/sources.list.d/docker.list
+
+  - name: Import the repository key
+    apt_key: keyserver=keyserver.ubuntu.com id=36A1D7869245C8950F966E92D8576A8BA88D21E9
+
+  - name: install Docker
+    apt: name=lxc-docker state=present update_cache=yes
+
+  - name: install python-setuptools
+    apt: name=python-setuptools state=present
+
+  - name: install pip
+    easy_install: name=pip
+
+  - name: install docker-py
+    pip: name=docker-py version=0.5.3
+
+  - name: install Pipework
+    get_url: url=https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework
+       dest=/usr/local/bin/pipework
+       mode=0755
+
+  - name: make sure /etc/dnsmasq.d exists
+    file: path=/etc/dnsmasq.d state=directory owner=root group=root
+
+  - name: Disable resolvconf service
+    shell: service resolvconf stop; echo manual > /etc/init/resolvconf.override; rm -f /etc/resolv.conf
+
+  - name: Install resolv.conf
+    copy: src=/opt/xos/synchronizers/veg/files/vm-resolv.conf
+      dest=/etc/resolv.conf
+
+  - name: Check whether the veg_stats_notifier cron job is already running ([] avoids matching the pgrep process itself)
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+#  - name: make sure ~/bin exists
+#    file: path=~/bin state=directory owner=root group=root
+#    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=/usr/local/sbin/veg_stats_notifier.py
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: sudo python /usr/local/sbin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    when: cron_job_pids_count.stdout == "0"
+{% endif %}
+
+  - name: vEG upstart
+    template: src=/opt/xos/synchronizers/veg/templates/veg.conf.j2 dest=/etc/init/veg-{{ s_tags[0] }}-{{ c_tags[0] }}.conf
+
+  - name: vEG startup script
+    template: src=/opt/xos/synchronizers/veg/templates/start-veg.sh.j2 dest=/usr/local/sbin/start-veg-{{ s_tags[0] }}-{{ c_tags[0] }}.sh mode=0755
+    notify:
+#    - restart veg
+     - stop veg
+     - remove container
+     - start veg
+
+  - name: create /etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d
+    file: path=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d state=directory owner=root group=root
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/etc/veg-{{ s_tags[0] }}-{{ c_tags[0] }}/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+# These are samples; they are not necessary for the demo to function correctly
+
+#  - name: networking info
+#    template: src=/opt/xos/synchronizers/veg/templates/vlan_sample.j2 dest=/etc/vlan_sample owner=root group=root
+
+#  - name: firewall info
+#    template: src=/opt/xos/synchronizers/veg/templates/firewall_sample.j2 dest=/etc/firewall_sample owner=root group=root
+
+  - name: Make sure vEG service is running
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec veg-{{ s_tags[0] }}-{{ c_tags[0] }} killall dnsmasq
+
+  - name: restart veg
+    shell: service veg-{{ s_tags[0] }}-{{ c_tags[0] }} stop; sleep 1; service veg-{{ s_tags[0] }}-{{ c_tags[0] }} start
+
+  - name: stop veg
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=stopped
+
+  - name: remove container
+    docker: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=absent image=docker-veg
+
+  - name: start veg
+    service: name=veg-{{ s_tags[0] }}-{{ c_tags[0] }} state=started
+
diff --git a/xos/synchronizer/steps/sync_vegtenant_new.yaml b/xos/synchronizer/steps/sync_vegtenant_new.yaml
new file mode 100644
index 0000000..daa30f8
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant_new.yaml
@@ -0,0 +1,136 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: {{ username }}
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+  tasks:
+  - name: Check whether the veg_stats_notifier job is already running ([] keeps pgrep from matching its own shell)
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+  - name: make sure ~/bin exists
+    file: path=~/bin state=directory owner=root group=root
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Copy cron job to destination
+    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+      dest=~/bin/veg_stats_notifier.py
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: install python-kombu
+    apt: name=python-kombu state=present
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: Initiate veg_stats_notifier cron job
+    command: python ~/bin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+    async: 9999999999999999
+    poll: 0
+    when: cron_job_pids_count.stdout == "0"
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: create directory for "safe" config
+    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory
+
+  - name: dnsmasq "safe" config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: copy base ufw files
+    synchronize: src=/opt/xos/synchronizers/veg/files/etc/ufw/ dest=/var/container_volumes/{{ container_name }}/etc/ufw/
+    notify:
+    - reload ufw
+
+  - name: redirection rules for safe DNS
+    template: src=/opt/xos/synchronizers/veg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/etc/ufw/before.rules owner=root group=root
+    notify:
+    - reload ufw
+
+  - name: base ufw setup uses /etc/rc.local
+    copy: src=/opt/xos/synchronizers/veg/files/etc/rc.local dest=/var/container_volumes/{{ container_name }}/etc/ owner=root group=root
+    notify:
+    - copy in /etc/rc.local
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec {{ container_name }} /usr/bin/killall dnsmasq
+
+  - name: reload ufw
+    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /usr/sbin/ufw reload"
+
+  # Use docker cp instead of single-file volume
+  # Changes to an externally mounted single file don't show up inside the container:
+  # Ansible likely deletes and recreates the file, so the container keeps the old version
+  - name: copy in /etc/rc.local
+    shell: docker cp /var/container_volumes/{{ container_name }}/etc/rc.local {{ container_name }}:/etc/
diff --git a/xos/synchronizer/steps/sync_vegtenant_vtn.yaml b/xos/synchronizer/steps/sync_vegtenant_vtn.yaml
new file mode 100644
index 0000000..fed64ab
--- /dev/null
+++ b/xos/synchronizer/steps/sync_vegtenant_vtn.yaml
@@ -0,0 +1,255 @@
+---
+- hosts: {{ instance_name }}
+  #gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  vars:
+      container_name: {{ container_name }}
+      cdn_enable: {{ cdn_enable }}
+      dnsdemux_ip: {{ dnsdemux_ip }}
+      firewall_enable: {{ firewall_enable }}
+      url_filter_enable: {{ url_filter_enable }}
+      docker_remote_image_name: {{ docker_remote_image_name }}
+      docker_local_image_name: {{ docker_local_image_name }}
+      docker_opts: {{ docker_opts }}
+      c_tags:
+        {% for c_tag in c_tags %}
+        - {{ c_tag }}
+        {% endfor %}
+      s_tags:
+        {% for s_tag in s_tags %}
+        - {{ s_tag }}
+        {% endfor %}
+      firewall_rules:
+        {% for firewall_rule in firewall_rules.split("\n") %}
+        - {{ firewall_rule }}
+        {% endfor %}
+      cdn_prefixes:
+        {% for prefix in cdn_prefixes %}
+        - {{ prefix }}
+        {% endfor %}
+      bbs_addrs:
+        {% for bbs_addr in bbs_addrs %}
+        - {{ bbs_addr }}
+        {% endfor %}
+      dns_servers:
+        {% for dns_server in dns_servers %}
+        - {{ dns_server }}
+        {% endfor %}
+      nat_ip: {{ nat_ip }}
+      nat_mac: {{ nat_mac }}
+      lan_ip: {{ lan_ip }}
+      lan_mac: {{ lan_mac }}
+      wan_ip: {{ wan_ip }}
+      wan_mac: {{ wan_mac }}
+      wan_container_ip: {{ wan_container_ip }}
+      wan_container_netbits: {{ wan_container_netbits }}
+      wan_container_mac: {{ wan_container_mac }}
+      wan_container_gateway_ip: {{ wan_container_gateway_ip }}
+      wan_vm_ip: {{ wan_vm_ip }}
+      wan_vm_mac: {{ wan_vm_mac }}
+      wan_next_hop: 10.0.1.253   # FIX ME
+      private_ip: {{ private_ip }}
+      private_mac: {{ private_mac }}
+      hpc_client_ip: {{ hpc_client_ip }}
+      hpc_client_mac: {{ hpc_client_mac }}
+      keystone_tenant_id: {{ keystone_tenant_id }}
+      keystone_user_id: {{ keystone_user_id }}
+      rabbit_user: {{ rabbit_user }}
+      rabbit_password: {{ rabbit_password }}
+      rabbit_host: {{ rabbit_host }}
+      safe_browsing:
+        {% for mac in safe_browsing_macs %}
+        - {{ mac }}
+        {% endfor %}
+      uplink_speed: {{ uplink_speed }}
+      downlink_speed: {{ downlink_speed }}
+      status: {{ status }}
+      enable_uverse: {{ enable_uverse }}
+      url_filter_kind: {{ url_filter_kind }}
+
+
+  tasks:
+  - name: Add hostname to /etc/hosts
+    lineinfile: dest=/etc/hosts
+      regexp='^127\.0\.0\.1'
+      line="127.0.0.1 localhost {{ '{{' }} ansible_hostname {{ '}}' }}"
+      owner=root group=root mode=0644
+
+  - name: Verify that bridge-utils is installed
+    shell: stat /sbin/brctl
+
+  - name: Verify that docker is installed
+    shell: stat /usr/bin/docker
+
+  - name: Check to see if network is setup
+    stat: path=/root/network_is_setup
+    register: network_is_setup
+
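+  # One-time WAN data-plane setup: create a VLAN 500 subinterface on eth0, bridge it
+  # into br-wan with the VM's WAN MAC and IP, and point the default route at the WAN gateway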
+  - name: set up the network
+    shell: "{{ '{{' }} item {{ '}}' }}"
+    with_items:
+       - ip link del link eth0 eth0.500 || true
+       - ip link add link eth0 eth0.500 type vlan id 500
+       - ip link set eth0.500 up
+       - ifconfig br-wan down || true
+       - brctl delbr br-wan || true
+       - brctl addbr br-wan
+       - brctl addif br-wan eth0.500
+       - ifconfig br-wan hw ether {{ wan_vm_mac }}
+       - ip addr add {{ wan_vm_ip }}/{{ wan_container_netbits }} dev br-wan
+       - ip link set br-wan up
+       - ip route del default || true
+       - ip route add default via {{ wan_container_gateway_ip }}
+       - ip link set dev br-wan promisc on
+    when: network_is_setup.stat.exists == False
+
+  - name: Remember that the network is setup, so we never do the above again
+    shell: touch /root/network_is_setup
+
+{% if full_setup %}
+  - name: Check to see if environment is setup
+    stat: path=/root/environment_is_setup
+    register: environment_is_setup
+
+# Everything here is now baked into the vEG image
+# Leave this spot in place for future temporary setup stuff
+
+  - name: Remember that the environment is setup, so we never do the above again
+    shell: touch /root/environment_is_setup
+
+  - name: Check whether the veg_stats_notifier job is already running ([] keeps pgrep from matching its own shell)
+    shell: pgrep -f [v]eg_stats_notifier | wc -l
+    register: cron_job_pids_count
+
+#  - name: DEBUG
+#    debug: var=cron_job_pids_count.stdout
+
+#  - name: make sure ~/bin exists
+#    file: path=~/bin state=directory owner=root group=root
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: Copy cron job to destination
+#    copy: src=/opt/xos/synchronizers/veg/veg_stats_notifier.py
+#      dest=/usr/local/sbin/veg_stats_notifier.py
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: install python-kombu
+#    apt: name=python-kombu state=present
+#    when: cron_job_pids_count.stdout == "0"
+
+#  - name: Initiate veg_stats_notifier cron job
+#    command: sudo python /usr/local/sbin/veg_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vegservice_rabbit_exchange='vegservice'
+#    async: 9999999999999999
+#    poll: 0
+#    when: cron_job_pids_count.stdout == "0"
+{% endif %}
+
+  - name: Set docker options
+    template: src=/opt/xos/synchronizers/veg/templates/docker.j2 dest=/etc/default/docker
+    notify:
+     - restart docker
+
+  - name: vEG upstart
+    template: src=/opt/xos/synchronizers/veg/templates/veg.conf.j2 dest=/etc/init/{{ container_name }}.conf
+
+  - name: vEG startup script
+    template: src=/opt/xos/synchronizers/veg/templates/start-veg-vtn.sh.j2 dest=/usr/local/sbin/start-{{ container_name }}.sh mode=0755
+    notify:
+#    - restart veg
+     - stop veg
+     - remove container
+     - start veg
+
+  - name: create /var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/
+    file: path=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe state=directory owner=root group=root
+
+  - name: vEG basic dnsmasq config
+    copy: src=/opt/xos/synchronizers/veg/files/veg.dnsmasq dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/veg.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: dnsmasq "safe" config
+    template: src=/opt/xos/synchronizers/veg/templates/dnsmasq_safe_servers.j2 dest=/var/container_volumes/{{ container_name }}/etc/dnsmasq.d/safe/servers.conf owner=root group=root
+    notify:
+    - restart dnsmasq
+
+  - name: create /var/container_volumes/{{ container_name }}/mount/
+    file: path=/var/container_volumes/{{ container_name }}/mount state=directory owner=root group=root
+
+  - name: redirection rules for safe DNS
+    template: src=/opt/xos/synchronizers/veg/templates/before.rules.j2 dest=/var/container_volumes/{{ container_name }}/mount/before.rules owner=root group=root mode=0644
+    notify:
+    - reload ufw
+
+  - name: base ufw setup uses /etc/rc.local
+    template: src=/opt/xos/synchronizers/veg/templates/rc.local.j2 dest=/var/container_volumes/{{ container_name }}/mount/rc.local owner=root group=root mode=0755
+    notify:
+    - rerun /etc/rc.local
+
+  - name: create directory for local programs
+    file: path=/var/container_volumes/{{ container_name }}/usr/local/sbin state=directory
+
+  - name: bandwidth limit script
+    template: src=/opt/xos/synchronizers/veg/templates/bwlimit.sh.j2 dest=/var/container_volumes/{{ container_name }}/usr/local/sbin/bwlimit.sh owner=root group=root mode=0755
+    notify:
+    - reset bwlimits
+
+  - name: create directory for simple webserver
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message state=directory
+
+  - name: copy simple webserver
+    copy: src=/opt/xos/synchronizers/veg/files/etc/service/ dest=/var/container_volumes/{{ container_name }}/etc/service/ owner=root group=root
+    when: status != "enabled"
+
+  - name: make webserver script executable
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run mode=0755
+    when: status != "enabled"
+
+  - name: generate the message page
+    template: src=/opt/xos/synchronizers/veg/templates/message.html.j2 dest=/var/container_volumes/{{ container_name }}/etc/service/message/message.html owner=root group=root mode=0644
+    when: status != "enabled"
+    #notify: restart veg
+
+  - name: remove simple webserver
+    file: path=/var/container_volumes/{{ container_name }}/etc/service/message/run state=absent
+    when: status == "enabled"
+    #notify: restart veg
+
+  - name: Make sure vEG service is running
+    service: name={{ container_name }} state=started
+
+  handlers:
+  # Dnsmasq is automatically restarted in the container
+  - name: restart dnsmasq
+    shell: docker exec {{ container_name }} killall dnsmasq
+
+  - name: stop veg
+    service: name={{ container_name }} state=stopped
+
+  - name: remove container
+    docker: name={{ container_name }} state=absent image=docker-veg
+
+  - name: start veg
+    service: name={{ container_name }} state=started
+
+  - name: reload ufw
+    shell: docker exec {{ container_name }} bash -c "/sbin/iptables -t nat -F PREROUTING; /sbin/iptables -t nat -F POSTROUTING; /usr/sbin/ufw reload"
+
+  - name: rerun /etc/rc.local
+    shell: docker exec {{ container_name }} bash -c "/etc/rc.local"
+
+  - name: reset bwlimits
+    shell: docker exec {{ container_name }} bash -c "/usr/local/sbin/bwlimit.sh restart"
+
+  - name: restart veg
+    shell: service {{ container_name }} stop; sleep 1; service {{ container_name }} start
+
+  - name: restart docker
+    shell: service docker restart
diff --git a/xos/synchronizer/steps/test.yaml b/xos/synchronizer/steps/test.yaml
new file mode 100644
index 0000000..fc8251d
--- /dev/null
+++ b/xos/synchronizer/steps/test.yaml
@@ -0,0 +1,7 @@
+---
+- hosts: {{ instance_name }}
+  connection: ssh
+  user: ubuntu
+  tasks:
+    - name: foobar
+      shell: echo foo > /tmp/foobar
diff --git a/xos/synchronizer/stop.sh b/xos/synchronizer/stop.sh
new file mode 100755
index 0000000..0fc7b5e
--- /dev/null
+++ b/xos/synchronizer/stop.sh
@@ -0,0 +1 @@
+pkill -9 -f veg-synchronizer.py
diff --git a/xos/synchronizer/supervisor/veg-observer.conf b/xos/synchronizer/supervisor/veg-observer.conf
new file mode 100644
index 0000000..d5868a6
--- /dev/null
+++ b/xos/synchronizer/supervisor/veg-observer.conf
@@ -0,0 +1,2 @@
+[program:veg-observer]
+command=python /opt/xos/synchronizers/veg/veg-synchronizer.py -C /opt/xos/synchronizers/veg/veg_synchronizer_config
diff --git a/xos/synchronizer/templates/before.rules.j2 b/xos/synchronizer/templates/before.rules.j2
new file mode 100644
index 0000000..b60aaef
--- /dev/null
+++ b/xos/synchronizer/templates/before.rules.j2
@@ -0,0 +1,101 @@
+#
+# rules.before
+#
+# Rules that should be run before the ufw command line added rules. Custom
+# rules should be added to one of these chains:
+#   ufw-before-input
+#   ufw-before-output
+#   ufw-before-forward
+#
+
+# nat Table rules
+*nat
+:POSTROUTING ACCEPT [0:0]
+
+# Forward traffic from eth1 through eth0.
+-A POSTROUTING -o eth0 -j MASQUERADE
+
+# Set up NAT for CDN services
+-A POSTROUTING -o eth2 -j MASQUERADE
+
+# DNS safe browsing
+{% if safe_browsing %}
+{% for mac in safe_browsing %}
+-A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p udp --dport 53 -j REDIRECT --to-port 5353
+-A PREROUTING -i eth1 -m mac --mac-source {{ mac }} -p tcp --dport 53 -j REDIRECT --to-port 5353
+{% endfor %}
+{% endif %}
+
+{% if status != "enabled" %}
+-A PREROUTING -i eth1 -p tcp --dport 80 -j REDIRECT --to-port 8000
+{% endif %}
+
+# don't delete the 'COMMIT' line or these nat table rules won't be processed
+COMMIT
+
+# Don't delete these required lines, otherwise there will be errors
+*filter
+:ufw-before-input - [0:0]
+:ufw-before-output - [0:0]
+:ufw-before-forward - [0:0]
+:ufw-not-local - [0:0]
+# End required lines
+
+# allow all on loopback
+-A ufw-before-input -i lo -j ACCEPT
+-A ufw-before-output -o lo -j ACCEPT
+
+# quickly process packets for which we already have a connection
+-A ufw-before-input -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+-A ufw-before-output -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+-A ufw-before-forward -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
+
+# drop INVALID packets (logs these in loglevel medium and higher)
+-A ufw-before-input -m conntrack --ctstate INVALID -j ufw-logging-deny
+-A ufw-before-input -m conntrack --ctstate INVALID -j DROP
+
+# ok icmp codes for INPUT
+-A ufw-before-input -p icmp --icmp-type destination-unreachable -j ACCEPT
+-A ufw-before-input -p icmp --icmp-type source-quench -j ACCEPT
+-A ufw-before-input -p icmp --icmp-type time-exceeded -j ACCEPT
+-A ufw-before-input -p icmp --icmp-type parameter-problem -j ACCEPT
+-A ufw-before-input -p icmp --icmp-type echo-request -j ACCEPT
+
+# ok icmp code for FORWARD
+-A ufw-before-forward -p icmp --icmp-type destination-unreachable -j ACCEPT
+-A ufw-before-forward -p icmp --icmp-type source-quench -j ACCEPT
+-A ufw-before-forward -p icmp --icmp-type time-exceeded -j ACCEPT
+-A ufw-before-forward -p icmp --icmp-type parameter-problem -j ACCEPT
+-A ufw-before-forward -p icmp --icmp-type echo-request -j ACCEPT
+
+# allow dhcp client to work
+-A ufw-before-input -p udp --sport 67 --dport 68 -j ACCEPT
+
+#
+# ufw-not-local
+#
+-A ufw-before-input -j ufw-not-local
+
+# if LOCAL, RETURN
+-A ufw-not-local -m addrtype --dst-type LOCAL -j RETURN
+
+# if MULTICAST, RETURN
+-A ufw-not-local -m addrtype --dst-type MULTICAST -j RETURN
+
+# if BROADCAST, RETURN
+-A ufw-not-local -m addrtype --dst-type BROADCAST -j RETURN
+
+# all other non-local packets are dropped
+-A ufw-not-local -m limit --limit 3/min --limit-burst 10 -j ufw-logging-deny
+-A ufw-not-local -j DROP
+
+# allow MULTICAST mDNS for service discovery (be sure the MULTICAST line above
+# is uncommented)
+-A ufw-before-input -p udp -d 224.0.0.251 --dport 5353 -j ACCEPT
+
+# allow MULTICAST UPnP for service discovery (be sure the MULTICAST line above
+# is uncommented)
+-A ufw-before-input -p udp -d 239.255.255.250 --dport 1900 -j ACCEPT
+
+# don't delete the 'COMMIT' line or these rules won't be processed
+COMMIT
diff --git a/xos/synchronizer/templates/bwlimit.sh.j2 b/xos/synchronizer/templates/bwlimit.sh.j2
new file mode 100644
index 0000000..b267ada
--- /dev/null
+++ b/xos/synchronizer/templates/bwlimit.sh.j2
@@ -0,0 +1,131 @@
+#!/bin/bash
+#  tc uses the following units when passed as a parameter.
+#  kbps: Kilobytes per second
+#  mbps: Megabytes per second
+#  kbit: Kilobits per second
+#  mbit: Megabits per second
+#  bps: Bytes per second
+#       Amounts of data can be specified in:
+#       kb or k: Kilobytes
+#       mb or m: Megabytes
+#       mbit: Megabits
+#       kbit: Kilobits
+#  To get a byte figure from a value given in bits, divide by 8.
+#
+
+TC=/sbin/tc
+
+WAN=eth0             # External (WAN side) interface
+LAN=eth1             # Customer (LAN side) interface
+
+MAXRATE=10gbit       # Maximum upload/download rate
+DNLD={{ downlink_speed }}          # DOWNLOAD Limit
+UPLD={{ uplink_speed }}            # UPLOAD Limit
+
+[ "$DNLD" == "None" ] && DNLD=$MAXRATE
+[ "$UPLD" == "None" ] && UPLD=$MAXRATE
+
+start() {
+
+# We'll use Hierarchical Token Bucket (HTB) to shape bandwidth.
+# For detailed configuration options, please consult Linux man
+# page.
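+#
+# Layout on each interface: a root HTB qdisc (1:) with a parent class (1:1) at MAXRATE,
+# a default class (1:30) capped at the configured limit, and an exempt class (1:50)
+# allowed up to MAXRATE; each leaf class gets an SFQ qdisc for per-flow fairness.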
+
+    #
+    # WAN side (upload limiting)
+    #
+    $TC qdisc add dev $WAN root handle 1: htb default 30
+    $TC class add dev $WAN parent 1: classid 1:1 htb rate $MAXRATE burst 15k
+
+    # The default class
+    $TC class add dev $WAN parent 1:1 classid 1:30 htb rate 1kbit ceil $UPLD burst 15k
+    $TC qdisc add dev $WAN parent 1:30 handle 30: sfq perturb 10
+
+    # This class is exempt from the upload limit
+    $TC class add dev $WAN parent 1:1 classid 1:50 htb rate 1kbit ceil $MAXRATE burst 15k
+    $TC qdisc add dev $WAN parent 1:50 handle 50: sfq perturb 10
+
+    #
+    # LAN side (download limiting)
+    #
+    $TC qdisc add dev $LAN root handle 1: htb default 30
+    $TC class add dev $LAN parent 1: classid 1:1 htb rate $MAXRATE burst 15k
+
+    # The default class
+    $TC class add dev $LAN parent 1:1 classid 1:30 htb rate 1kbit ceil $DNLD burst 15k
+    $TC qdisc add dev $LAN parent 1:30 handle 30: sfq perturb 10
+
+    # This class is exempt from the download limit
+    $TC class add dev $LAN parent 1:1 classid 1:50 htb rate 1kbit ceil $MAXRATE burst 15k
+    $TC qdisc add dev $LAN parent 1:50 handle 50: sfq perturb 10
+
+}
+
+stop() {
+
+# Stop the bandwidth shaping.
+    $TC qdisc del dev $WAN root
+    $TC qdisc del dev $LAN root
+
+}
+
+restart() {
+
+# Self-explanatory.
+    stop
+    sleep 1
+    start
+
+}
+
+show() {
+
+# Display traffic control status.
+    echo "Download ($LAN):"
+    $TC -s class show dev $LAN
+
+    echo ""
+    echo "Upload ($WAN):"
+    $TC -s class show dev $WAN
+
+}
+
+case "$1" in
+
+  start)
+
+    echo -n "Starting bandwidth shaping: "
+    start
+    echo "done"
+    ;;
+
+  stop)
+
+    echo -n "Stopping bandwidth shaping: "
+    stop
+    echo "done"
+    ;;
+
+  restart)
+
+    echo -n "Restarting bandwidth shaping: "
+    restart
+    echo "done"
+    ;;
+
+  show)
+
+    echo "Bandwidth shaping status:"
+    show
+    echo ""
+    ;;
+
+  *)
+
+    echo "Usage: $0 {start|stop|restart|show}"
+    ;;
+
+esac
+
+exit 0
diff --git a/xos/synchronizer/templates/dnsmasq_safe_servers.j2 b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
new file mode 100644
index 0000000..0b3c807
--- /dev/null
+++ b/xos/synchronizer/templates/dnsmasq_safe_servers.j2
@@ -0,0 +1,16 @@
+# This file is autogenerated by the vEG synchronizer.
+# It contains the list of DNS servers for dnsmasq to use.
+no-resolv
+
+{% if cdn_enable %}
+{% if cdn_prefixes %}
+# CDN
+{% for prefix in cdn_prefixes %}
+server=/{{ prefix }}/{{ dnsdemux_ip }}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+# use the OpenDNS FamilyShield resolvers for safe browsing
+server=208.67.222.123
+server=208.67.220.123
diff --git a/xos/synchronizer/templates/dnsmasq_servers.j2 b/xos/synchronizer/templates/dnsmasq_servers.j2
new file mode 100644
index 0000000..7ecb319
--- /dev/null
+++ b/xos/synchronizer/templates/dnsmasq_servers.j2
@@ -0,0 +1,26 @@
+# This file is autogenerated by the vEG synchronizer.
+# It contains the list of DNS servers for dnsmasq to use.
+no-resolv
+
+{% if cdn_enable %}
+{% if cdn_prefixes %}
+# CDN
+{% for prefix in cdn_prefixes %}
+server=/{{ prefix }}/{{ dnsdemux_ip }}
+{% endfor %}
+{% endif %}
+{% endif %}
+
+{% if url_filter_kind=="answerx" %}
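+# cache-size=0 disables the local DNS cache; add-mac forwards the requesting
+# client's MAC address upstream with each query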
+cache-size=0
+add-mac
+{% endif %}
+
+# temporary for ONS demo
+address=/z.cdn.turner.com/207.141.192.134
+
+# use the configured upstream DNS servers
+{% for dns_server in dns_servers %}
+server={{ dns_server }}
+{% endfor %}
+
diff --git a/xos/synchronizer/templates/docker.j2 b/xos/synchronizer/templates/docker.j2
new file mode 100644
index 0000000..48f5c5c
--- /dev/null
+++ b/xos/synchronizer/templates/docker.j2
@@ -0,0 +1,17 @@
+# Docker Upstart and SysVinit configuration file
+
+# Customize location of Docker binary (especially for development testing).
+#DOCKER="/usr/local/bin/docker"
+
+# Use DOCKER_OPTS to modify the daemon startup options.
+#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4"
+
+DOCKER_OPTS="{{ docker_opts }}"
+
+#"--insecure-registry 10.1.0.1:5000"
+
+# If you need Docker to use an HTTP proxy, it can also be specified here.
+#export http_proxy="http://127.0.0.1:3128/"
+
+# This is also a handy place to tweak where Docker's temporary files go.
+#export TMPDIR="/mnt/bigdrive/docker-tmp"
diff --git a/xos/synchronizer/templates/firewall_sample.j2 b/xos/synchronizer/templates/firewall_sample.j2
new file mode 100644
index 0000000..ce85e68
--- /dev/null
+++ b/xos/synchronizer/templates/firewall_sample.j2
@@ -0,0 +1,5 @@
+firewall_enable = {{ firewall_enable }}
+
+{% for firewall_rule in firewall_rules %}
+{{ firewall_rule }}
+{% endfor %}
diff --git a/xos/synchronizer/templates/message.html.j2 b/xos/synchronizer/templates/message.html.j2
new file mode 100644
index 0000000..eb4497a
--- /dev/null
+++ b/xos/synchronizer/templates/message.html.j2
@@ -0,0 +1,111 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <title>Service Suspended</title>
+  <style>
+    
+    .row {
+      width: 100%;
+    }
+
+    .col-sm-offset-2 {
+      margin-left: 16.66666667%;
+    }
+
+    .col-sm-8 {
+      width: 66.66666667%;
+    }
+
+    .alert-danger {
+        color: #a94442;
+        background-color: #f2dede;
+        border-color: #a94442;
+    }
+    .alert {
+        padding: 15px;
+        margin-bottom: 20px;
+        border: 1px solid transparent;
+        border-radius: 4px;
+    }
+
+    body {
+      background-size: cover;
+      background-color: #00BFEC;
+      font-family: "Helvetica Neue",Helvetica,Arial,sans-serif;
+      font-size: 18px;
+      line-height: 1.42857143;
+    }
+
+    .vertical-center {
+      min-height: 100%;  /* Fallback for browsers that do NOT support the vh unit */
+      min-height: 100vh; /* These two lines are counted as one :-)       */
+
+      display: flex;
+      align-items: center;
+    }
+
+    .jumbotron {
+      padding: 60px;
+      border-radius: 6px;
+      background-color: #eee;
+      box-shadow: 4px 4px 18px black;
+    }
+
+    .cord-logo-title{
+      font-size: 150px;
+      display: inline-block;
+      color: #007EC4;
+    }
+
+    path {
+      fill: #B2181E;
+    }
+
+    #cord-logo {
+      transform: scale(1.3)
+    }
+
+    @media only screen 
+    and (min-device-width : 768px) 
+    and (max-device-width : 1024px)  {
+      #cord-logo {
+        transform: scale(1.2)
+      }
+      .cord-logo-title{
+        font-size: 100px;
+      }
+    }
+  </style>
+</head>
+<body>
+  
+  <div class="container vertical-center">
+    <div class="row">
+      <div class="col-sm-8 col-sm-offset-2">
+        <div class="jumbotron">
+          <div class="cord-logo-title">
+            <svg height="150" width="150">
+              <path id="cord-logo" d="M92.5,62.3l-33,33,2.5,2.5c4.1,4.1,7.4,3.6,11.2-.1L95.9,75l-4.5-4.5,4.7-4.7-3.6-3.6Zm2.6,7L98.4,66l3.3,3.3-3.3,3.3-3.3-3.3ZM94.5,60l4.9-4.9,4.9,4.9-4.9,4.9ZM36.2,36.1L18.6,53.8c-7.8,7.8-5.8,17.4-2.4,22l-2.2-2.2c-10.6-10.6-11.2-20,0-31.2L28.2,28.1L31.3,25l8,8-3.1,3.1ZM55.5,55.4l3.6-3.6L66.9,44l-8-8l-2.5,2.5-5.2,5.2l-3.6,3.6L33.2,61.6C22,72.7,22.5,82.2,33.2,92.8L35.4,95c-3.4-4.5-5.4-14.1,2.4-22L55.5,55.4ZM50.7,21.7l-8-8L35,21.2l8,8,7.6-7.6ZM62.8,9.6L55.4,17l-8-8,7.4-7.4,8,8Zm0.7,18.3-7.6,7.6-8-8,7.6-7.6,8,8Zm26.1-6.6-8.1,8.1-8-8,8.1-8.1,8,8ZM79.3,31.5l-7.4,7.4-8-8,7.4-7.4,8,8ZM45.7,45.6L54.3,37l-8-8-8.6,8.6L23.4,51.8C12.2,63,12.8,72.4,23.4,83l2.2,2.2c-3.4-4.5-5.4-14.1,2.4-22ZM34.9,80.7l20.6,20.5c2,2,4.6,4.1,7.9,3.2-2.9,2.9-8.9,1.7-11.9-1.3L35.1,86.8,35,86.6H34.9l-0.8-.8a15,15,0,0,1,.1-1.9,14.7,14.7,0,0,1,.7-3.2Zm-0.6,7.4a21.3,21.3,0,0,0,5.9,11.7l5.7,5.7c3,3,9,4.1,11.9,1.3-3.3.9-5.9-1.2-7.9-3.2L34.3,88.1Zm3.5-12.4a16.6,16.6,0,0,0-2.3,3.6L57,100.8c3,3,9,4.1,11.9,1.3-3.3.9-5.9-1.2-7.9-3.2Z" />
+              Sorry, your browser does not support inline SVG.
+            </svg>
+            CORD
+          </div>
+          <div class="alert alert-danger">
+            {% if status == "delinquent" %}
+            Your account is delinquent.  Please visit the customer portal to pay your bill.
+            {% elif status == "copyrightviolation" %}
+            Someone in your home has been illegally downloading copyrighted material.
+            Please visit the customer portal and perform the Copyright Training course.
+            {% else %}
+            Your service has been suspended.  Please visit the customer portal to resume.
+            {% endif %}
+          </div>
+        </div>
+      </div>
+    </div>
+  </div>
+
+
+</body>
+</html>
\ No newline at end of file
diff --git a/xos/synchronizer/templates/rc.local.j2 b/xos/synchronizer/templates/rc.local.j2
new file mode 100755
index 0000000..4226a48
--- /dev/null
+++ b/xos/synchronizer/templates/rc.local.j2
@@ -0,0 +1,28 @@
+#!/bin/sh -e
+#
+# rc.local
+#
+# This script is executed at the end of each multiuser runlevel.
+# Make sure that the script will "exit 0" on success or any other
+# value on error.
+#
+# In order to enable or disable this script just change the execution
+# bits.
+#
+# By default this script does nothing.
+
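+# Firewall bootstrap: allow DHCP and the 192.168.0.0/24 LAN, then allow or deny
+# forwarding from the LAN (eth1) out eth0/eth2 depending on the subscriber's status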
+ufw enable
+ufw allow bootps
+ufw allow from 192.168.0.0/24
+{% if status == "enabled" %}
+ufw route allow in on eth1 out on eth0
+ufw route allow in on eth1 out on eth2
+{% else %}
+ufw route deny in on eth1 out on eth0
+ufw route deny in on eth1 out on eth2
+{% endif %}
+
+BWLIMIT=/usr/local/sbin/bwlimit.sh
+[ -e $BWLIMIT ] && $BWLIMIT restart || true
+
+exit 0
diff --git a/xos/synchronizer/templates/start-veg-vtn.sh.j2 b/xos/synchronizer/templates/start-veg-vtn.sh.j2
new file mode 100644
index 0000000..a4f50f7
--- /dev/null
+++ b/xos/synchronizer/templates/start-veg-vtn.sh.j2
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+function mac_to_iface {
+    MAC=$1
+    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
+}
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
+
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VEG=veg-$STAG-$CTAG
+
+docker inspect $VEG > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    docker pull {{ docker_remote_image_name }}
+    docker run -d --name=$VEG --privileged=true --net=none \
+    -v /var/container_volumes/$VEG/mount:/mount:ro \
+    -v /var/container_volumes/$VEG/etc/dnsmasq.d:/etc/dnsmasq.d:ro \
+    -v /var/container_volumes/$VEG/etc/service/message:/etc/service/message \
+    -v /var/container_volumes/$VEG/usr/local/sbin:/usr/local/sbin:ro \
+    {{ docker_local_image_name }}
+else
+    docker start $VEG
+fi
+
+# Set up networking via pipework
+WAN_IFACE=br-wan
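+# pipework args: <host_iface> -i <container_iface> <container> <ip>/<prefix>@<gateway> <mac>;
+# the "docker exec ... ifconfig eth0" check makes this a no-op if eth0 already exists in the container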
+docker exec $VEG ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VEG {{ wan_container_ip }}/{{ wan_container_netbits }}@{{ wan_container_gateway_ip }} {{ wan_container_mac }}
+
+LAN_IFACE=eth0
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+    ifconfig $LAN_IFACE.$STAG up
+    docker exec $VEG ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VEG 192.168.0.1/24 @$CTAG
+fi
+
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VEG ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VEG {{ hpc_client_ip }}/24
+
+# Make sure VM's eth0 (hpc_client) has no IP address
+#ifconfig $HPC_IFACE 0.0.0.0
+
+# Attach to container
+docker start -a $VEG
diff --git a/xos/synchronizer/templates/start-veg.sh.j2 b/xos/synchronizer/templates/start-veg.sh.j2
new file mode 100755
index 0000000..5f21abf
--- /dev/null
+++ b/xos/synchronizer/templates/start-veg.sh.j2
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+function mac_to_iface {
+    MAC=$1
+    ifconfig|grep $MAC| awk '{print $1}'|grep -v '\.'
+}
+
+iptables -L > /dev/null
+ip6tables -L > /dev/null
+
+STAG={{ s_tags[0] }}
+CTAG={{ c_tags[0] }}
+VEG=veg-$STAG-$CTAG
+
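+# If the container doesn't exist yet (docker inspect fails), pull the image and create it;
+# otherwise just start the existing container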
+docker inspect $VEG > /dev/null 2>&1
+if [ "$?" == 1 ]
+then
+    docker pull andybavier/docker-veg
+    docker run -d --name=$VEG --privileged=true --net=none -v /etc/$VEG/dnsmasq.d:/etc/dnsmasq.d andybavier/docker-veg
+else
+    docker start $VEG
+fi
+
+# Set up networking via pipework
+WAN_IFACE=$( mac_to_iface {{ wan_mac }} )
+docker exec $VEG ifconfig eth0 >> /dev/null || pipework $WAN_IFACE -i eth0 $VEG {{ wan_ip }}/24@{{ wan_next_hop }} {{ wan_container_mac }}
+
+# LAN_IFACE=$( mac_to_iface {{ lan_mac }} )
+# Need to encapsulate VLAN traffic so that Neutron doesn't eat it
+# Assumes that br-lan has been set up appropriately by a previous step
+LAN_IFACE=br-lan
+ifconfig $LAN_IFACE >> /dev/null
+if [ "$?" == 0 ]
+then
+    ifconfig $LAN_IFACE.$STAG >> /dev/null || ip link add link $LAN_IFACE name $LAN_IFACE.$STAG type vlan id $STAG
+    ifconfig $LAN_IFACE.$STAG up
+    docker exec $VEG ifconfig eth1 >> /dev/null || pipework $LAN_IFACE.$STAG -i eth1 $VEG 192.168.0.1/24 @$CTAG
+fi
+
+#HPC_IFACE=$( mac_to_iface {{ hpc_client_mac }} )
+#docker exec $VEG ifconfig eth2 >> /dev/null || pipework $HPC_IFACE -i eth2 $VEG {{ hpc_client_ip }}/24
+
+# Make sure VM's eth0 (hpc_client) has no IP address
+#ifconfig $HPC_IFACE 0.0.0.0
+
+# Now we can start dnsmasq inside the container
+docker exec $VEG service dnsmasq start
+
+# Attach to container
+docker start -a $VEG
diff --git a/xos/synchronizer/templates/veg.conf.j2 b/xos/synchronizer/templates/veg.conf.j2
new file mode 100644
index 0000000..39df9c0
--- /dev/null
+++ b/xos/synchronizer/templates/veg.conf.j2
@@ -0,0 +1,10 @@
+# Upstart script for vEG
+description "vEG container"
+author "andrea@onlab.us"
+start on filesystem and started docker
+stop on runlevel [!2345]
+respawn
+
+script
+  /usr/local/sbin/start-veg-{{ s_tags[0] }}-{{ c_tags[0] }}.sh
+end script
diff --git a/xos/synchronizer/templates/vlan_sample.j2 b/xos/synchronizer/templates/vlan_sample.j2
new file mode 100644
index 0000000..51d7902
--- /dev/null
+++ b/xos/synchronizer/templates/vlan_sample.j2
@@ -0,0 +1,5 @@
+# Below is a list of all VLAN IDs (c_tags) associated with this vEG
+
+{% for vlan_id in c_tags %}
+{{ vlan_id }}
+{% endfor %}
diff --git a/xos/synchronizer/veg-synchronizer.py b/xos/synchronizer/veg-synchronizer.py
new file mode 100755
index 0000000..84bec4f
--- /dev/null
+++ b/xos/synchronizer/veg-synchronizer.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# This imports and runs the base synchronizer (xos-synchronizer.py) from ../../synchronizers/base
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),"../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizer/veg_stats_notifier.py b/xos/synchronizer/veg_stats_notifier.py
new file mode 100644
index 0000000..a380b58
--- /dev/null
+++ b/xos/synchronizer/veg_stats_notifier.py
@@ -0,0 +1,344 @@
+import six
+import uuid
+import datetime
+from kombu.connection import BrokerConnection
+from kombu.messaging import Exchange, Queue, Consumer, Producer
+import subprocess
+import re
+import time, threading
+import sys, getopt
+import logging
+import logging.handlers
+import os
+
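+# Periodically collect per-vEG-container compute and dnsmasq statistics and publish
+# them as notification messages on a RabbitMQ exchange (see publish_cpe_stats below).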
+
+logfile = "veg_stats_notifier.log"
+level=logging.INFO
+logger=logging.getLogger('veg_stats_notifier')
+logger.setLevel(level)
+# create formatter
+formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s")
+handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=1)
+# add formatter to handler
+handler.setFormatter(formatter)
+logger.addHandler(handler)
+
+def get_all_docker_containers():
+    p = subprocess.Popen('docker ps --no-trunc', shell=True, stdout=subprocess.PIPE) 
+    firstline = True
+    dockercontainers = {}
+    while True:
+        out = p.stdout.readline()
+        if out == '' and p.poll() != None:
+            break
+        if out != '':
+            if firstline is True:
+                firstline = False
+            else:
+                fields = out.split()
+                container_fields = {}
+                container_fields['id'] = fields[0]
+                dockercontainers[fields[-1]] = container_fields
+    return dockercontainers
+
+def extract_compute_stats_from_all_vegs(dockercontainers):
+    for k,v in dockercontainers.iteritems():
+        cmd = 'sudo docker stats --no-stream=true ' + v['id'] 
+        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) 
+        firstline = True
+        while True:
+            out = p.stdout.readline()
+            if out == '' and p.poll() != None:
+                break
+            if out != '':
+                if firstline is True:
+                    firstline = False
+                else:
+                    fields = out.split()
+                    #['CONTAINER_ID', 'CPU%', 'MEMUSE', 'UNITS', '/', 'MEMLIMIT', 'UNITS', 'MEM%', 'NET I/O', 'UNITS', '/', 'NET I/O LIMIT', 'UNITS', 'BLOCK I/O', 'UNITS', '/', 'BLOCK I/O LIMIT', 'UNITS']
+                    v['cpu_util'] = fields[1][:-1]
+                    if fields[6] == 'GB':
+                       v['memory'] = str(float(fields[5]) * 1000)
+                    else:
+                       v['memory'] = fields[5]
+                    if fields[3] == 'GB':
+                       v['memory_usage'] = str(float(fields[2]) * 1000)
+                    else:
+                       v['memory_usage'] = fields[2]
+        v['network_stats'] = []
+        for intf in ['eth0', 'eth1']:
+            cmd = 'sudo docker exec ' + v['id'] + ' ifconfig ' + intf
+            p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+            out,err = p.communicate()
+            if out:
+                intf_stats = {}
+                m = re.search("RX bytes:(\d+)", str(out))
+                if m:
+                    intf_stats['rx_bytes'] = m.group(1)
+                m = re.search("TX bytes:(\d+)", str(out))
+                if m:
+                    intf_stats['tx_bytes'] = m.group(1)
+                m = re.search("RX packets:(\d+)", str(out))
+                if m:
+                    intf_stats['rx_packets'] = m.group(1)
+                m = re.search("TX packets:(\d+)", str(out))
+                if m:
+                    intf_stats['tx_packets'] = m.group(1)
+                if intf_stats:
+                    intf_stats['intf'] = intf
+                    v['network_stats'].append(intf_stats)
+
+def extract_dns_stats_from_all_vegs(dockercontainers):
+    for k,v in dockercontainers.iteritems():
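+         # killall -10 sends SIGUSR1 to dnsmasq, which makes it dump cache and per-server
+         # statistics to syslog; the tail below reads those log lines back for parsing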
+         cmd = 'docker exec ' + v['id'] + ' killall -10 dnsmasq'
+         p = subprocess.Popen (cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
+         (output, error) = p.communicate()
+         if error:
+             logger.error("killall dnsmasq command failed with error = %s",error)
+             continue
+         cmd = 'docker exec ' + v['id'] + ' tail -7 /var/log/syslog'
+         p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+         (output, error) = p.communicate()
+         if error:
+             logger.error("tail on dnsmasq log command failed with error = %s",error)
+             continue
+         log_list = output.splitlines()
+         i = 0
+         while i < len(log_list):
+             m = re.search('(?<=:\scache size\s)(\S*)(?=,\s),\s(\S*)(?=/)/(\S*)(?=\scache insertions re-used unexpired cache entries)', log_list[i])
+             if m == None:
+                 i = i+1
+                 continue;
+             v['cache_size'] = m.group(1)
+             v['replaced_unexpired_entries'] = m.group(2)
+             v['total_inserted_entries'] = m.group(3)
+             i = i+1
+             m = re.search('(?<=:\squeries forwarded\s)(\S*)(?=,),\squeries answered locally\s(\S*)(?=$)', log_list[i])
+             v['queries_forwarded'] = m.group(1)
+             v['queries_answered_locally'] = m.group(2)
+             break;
+         i = i+2
+         v['server_stats'] = []
+         while i < len(log_list):
+             m = re.search('(?<=:\sserver\s)(\S*)(?=#)#\d*:\squeries sent\s(\S*)(?=,),\sretried or failed\s(\S*)(?=$)', log_list[i])
+             if m == None:
+                 i = i+1
+                 continue
+             dns_server = {}
+             dns_server['id'] = m.group(1)
+             dns_server['queries_sent'] = m.group(2)
+             dns_server['queries_failed'] = m.group(3)
+             v['server_stats'].append(dns_server)
+             i = i+1
+    return dockercontainers
+
+
+keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
+keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
+rabbit_user='openstack'
+rabbit_password='80608318c273f348a7c3'
+rabbit_host='10.11.10.1'
+vegservice_rabbit_exchange='vegservice'
+cpe_publisher_id='veg_publisher'
+
+producer = None
+
+def setup_rabbit_mq_channel():
+     global producer
+     global rabbit_user, rabbit_password, rabbit_host, vegservice_rabbit_exchange,cpe_publisher_id
+     vegservice_exchange = Exchange(vegservice_rabbit_exchange, "topic", durable=False)
+     # connections/channels
+     connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
+     logger.info('Connection to RabbitMQ server successful')
+     channel = connection.channel()
+     # produce
+     producer = Producer(channel, exchange=vegservice_exchange, routing_key='notifications.info')
+     p = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE)
+     (hostname, error) = p.communicate()
+     cpe_publisher_id = cpe_publisher_id + '_on_' + hostname.strip()
+     logger.info('cpe_publisher_id=%s',cpe_publisher_id)
+
+def publish_cpe_stats():
+     global producer
+     global keystone_tenant_id, keystone_user_id, cpe_publisher_id
+
+     logger.debug('publish_cpe_stats invoked')
+
+     dockercontainers = get_all_docker_containers()
+     cpe_container_compute_stats = extract_compute_stats_from_all_vegs(dockercontainers)
+     cpe_container_dns_stats = extract_dns_stats_from_all_vegs(dockercontainers)
+
+     for k,v in cpe_container_dns_stats.iteritems():
+          msg = {'event_type': 'veg',
+                 'message_id':six.text_type(uuid.uuid4()),
+                 'publisher_id': cpe_publisher_id,
+                 'timestamp':datetime.datetime.now().isoformat(),
+                 'priority':'INFO',
+                 'payload': {'veg_id':k,
+                             'user_id':keystone_user_id, 
+                             'tenant_id':keystone_tenant_id 
+                            }
+                }
+          producer.publish(msg)
+          logger.debug('Publishing veg event: %s', msg)
+
+          compute_payload = {}
+          if 'cpu_util' in v:
+               compute_payload['cpu_util']= v['cpu_util']
+          if 'memory' in v:
+               compute_payload['memory']= v['memory']
+          if 'memory_usage' in v:
+               compute_payload['memory_usage']= v['memory_usage']
+          if ('network_stats' in v) and (v['network_stats']):
+               compute_payload['network_stats']= v['network_stats']
+          if compute_payload:
+               compute_payload['veg_id'] = k
+               compute_payload['user_id'] = keystone_user_id
+               compute_payload['tenant_id'] = keystone_tenant_id
+               msg = {'event_type': 'veg.compute.stats',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': compute_payload 
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.compute.stats event: %s', msg)
+
+          if 'cache_size' in v:
+               msg = {'event_type': 'veg.dns.cache.size',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'veg_id':k,
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'cache_size':v['cache_size'] 
+                                 }
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.dns.cache.size event: %s', msg)
+
+          if 'total_inserted_entries' in v:
+               msg = {'event_type': 'veg.dns.total_inserted_entries',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'veg_id':k,
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'total_inserted_entries':v['total_inserted_entries'] 
+                                 }
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.dns.total_inserted_entries event: %s', msg)
+
+          if 'replaced_unexpired_entries' in v:
+               msg = {'event_type': 'veg.dns.replaced_unexpired_entries',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'veg_id':k,
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'replaced_unexpired_entries':v['replaced_unexpired_entries'] 
+                                 }
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.dns.replaced_unexpired_entries event: %s', msg)
+
+          if 'queries_forwarded' in v:
+               msg = {'event_type': 'veg.dns.queries_forwarded',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'veg_id':k,
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'queries_forwarded':v['queries_forwarded'] 
+                                 }
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.dns.queries_forwarded event: %s', msg)
+
+          if 'queries_answered_locally' in v:
+               msg = {'event_type': 'veg.dns.queries_answered_locally',
+                      'message_id':six.text_type(uuid.uuid4()),
+                      'publisher_id': cpe_publisher_id,
+                      'timestamp':datetime.datetime.now().isoformat(),
+                      'priority':'INFO',
+                      'payload': {'veg_id':k,
+                                  'user_id':keystone_user_id,
+                                  'tenant_id':keystone_tenant_id, 
+                                  'queries_answered_locally':v['queries_answered_locally'] 
+                                 }
+                     }
+               producer.publish(msg)
+               logger.debug('Publishing veg.dns.queries_answered_locally event: %s', msg)
+
+          if 'server_stats' in v:
+               for server in v['server_stats']:
+                   msg = {'event_type': 'veg.dns.server.queries_sent',
+                          'message_id':six.text_type(uuid.uuid4()),
+                          'publisher_id': cpe_publisher_id,
+                          'timestamp':datetime.datetime.now().isoformat(),
+                          'priority':'INFO',
+                          'payload': {'veg_id':k,
+                                      'user_id':keystone_user_id,
+                                      'tenant_id':keystone_tenant_id, 
+                                      'upstream_server':server['id'],
+                                      'queries_sent':server['queries_sent'] 
+                                     }
+                         }
+                   producer.publish(msg)
+                   logger.debug('Publishing veg.dns.server.queries_sent event: %s', msg)
+
+                   msg = {'event_type': 'veg.dns.server.queries_failed',
+                          'message_id':six.text_type(uuid.uuid4()),
+                          'publisher_id': cpe_publisher_id,
+                          'timestamp':datetime.datetime.now().isoformat(),
+                          'priority':'INFO',
+                          'payload': {'veg_id':k,
+                                      'user_id':keystone_user_id,
+                                      'tenant_id':keystone_tenant_id, 
+                                      'upstream_server':server['id'],
+                                      'queries_failed':server['queries_failed'] 
+                                     }
+                         }
+                   producer.publish(msg)
+                   logger.debug('Publishing veg.dns.server.queries_failed event: %s', msg)
+
+def periodic_publish():
+     publish_cpe_stats()
+     # Publish every 5 minutes
+     threading.Timer(300, periodic_publish).start()
+
+def main(argv):
+   global keystone_tenant_id, keystone_user_id, rabbit_user, rabbit_password, rabbit_host, vegservice_rabbit_exchange
+   try:
+      opts, args = getopt.getopt(argv,"",["keystone_tenant_id=","keystone_user_id=","rabbit_host=","rabbit_user=","rabbit_password=","vegservice_rabbit_exchange="])
+   except getopt.GetoptError:
+      print 'veg_stats_notifier.py keystone_tenant_id=<keystone_tenant_id> keystone_user_id=<keystone_user_id> rabbit_host=<IP addr> rabbit_user=<user> rabbit_password=<password> vegservice_rabbit_exchange=<exchange name>'
+      sys.exit(2)
+   for opt, arg in opts:
+      if opt in ("--keystone_tenant_id"):
+         keystone_tenant_id = arg
+      elif opt in ("--keystone_user_id"):
+         keystone_user_id = arg
+      elif opt in ("--rabbit_user"):
+         rabbit_user = arg
+      elif opt in ("--rabbit_password"):
+         rabbit_password = arg
+      elif opt in ("--rabbit_host"):
+         rabbit_host = arg
+      elif opt in ("--vegservice_rabbit_exchange"):
+         vegservice_rabbit_exchange = arg
+   logger.info("veg_stats_notifier args:keystone_tenant_id=%s keystone_user_id=%s rabbit_user=%s rabbit_host=%s vegservice_rabbit_exchange=%s",keystone_tenant_id,keystone_user_id,rabbit_user,rabbit_host,vegservice_rabbit_exchange)
+   setup_rabbit_mq_channel()
+   periodic_publish()
+
+if __name__ == "__main__":
+   main(sys.argv[1:])
diff --git a/xos/synchronizer/veg_synchronizer_config b/xos/synchronizer/veg_synchronizer_config
new file mode 100644
index 0000000..0e346ef
--- /dev/null
+++ b/xos/synchronizer/veg_synchronizer_config
@@ -0,0 +1,43 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=veg
+dependency_graph=/opt/xos/synchronizers/veg/model-deps
+steps_dir=/opt/xos/synchronizers/veg/steps
+sys_dir=/opt/xos/synchronizers/veg/sys
+deleters_dir=/opt/xos/synchronizers/veg/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+proxy_ssh=False
+full_setup=True
+enable_watchers=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/synchronizer/vtn_veg_synchronizer_config b/xos/synchronizer/vtn_veg_synchronizer_config
new file mode 100644
index 0000000..420006d
--- /dev/null
+++ b/xos/synchronizer/vtn_veg_synchronizer_config
@@ -0,0 +1,47 @@
+
+[plc]
+name=plc
+deployment=VICCI
+
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+[api]
+host=128.112.171.237
+port=8000
+ssl_key=None
+ssl_cert=None
+ca_ssl_cert=None
+ratelimit_enabled=0
+omf_enabled=0
+mail_support_address=support@localhost
+nova_enabled=True
+
+[observer]
+name=veg
+dependency_graph=/opt/xos/synchronizers/veg/model-deps
+steps_dir=/opt/xos/synchronizers/veg/steps
+sys_dir=/opt/xos/synchronizers/veg/sys
+deleters_dir=/opt/xos/synchronizers/veg/deleters
+log_file=console
+#/var/log/hpc.log
+driver=None
+pretend=False
+backoff_disabled=True
+save_ansible_output=True
+# set proxy_ssh to false on cloudlab
+full_setup=True
+proxy_ssh=True
+proxy_ssh_key=/opt/xos/synchronizers/veg/node_key
+proxy_ssh_user=root
+
+[networking]
+use_vtn=True
+
+[feefie]
+client_id='vicci_dev_central'
+user_id='pl'
diff --git a/xos/templates/vegadmin.html b/xos/templates/vegadmin.html
new file mode 100644
index 0000000..c81ad70
--- /dev/null
+++ b/xos/templates/vegadmin.html
@@ -0,0 +1,9 @@
+<div class="row text-center">
+    <div class="col-xs-6">
+        <a class="btn btn-primary" href="/admin/veg/vegtenant/">vEG Tenants</a>
+    </div>
+    <div class="col-xs-6">
+        <a class="btn btn-primary" href="/admin/dashboard/subscribers/">Subscriber View</a>
+    </div>
+</div>
+
diff --git a/xos/tosca/resources/vegservice.py b/xos/tosca/resources/vegservice.py
new file mode 100644
index 0000000..85bcac7
--- /dev/null
+++ b/xos/tosca/resources/vegservice.py
@@ -0,0 +1,10 @@
+from services.veg.models import VEGService
+from service import XOSService
+
+class XOSVegService(XOSService):
+    provides = "tosca.nodes.VEGService"
+    xos_model = VEGService
+    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key",
+                    "private_key_fn", "versionNumber", "backend_network_label",
+                    "dns_servers", "node_label", "docker_image_name", "docker_insecure_registry"]
+
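+# Minimal TOSCA sketch (illustrative only; the template name and property
+# values are placeholders, not part of this commit) of a node this resource
+# would handle -- each entry in copyin_props above is copied verbatim onto the
+# VEGService model:
+#
+#   topology_template:
+#     node_templates:
+#       service#veg:
+#         type: tosca.nodes.VEGService
+#         properties:
+#           view_url: /admin/veg/vegservice/$id$/
+#           docker_image_name: registry.example.org:5000/veg:latest
+#           docker_insecure_registry: true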
diff --git a/xos/veg-onboard.yaml b/xos/veg-onboard.yaml
new file mode 100644
index 0000000..71771b7
--- /dev/null
+++ b/xos/veg-onboard.yaml
@@ -0,0 +1,27 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Onboard the vEG service
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    servicecontroller#veg:
+      type: tosca.nodes.ServiceController
+      properties:
+          base_url: file:///opt/xos_services/vEG/xos/
+          # The following paths are concatenated with base_url automatically,
+          # if base_url is non-null.
+          models: models.py
+          admin: admin.py
+          admin_template: templates/vegadmin.html
+          synchronizer: synchronizer/manifest
+          synchronizer_run: veg-synchronizer.py
+          tosca_custom_types: veg.yaml
+          tosca_resource: tosca/resources/vegservice.py
+          rest_service: subdirectory:veg api/service/veg/vegservice.py
+          rest_tenant: subdirectory:cord api/tenant/cord/veg.py
+          private_key: file:///opt/xos/key_import/veg_rsa
+          public_key: file:///opt/xos/key_import/veg_rsa.pub
+
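+# This recipe only registers the service controller with XOS, pointing the
+# loader at the models/admin/synchronizer artifacts listed above; a separate
+# recipe (using the tosca.nodes.VEGService type from veg.yaml) would create
+# the service itself. It is typically fed to the XOS TOSCA engine, e.g.
+# (path and user are assumptions -- adjust for your deployment):
+#
+#   python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos_services/vEG/xos/veg-onboard.yaml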
diff --git a/xos/veg.m4 b/xos/veg.m4
new file mode 100644
index 0000000..3053c9b
--- /dev/null
+++ b/xos/veg.m4
@@ -0,0 +1,36 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# compile this with "m4 veg.m4 > veg.yaml"
+
+# include macros
+include(macros.m4)
+
+node_types:
+    
+    tosca.nodes.VEGService:
+        description: >
+            CORD: The vEG Service.
+        derived_from: tosca.nodes.Root
+        capabilities:
+            xos_base_service_caps
+        properties:
+            xos_base_props
+            xos_base_service_props
+            backend_network_label:
+                type: string
+                required: false
+                description: Label that matches network used to connect HPC and BBS services.
+            dns_servers:
+                type: string
+                required: false
+            node_label:
+                type: string
+                required: false
+            docker_image_name:
+                type: string
+                required: false
+                description: Name of docker image to pull for vEG
+            docker_insecure_registry:
+                type: boolean
+                required: false
+                description: If true, then the hostname:port specified in docker_image_name will be treated as an insecure registry
\ No newline at end of file
diff --git a/xos/veg.yaml b/xos/veg.yaml
new file mode 100644
index 0000000..8a2ddf2
--- /dev/null
+++ b/xos/veg.yaml
@@ -0,0 +1,99 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+# compile this with "m4 veg.m4 > veg.yaml"
+
+# include macros
+# Note: TOSCA derived_from isn't working the way I think it should; it's not
+#    inheriting from the parent template. Until we get that figured out, use
+#    m4 macros to do our inheritance.
+
+
+# Service
+
+
+# Subscriber
+
+
+
+
+# end m4 macros
+
+
+
+node_types:
+    
+    tosca.nodes.VEGService:
+        description: >
+            CORD: The vEG Service.
+        derived_from: tosca.nodes.Root
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            no-delete:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to delete this object
+            no-create:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to create this object
+            no-update:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to update this object
+            replaces:
+                type: string
+                required: false
+                description: Replaces/renames this object
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: Icon to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+            backend_network_label:
+                type: string
+                required: false
+                description: Label that matches network used to connect HPC and BBS services.
+            dns_servers:
+                type: string
+                required: false
+            node_label:
+                type: string
+                required: false
+            docker_image_name:
+                type: string
+                required: false
+                description: Name of docker image to pull for vEG
+            docker_insecure_registry:
+                type: boolean
+                required: false
+                description: If true, then the hostname:port specified in docker_image_name will be treated as an insecure registry
\ No newline at end of file