Merge branch 'feature/mcord' of https://github.com/open-cloud/xos
diff --git a/containers/Makefile b/containers/Makefile
index 92685e7..64749a5 100644
--- a/containers/Makefile
+++ b/containers/Makefile
@@ -5,6 +5,7 @@
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/setup.yaml
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/images.yaml
+	sudo docker exec containers_xos_1 cp /opt/xos/configurations/frontend/mocks/xos_mcord_config /opt/xos/xos_configuration/
 
 nodes.yaml:
 	export SETUPDIR=.; bash ../xos/configurations/common/make-nodes-yaml.sh
diff --git a/containers/admin-openrc.sh b/containers/admin-openrc.sh
index f27fdac..24cd509 100644
--- a/containers/admin-openrc.sh
+++ b/containers/admin-openrc.sh
@@ -1,6 +1,8 @@
 # Replace with the OpenStack admin credentials for your cluster
+export OS_PROJECT_DOMAIN_ID=default
+export OS_USER_DOMAIN_ID=default
 export OS_TENANT_NAME=admin
 export OS_USERNAME=admin
-export OS_PASSWORD=admin
-export OS_AUTH_URL=http://localhost:35357/v2.0
+export OS_PASSWORD=mcord
+export OS_AUTH_URL=http://10.102.81.3:35357/v2.0
 
diff --git a/containers/docker-compose.yml b/containers/docker-compose.yml
index 24596a3..40a11f9 100644
--- a/containers/docker-compose.yml
+++ b/containers/docker-compose.yml
@@ -14,6 +14,17 @@
     volumes:
         - .:/root/setup:ro
 
+xos_synchronizer_mcordservice:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/mcordservice/mcordservice-synchronizer.py -C /opt/xos/synchronizers/mcordservice/mcordservice_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: mcordservice
+    links:
+        - xos_db
+    volumes:
+#        - ../setup/id_rsa:/opt/xos/synchronizers/mcordservice/mcordservice_private_key:ro  # private key
+        - ../setup:/root/setup:ro
 # FUTURE
 #xos_swarm_synchronizer:
 #    image: xosproject/xos-swarm-synchronizer
diff --git a/containers/images.yaml b/containers/images.yaml
new file mode 100644
index 0000000..90cbbb6
--- /dev/null
+++ b/containers/images.yaml
@@ -0,0 +1,17 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    mysite:
+        type: tosca.nodes.Site
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
diff --git a/containers/nodes.yaml b/containers/nodes.yaml
new file mode 100644
index 0000000..1f6ff45
--- /dev/null
+++ b/containers/nodes.yaml
@@ -0,0 +1,13 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    MyDeployment:
+        type: tosca.nodes.Deployment
+    mysite:
+        type: tosca.nodes.Site
diff --git a/xos/configurations/common/xos_common_config b/xos/configurations/common/xos_common_config
index f7b335e..f0446f1 100644
--- a/xos/configurations/common/xos_common_config
+++ b/xos/configurations/common/xos_common_config
@@ -40,7 +40,9 @@
 
 [gui]
 disable_minidashboard=True
-branding_name=Open Cloud
-branding_icon=/static/logo.png
-branding_favicon=/static/favicon.png
-branding_bg=/static/bg.jpg
+branding_name=M-CORD
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/mcord-bg2.jpg
+service_view_class=core.views.mCordServiceGrid.ServiceGridView
+
diff --git a/xos/configurations/devel/docker-compose.yml b/xos/configurations/devel/docker-compose.yml
index 803e57c..882904a 100644
--- a/xos/configurations/devel/docker-compose.yml
+++ b/xos/configurations/devel/docker-compose.yml
@@ -16,6 +16,33 @@
     volumes:
         - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config:ro
 
+xos_synchronizer_helloworldservice_complete:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/helloworldservice_complete/helloworldservice-synchronizer.py -C /opt/xos/synchronizers/helloworldservice_complete/helloworldservice_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: helloworldservice_complete
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/helloworldservice_complete/helloworldservice_complete_private_key:ro  # private key
+        - ../setup:/root/setup:ro
+
+xos_synchronizer_mcordservice:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/mcordservice/mcordservice-synchronizer.py -C /opt/xos/synchronizers/mcordservice/mcordservice_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: mcordservice
+    links:
+        - xos_db
+    extra_hosts:
+        - ctl:${MYIP}
+    volumes:
+        - ../setup/id_rsa:/opt/xos/synchronizers/mcordservice/mcordservice_private_key:ro  # private key
+        - ../setup:/root/setup:ro
 # FUTURE
 #xos_swarm_synchronizer:
 #    image: xosproject/xos-swarm-synchronizer
diff --git a/xos/configurations/frontend/Makefile b/xos/configurations/frontend/Makefile
index 20f997a..dcc5af2 100644
--- a/xos/configurations/frontend/Makefile
+++ b/xos/configurations/frontend/Makefile
@@ -5,6 +5,9 @@
 	sudo docker-compose up -d
 	bash ../common/wait_for_xos.sh
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/frontend/sample.yaml
+	# sudo docker-compose run xos python manage.py makemigrations mcordservice
+	# sudo docker-compose run xos python manage.py syncdb
+	# sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/MCORDServiceN.yaml
 
 containers:
 	cd ../../../containers/xos; make devel
diff --git a/xos/configurations/frontend/docker-compose.yml b/xos/configurations/frontend/docker-compose.yml
index 2ad8f7a..45893ee 100644
--- a/xos/configurations/frontend/docker-compose.yml
+++ b/xos/configurations/frontend/docker-compose.yml
@@ -21,6 +21,7 @@
     volumes:
       - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config
       - ../../core/xoslib:/opt/xos/core/xoslib
+      - ../../tosca:/opt/xos/tosca
       - ../../core/static:/opt/xos/core/static
       - ../../templates/admin:/opt/xos/templates/admin
       - ../../configurations:/opt/xos/configurations
diff --git a/xos/configurations/frontend/mocks/MCORDServiceN.yaml b/xos/configurations/frontend/mocks/MCORDServiceN.yaml
new file mode 100644
index 0000000..bef7bb3
--- /dev/null
+++ b/xos/configurations/frontend/mocks/MCORDServiceN.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.1
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    ubuntu-14.04-server-cloudimg-amd64-disk1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ubuntu-14.04-server-cloudimg-amd64-disk1
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: compute9
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
diff --git a/xos/configurations/setup/admin-openrc.sh b/xos/configurations/setup/admin-openrc.sh
new file mode 100644
index 0000000..276c39c
--- /dev/null
+++ b/xos/configurations/setup/admin-openrc.sh
@@ -0,0 +1,4 @@
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=df130e830b013289192e
+export OS_AUTH_URL=http://10.0.10.125:5000/v2.0
diff --git a/xos/configurations/setup/ceilometer_url b/xos/configurations/setup/ceilometer_url
new file mode 100644
index 0000000..9b52a64
--- /dev/null
+++ b/xos/configurations/setup/ceilometer_url
@@ -0,0 +1 @@
+http://127.0.0.1/xosmetering/
diff --git a/xos/configurations/setup/controller_settings b/xos/configurations/setup/controller_settings
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/configurations/setup/controller_settings
diff --git a/xos/configurations/setup/flat_net_name b/xos/configurations/setup/flat_net_name
new file mode 100644
index 0000000..c8c6761
--- /dev/null
+++ b/xos/configurations/setup/flat_net_name
@@ -0,0 +1 @@
+private
\ No newline at end of file
diff --git a/xos/configurations/setup/id_rsa b/xos/configurations/setup/id_rsa
new file mode 100644
index 0000000..427bf89
--- /dev/null
+++ b/xos/configurations/setup/id_rsa
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr7ezZV9wU4O9F/fMOsG9Zm0kIbjLGNsL/MJXuGlqw0SRRbkx
+YipvtP+pJmCzkHmnUFCE1BMVHcnCJRfhcwabF08c+t7H5mj6GPo/QKR7seLr2IKM
+jfG3846u3k2Oo8wNfy8HJ5g+bZFhBk+Z0scYkqQFDL0IW1JtWkl+Yu2VcZsiSJCq
+j+7XjjM1QoiDCyx3p6z/jHi5K1XIFxvQaeBddm3ICau2x6ezd5wnYjPaCANuwisJ
+KgmwSvX/lr8NZkjpETME+ghMPDq7KvXFZL9MCmv8IFe2fKVzIDqHkbgcZ/W0maed
+A/2y9p55B+SQmN3PXW1EhOXHH0SNP31ZS+N5dwIDAQABAoIBAGrudaN5ItgP0WDm
+kUgoYmQUgupqlF2531+fvNYigK/36BfwDRdaD8Sr2HncWynOfn0nos2UF0ObZiRA
+lhfzqynSISahsDCNLbVJhHiIICYum6uUNoii0njLGat6sxUGtifxrH7x7Pusfsji
+ZA+azV9fpRsNZip8zMMm+lyljE4nQbVZv09aExq0Mh2n+mH6OWS43mZ1N7TxWtgd
+MmtoLBAPoMiqXlCxZOcznptVR9hY7CSG0uOQUSui44DOXOyqEI7z57eoYM1hWmng
+Ery07Qr9BbEVl4epLaEyLIGXcUsUbcQz3kbXCg0NbXHiFtr0kdIVwJXHg5M9MAsf
+fDaxJZECgYEA29oLRkI+0L9rSErteaf4spo4FJAARWbsVM3uj1gKEClUUHklI97A
+UVTcOFC7Drr/rwqfHy8fQq9ASIMDnj+FulYQSMna3SLdkgsbNSDuesbG4wp6+chQ
+uSzZP1YtaYrjMxz6s8s/zmFkqAssyOuscsOhgF16945hc63GLro4GwUCgYEAzJv4
+eqWrY6Il7p/Oir4AJxbdfO50Oj1dECsFNZ1KhtA280FslW6Za+oUfD1+Xv13XRZP
+O62IvXXJT67NOq0rKVUixPJJFXQqSRU1QljLgREM6dqr4pS4NirkaPvGwuuej8I4
+dKLqVPcNxDSAXfMwR0KQu7+IVEdvzrw5hrsgg0sCgYB21YUClQwfCViT2uxBtelX
+oMRvWObMnLVhoW4xTQUjdzN7y/+nQ9/wFk5yojB55doOY09fK7lZ8iBtEWQDRZKj
+BaIHthP3M8FQD3DFZueAtbELR77xBLWdYgCLm6kwQ0JLfn6EcHgstbgSnPe4Iqsz
+3UqOd/jflrZWMLfOyhlJgQKBgCGCRa5oZWo6yvWKjHviZAoCz6E/OB+1nwEf2omO
+Sf9MKEOsakkKxOuMeXBjbcfGwP6owa8nW2aT3LVFDm1WoOPzAm+4sklmLeqsI33L
+JwDrNu8xlcbUzlpoqeGbolCX3+7xQuevKqthjoqcgo1gX368IxHsazpKPMBhyRYM
+nWWDAoGBANOG/59508uQqZvWtByA092ARXjEUYLgNTwDo1N4kM5zgV8NETtv7qs/
+P/ze2e88sI230jzbU3iq2OGjk6S1c6LHVG9QohZPwtnwTCeKRhSG+CYHMcXSLK7D
+xf4C0kAbPsaG5F0w3vbGTTF4uuGXyijOQSXMhiG4756VaMEGvb9k
+-----END RSA PRIVATE KEY-----
diff --git a/xos/configurations/setup/id_rsa.pub b/xos/configurations/setup/id_rsa.pub
new file mode 100644
index 0000000..81dd872
--- /dev/null
+++ b/xos/configurations/setup/id_rsa.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvt7NlX3BTg70X98w6wb1mbSQhuMsY2wv8wle4aWrDRJFFuTFiKm+0/6kmYLOQeadQUITUExUdycIlF+FzBpsXTxz63sfmaPoY+j9ApHux4uvYgoyN8bfzjq7eTY6jzA1/LwcnmD5tkWEGT5nSxxiSpAUMvQhbUm1aSX5i7ZVxmyJIkKqP7teOMzVCiIMLLHenrP+MeLkrVcgXG9Bp4F12bcgJq7bHp7N3nCdiM9oIA27CKwkqCbBK9f+Wvw1mSOkRMwT6CEw8Orsq9cVkv0wKa/wgV7Z8pXMgOoeRuBxn9bSZp50D/bL2nnkH5JCY3c9dbUSE5ccfRI0/fVlL43l3 ubuntu@ip-10-0-10-125
diff --git a/xos/configurations/setup/node_key b/xos/configurations/setup/node_key
new file mode 100644
index 0000000..427bf89
--- /dev/null
+++ b/xos/configurations/setup/node_key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr7ezZV9wU4O9F/fMOsG9Zm0kIbjLGNsL/MJXuGlqw0SRRbkx
+YipvtP+pJmCzkHmnUFCE1BMVHcnCJRfhcwabF08c+t7H5mj6GPo/QKR7seLr2IKM
+jfG3846u3k2Oo8wNfy8HJ5g+bZFhBk+Z0scYkqQFDL0IW1JtWkl+Yu2VcZsiSJCq
+j+7XjjM1QoiDCyx3p6z/jHi5K1XIFxvQaeBddm3ICau2x6ezd5wnYjPaCANuwisJ
+KgmwSvX/lr8NZkjpETME+ghMPDq7KvXFZL9MCmv8IFe2fKVzIDqHkbgcZ/W0maed
+A/2y9p55B+SQmN3PXW1EhOXHH0SNP31ZS+N5dwIDAQABAoIBAGrudaN5ItgP0WDm
+kUgoYmQUgupqlF2531+fvNYigK/36BfwDRdaD8Sr2HncWynOfn0nos2UF0ObZiRA
+lhfzqynSISahsDCNLbVJhHiIICYum6uUNoii0njLGat6sxUGtifxrH7x7Pusfsji
+ZA+azV9fpRsNZip8zMMm+lyljE4nQbVZv09aExq0Mh2n+mH6OWS43mZ1N7TxWtgd
+MmtoLBAPoMiqXlCxZOcznptVR9hY7CSG0uOQUSui44DOXOyqEI7z57eoYM1hWmng
+Ery07Qr9BbEVl4epLaEyLIGXcUsUbcQz3kbXCg0NbXHiFtr0kdIVwJXHg5M9MAsf
+fDaxJZECgYEA29oLRkI+0L9rSErteaf4spo4FJAARWbsVM3uj1gKEClUUHklI97A
+UVTcOFC7Drr/rwqfHy8fQq9ASIMDnj+FulYQSMna3SLdkgsbNSDuesbG4wp6+chQ
+uSzZP1YtaYrjMxz6s8s/zmFkqAssyOuscsOhgF16945hc63GLro4GwUCgYEAzJv4
+eqWrY6Il7p/Oir4AJxbdfO50Oj1dECsFNZ1KhtA280FslW6Za+oUfD1+Xv13XRZP
+O62IvXXJT67NOq0rKVUixPJJFXQqSRU1QljLgREM6dqr4pS4NirkaPvGwuuej8I4
+dKLqVPcNxDSAXfMwR0KQu7+IVEdvzrw5hrsgg0sCgYB21YUClQwfCViT2uxBtelX
+oMRvWObMnLVhoW4xTQUjdzN7y/+nQ9/wFk5yojB55doOY09fK7lZ8iBtEWQDRZKj
+BaIHthP3M8FQD3DFZueAtbELR77xBLWdYgCLm6kwQ0JLfn6EcHgstbgSnPe4Iqsz
+3UqOd/jflrZWMLfOyhlJgQKBgCGCRa5oZWo6yvWKjHviZAoCz6E/OB+1nwEf2omO
+Sf9MKEOsakkKxOuMeXBjbcfGwP6owa8nW2aT3LVFDm1WoOPzAm+4sklmLeqsI33L
+JwDrNu8xlcbUzlpoqeGbolCX3+7xQuevKqthjoqcgo1gX368IxHsazpKPMBhyRYM
+nWWDAoGBANOG/59508uQqZvWtByA092ARXjEUYLgNTwDo1N4kM5zgV8NETtv7qs/
+P/ze2e88sI230jzbU3iq2OGjk6S1c6LHVG9QohZPwtnwTCeKRhSG+CYHMcXSLK7D
+xf4C0kAbPsaG5F0w3vbGTTF4uuGXyijOQSXMhiG4756VaMEGvb9k
+-----END RSA PRIVATE KEY-----
diff --git a/xos/configurations/setup/node_key.pub b/xos/configurations/setup/node_key.pub
new file mode 100644
index 0000000..81dd872
--- /dev/null
+++ b/xos/configurations/setup/node_key.pub
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvt7NlX3BTg70X98w6wb1mbSQhuMsY2wv8wle4aWrDRJFFuTFiKm+0/6kmYLOQeadQUITUExUdycIlF+FzBpsXTxz63sfmaPoY+j9ApHux4uvYgoyN8bfzjq7eTY6jzA1/LwcnmD5tkWEGT5nSxxiSpAUMvQhbUm1aSX5i7ZVxmyJIkKqP7teOMzVCiIMLLHenrP+MeLkrVcgXG9Bp4F12bcgJq7bHp7N3nCdiM9oIA27CKwkqCbBK9f+Wvw1mSOkRMwT6CEw8Orsq9cVkv0wKa/wgV7Z8pXMgOoeRuBxn9bSZp50D/bL2nnkH5JCY3c9dbUSE5ccfRI0/fVlL43l3 ubuntu@ip-10-0-10-125
diff --git a/xos/configurations/setup/nodes.yaml b/xos/configurations/setup/nodes.yaml
new file mode 100644
index 0000000..3901208
--- /dev/null
+++ b/xos/configurations/setup/nodes.yaml
@@ -0,0 +1,22 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    MyDeployment:
+        type: tosca.nodes.Deployment
+    mysite:
+        type: tosca.nodes.Site
+    ip-10-0-10-125:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
diff --git a/xos/configurations/setup/padmin_public_key b/xos/configurations/setup/padmin_public_key
new file mode 100644
index 0000000..81dd872
--- /dev/null
+++ b/xos/configurations/setup/padmin_public_key
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvt7NlX3BTg70X98w6wb1mbSQhuMsY2wv8wle4aWrDRJFFuTFiKm+0/6kmYLOQeadQUITUExUdycIlF+FzBpsXTxz63sfmaPoY+j9ApHux4uvYgoyN8bfzjq7eTY6jzA1/LwcnmD5tkWEGT5nSxxiSpAUMvQhbUm1aSX5i7ZVxmyJIkKqP7teOMzVCiIMLLHenrP+MeLkrVcgXG9Bp4F12bcgJq7bHp7N3nCdiM9oIA27CKwkqCbBK9f+Wvw1mSOkRMwT6CEw8Orsq9cVkv0wKa/wgV7Z8pXMgOoeRuBxn9bSZp50D/bL2nnkH5JCY3c9dbUSE5ccfRI0/fVlL43l3 ubuntu@ip-10-0-10-125
diff --git a/xos/core/models/instance.py b/xos/core/models/instance.py
index 7f13eb8..6657c69 100644
--- a/xos/core/models/instance.py
+++ b/xos/core/models/instance.py
@@ -128,10 +128,10 @@
 
         if (self.isolation == "container") or (self.isolation == "container_vm"):
             if (self.image.kind != "container"):
-                raise ValidationError("Container instance must use container image")
+               raise ValidationError("Container instance must use container image")
         elif (self.isolation == "vm"):
             if (self.image.kind != "vm"):
-                raise ValidationError("VM instance must use VM image")
+               raise ValidationError("VM instance must use VM image")
 
         if (self.isolation == "container_vm") and (not self.parent):
             raise ValidationError("Container-vm instance must have a parent")
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index 80ee9ba..4f2c5dd 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -138,6 +138,8 @@
     name = models.CharField(max_length=32)
     template = models.ForeignKey(NetworkTemplate)
     subnet = models.CharField(max_length=32, blank=True)
+    start_ip = models.CharField(max_length=32, blank=True)
+    end_ip = models.CharField(max_length=32, blank=True)
     ports = models.CharField(max_length=1024, blank=True, null=True, validators=[ValidateNatList])
     labels = models.CharField(max_length=1024, blank=True, null=True)
     owner = models.ForeignKey(Slice, related_name="ownedNetworks", help_text="Slice that owns control of this Network")
@@ -165,6 +167,7 @@
         if (not self.subnet) and (NO_OBSERVER):
             from util.network_subnet_allocator import find_unused_subnet
             self.subnet = find_unused_subnet(existing_subnets=[x.subnet for x in Network.objects.all()])
+            print "DEF_MOD_NET_IP", self.start_ip
         super(Network, self).save(*args, **kwds)
 
     def can_update(self, user):
@@ -203,6 +206,8 @@
     router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
     subnet_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum subnet id")
     subnet = models.CharField(max_length=32, blank=True)
+    start_ip = models.CharField(max_length=32, blank=True)
+    stop_ip = models.CharField(max_length=32, blank=True)
 
     class Meta:
         unique_together = ('network', 'controller')
diff --git a/xos/core/models/node.py b/xos/core/models/node.py
index 5496d6b..52d33e8 100644
--- a/xos/core/models/node.py
+++ b/xos/core/models/node.py
@@ -2,7 +2,7 @@
 from django.db import models
 from core.models import PlCoreBase
 from core.models.plcorebase import StrippedCharField
-from core.models import Site, SiteDeployment, SitePrivilege
+from core.models.site import Site, SiteDeployment, SitePrivilege
 from core.models import Tag
 from django.contrib.contenttypes import generic
 
@@ -13,6 +13,7 @@
     site_deployment = models.ForeignKey(SiteDeployment, related_name='nodes')
     site = models.ForeignKey(Site, null=True, blank=True, related_name='nodes')
     tags = generic.GenericRelation(Tag)
+#    default = models.BooleanField(default=False, help_text="make this a default node to use when creating new instances")
 
     def __unicode__(self):  return u'%s' % (self.name)
 
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 6ece1b3..5d8fb3d 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -431,11 +431,15 @@
 
     def pick(self):
         from core.models import Node
-        nodes = list(Node.objects.all())
-
+#        nodes = list(Node.objects.all())
+        if not self.slice.default_node:
+            nodes = list(Node.objects.all())
+            nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        else:
+            nodes = list(Node.objects.filter(name = self.slice.default_node))
         # TODO: logic to filter nodes by which nodes are up, and which
         #   nodes the slice can instantiate on.
-        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+#        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
         return [nodes[0], None]
 
 class ContainerVmScheduler(Scheduler):
@@ -597,7 +601,6 @@
         from core.models import Image
         # Implement the logic here to pick the image that should be used when
         # instantiating the VM that will hold the container.
-
         slice = self.provider_service.slices.all()
         if not slice:
             raise XOSProgrammingError("provider service has no slice")
@@ -609,7 +612,7 @@
             look_for_images = self.LOOK_FOR_IMAGES
 
         for image_name in look_for_images:
-            images = Image.objects.filter(name = image_name)
+            images = Image.objects.filter(name = slice.default_image)
             if images:
                 return images[0]
 
@@ -662,27 +665,31 @@
 
             if not instance:
                 slice = self.provider_service.slices.all()[0]
+                flavors = Flavor.objects.filter(name=slice.default_flavor)
+#                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No slice default flavor")
+#                    raise XOSConfigurationError("No m1.small flavor")
 
-                flavor = slice.default_flavor
-                if not flavor:
-                    flavors = Flavor.objects.filter(name="m1.small")
-                    if not flavors:
-                        raise XOSConfigurationError("No m1.small flavor")
-                    flavor = flavors[0]
-
+#                slice = self.provider_service.slices.all()[0]
+                default_flavor = slice.default_flavor
                 if slice.default_isolation == "container_vm":
                     (node, parent) = ContainerVmScheduler(slice).pick()
                 else:
                     (node, parent) = LeastLoadedNodeScheduler(slice).pick()
-
+#                     print "DEF_NODE", slice.default_node
+#                self.image = slice.default_image
                 instance = Instance(slice = slice,
                                 node = node,
+#                                image = slice.default_image,
                                 image = self.image,
                                 creator = self.creator,
                                 deployment = node.site_deployment.deployment,
-                                flavor = flavor,
+                                flavor = flavors[0],
+#                                flavor = slice.default_flavor,
                                 isolation = slice.default_isolation,
                                 parent = parent)
+                print "DEF_NODE", instance.node
                 self.save_instance(instance)
                 new_instance_created = True
 
diff --git a/xos/core/models/service.py.new b/xos/core/models/service.py.new
new file mode 100644
index 0000000..d3b6a7d
--- /dev/null
+++ b/xos/core/models/service.py.new
@@ -0,0 +1,791 @@
+from django.db import models
+from core.models import PlCoreBase,SingletonModel,PlCoreBaseManager
+from core.models.plcorebase import StrippedCharField
+from xos.exceptions import *
+from operator import attrgetter
+import json
+
+COARSE_KIND="coarse"
+
+class AttributeMixin(object):
+    # helper for extracting things from a json-encoded service_specific_attribute
+    def get_attribute(self, name, default=None):
+        if self.service_specific_attribute:
+            attributes = json.loads(self.service_specific_attribute)
+        else:
+            attributes = {}
+        return attributes.get(name, default)
+
+    def set_attribute(self, name, value):
+        if self.service_specific_attribute:
+            attributes = json.loads(self.service_specific_attribute)
+        else:
+            attributes = {}
+        attributes[name]=value
+        self.service_specific_attribute = json.dumps(attributes)
+
+    def get_initial_attribute(self, name, default=None):
+        if self._initial["service_specific_attribute"]:
+            attributes = json.loads(self._initial["service_specific_attribute"])
+        else:
+            attributes = {}
+        return attributes.get(name, default)
+
+    @classmethod
+    def setup_simple_attributes(cls):
+        for (attrname, default) in cls.simple_attributes:
+            setattr(cls, attrname, property(lambda self, attrname=attrname, default=default: self.get_attribute(attrname, default),
+                                            lambda self, value, attrname=attrname: self.set_attribute(attrname, value),
+                                            None,
+                                            attrname))
+
+class Service(PlCoreBase, AttributeMixin):
+    # when subclassing a service, redefine KIND to describe the new service
+    KIND = "generic"
+
+    description = models.TextField(max_length=254,null=True, blank=True,help_text="Description of Service")
+    enabled = models.BooleanField(default=True)
+    kind = StrippedCharField(max_length=30, help_text="Kind of service", default=KIND)
+    name = StrippedCharField(max_length=30, help_text="Service Name")
+    versionNumber = StrippedCharField(max_length=30, help_text="Version of Service Definition")
+    published = models.BooleanField(default=True)
+    view_url = StrippedCharField(blank=True, null=True, max_length=1024)
+    icon_url = StrippedCharField(blank=True, null=True, max_length=1024)
+    public_key = models.TextField(null=True, blank=True, max_length=1024, help_text="Public key string")
+    private_key_fn = StrippedCharField(blank=True, null=True, max_length=1024)
+
+    # Service_specific_attribute and service_specific_id are opaque to XOS
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(Service, self).__init__(*args, **kwargs)
+
+    @classmethod
+    def get_service_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_deleted_service_objects(cls):
+        return cls.deleted_objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_service_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            service_ids = [sp.slice.id for sp in ServicePrivilege.objects.filter(user=user)]
+            return cls.objects.filter(id__in=service_ids)
+
+    @property
+    def serviceattribute_dict(self):
+        attrs = {}
+        for attr in self.serviceattributes.all():
+            attrs[attr.name] = attr.value
+        return attrs
+
+    def __unicode__(self): return u'%s' % (self.name)
+
+    def can_update(self, user):
+        return user.can_update_service(self, allow=['admin'])
+
+    def get_scalable_nodes(self, slice, max_per_node=None, exclusive_slices=[]):
+        """
+             Get a list of nodes that can be used to scale up a slice.
+
+                slice - slice to scale up
+                max_per_node - maximum numbers of instances that 'slice' can have on a single node
+                exclusive_slices - list of slices that must have no nodes in common with 'slice'.
+        """
+
+        from core.models import Node, Instance # late import to get around order-of-imports constraint in __init__.py
+
+        nodes = list(Node.objects.all())
+
+        conflicting_instances = Instance.objects.filter(slice__in = exclusive_slices)
+        conflicting_nodes = Node.objects.filter(instances__in = conflicting_instances)
+
+        nodes = [x for x in nodes if x not in conflicting_nodes]
+
+        # If max_per_node is set, then limit the number of instances this slice
+        # can have on a single node.
+        if max_per_node:
+            acceptable_nodes = []
+            for node in nodes:
+                existing_count = node.instances.filter(slice=slice).count()
+                if existing_count < max_per_node:
+                    acceptable_nodes.append(node)
+            nodes = acceptable_nodes
+
+        return nodes
+
+    def pick_node(self, slice, max_per_node=None, exclusive_slices=[]):
+        # Pick the best node to scale up a slice.
+
+        nodes = self.get_scalable_nodes(slice, max_per_node, exclusive_slices)
+        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        if not nodes:
+            return None
+        return nodes[0]
+
+    def adjust_scale(self, slice_hint, scale, max_per_node=None, exclusive_slices=[]):
+        from core.models import Instance # late import to get around order-of-imports constraint in __init__.py
+
+        slices = [x for x in self.slices.all() if slice_hint in x.name]
+        for slice in slices:
+            while slice.instances.all().count() > scale:
+                s = slice.instances.all()[0]
+                # print "drop instance", s
+                s.delete()
+
+            while slice.instances.all().count() < scale:
+                node = self.pick_node(slice, max_per_node, exclusive_slices)
+                if not node:
+                    # no more available nodes
+                    break
+
+                image = slice.default_image
+                if not image:
+                    raise XOSConfigurationError("No default_image for slice %s" % slice.name)
+
+                flavor = slice.default_flavor
+                if not flavor:
+                    raise XOSConfigurationError("No default_flavor for slice %s" % slice.name)
+
+                s = Instance(slice=slice,
+                           node=node,
+                           creator=slice.creator,
+                           image=image,
+                           flavor=flavor,
+                           deployment=node.site_deployment.deployment)
+                s.save()
+
+                # print "add instance", s
+
+    def get_vtn_src_nets(self):
+        nets=[]
+        for slice in self.slices.all():
+            for ns in slice.networkslices.all():
+                if not ns.network:
+                    continue
+#                if ns.network.template.access in ["direct", "indirect"]:
+#                    # skip access networks; we want to use the private network
+#                    continue
+                if ns.network.name in ["wan_network", "lan_network"]:
+                    # we don't want to attach to the vCPE's lan or wan network
+                    # we only want to attach to its private network
+                    # TODO: fix hard-coding of network name
+                    continue
+                for cn in ns.network.controllernetworks.all():
+                    if cn.net_id:
+                        net = {"name": ns.network.name, "net_id": cn.net_id}
+                        nets.append(net)
+        return nets
+
+    def get_vtn_nets(self):
+        nets=[]
+        for slice in self.slices.all():
+            for ns in slice.networkslices.all():
+                if not ns.network:
+                    continue
+                if ns.network.template.access not in ["direct", "indirect"]:
+                    # skip anything that's not an access network
+                    continue
+                for cn in ns.network.controllernetworks.all():
+                    if cn.net_id:
+                        net = {"name": ns.network.name, "net_id": cn.net_id}
+                        nets.append(net)
+        return nets
+
+    def get_vtn_dependencies_nets(self):
+        provider_nets = []
+        for tenant in self.subscribed_tenants.all():
+            if tenant.provider_service:
+                for net in tenant.provider_service.get_vtn_nets():
+                    if not net in provider_nets:
+                        provider_nets.append(net)
+        return provider_nets
+
+    def get_vtn_dependencies_ids(self):
+        return [x["net_id"] for x in self.get_vtn_dependencies_nets()]
+
+    def get_vtn_dependencies_names(self):
+        return [x["name"]+"_"+x["net_id"] for x in self.get_vtn_dependencies_nets()]
+
+    def get_vtn_src_ids(self):
+        return [x["net_id"] for x in self.get_vtn_src_nets()]
+
+    def get_vtn_src_names(self):
+        return [x["name"]+"_"+x["net_id"] for x in self.get_vtn_src_nets()]
+
+
+class ServiceAttribute(PlCoreBase):
+    name = models.CharField(help_text="Attribute Name", max_length=128)
+    value = StrippedCharField(help_text="Attribute Value", max_length=1024)
+    service = models.ForeignKey(Service, related_name='serviceattributes', help_text="The Service this attribute is associated with")
+
+class ServiceRole(PlCoreBase):
+    ROLE_CHOICES = (('admin','Admin'),)
+    role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+
+    def __unicode__(self):  return u'%s' % (self.role)
+
+class ServicePrivilege(PlCoreBase):
+    user = models.ForeignKey('User', related_name='serviceprivileges')
+    service = models.ForeignKey('Service', related_name='serviceprivileges')
+    role = models.ForeignKey('ServiceRole',related_name='serviceprivileges')
+
+    class Meta:
+        unique_together =  ('user', 'service', 'role')
+
+    def __unicode__(self):  return u'%s %s %s' % (self.service, self.user, self.role)
+
+    def can_update(self, user):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        return self.service.can_update(user)
+
+    def save(self, *args, **kwds):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        super(ServicePrivilege, self).save(*args, **kwds)
+
+    def delete(self, *args, **kwds):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        super(ServicePrivilege, self).delete(*args, **kwds)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            qs = cls.objects.all()
+        else:
+            qs = cls.objects.filter(user=user)
+        return qs
+
+class TenantRoot(PlCoreBase, AttributeMixin):
+    """ A tenantRoot is one of the things that can sit at the root of a chain
+        of tenancy. This object represents a node.
+    """
+
+    KIND= "generic"
+    kind = StrippedCharField(max_length=30, default=KIND)
+    name = StrippedCharField(max_length=255, help_text="name", blank=True, null=True)
+
+    service_specific_attribute = models.TextField(blank=True, null=True)
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(TenantRoot, self).__init__(*args, **kwargs)
+
+    def __unicode__(self):
+        if not self.name:
+            return u"%s-tenant_root-#%s" % (str(self.kind), str(self.id))
+        else:
+            return self.name
+
+    def can_update(self, user):
+        return user.can_update_tenant_root(self, allow=['admin'])
+
+    def get_subscribed_tenants(self, tenant_class):
+        ids = self.subscribed_tenants.filter(kind=tenant_class.KIND)
+        return tenant_class.objects.filter(id__in = ids)
+
+    def get_newest_subscribed_tenant(self, kind):
+        st = list(self.get_subscribed_tenants(kind))
+        if not st:
+            return None
+        return sorted(st, key=attrgetter('id'))[0]
+
+    @classmethod
+    def get_tenant_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_tenant_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            tr_ids = [trp.tenant_root.id for trp in TenantRootPrivilege.objects.filter(user=user)]
+            return cls.objects.filter(id__in=tr_ids)
+
+class Tenant(PlCoreBase, AttributeMixin):
+    """ A tenant is a relationship between two entities, a subscriber and a
+        provider. This object represents an edge.
+
+        The subscriber can be a User, a Service, or a Tenant.
+
+        The provider is always a Service.
+
+        TODO: rename "Tenant" to "Tenancy"
+    """
+
+    CONNECTIVITY_CHOICES = (('public', 'Public'), ('private', 'Private'), ('na', 'Not Applicable'))
+
+    # when subclassing a service, redefine KIND to describe the new service
+    KIND = "generic"
+
+    kind = StrippedCharField(max_length=30, default=KIND)
+    provider_service = models.ForeignKey(Service, related_name='provided_tenants')
+
+    # The next four things are the various type of objects that can be subscribers of this Tenancy
+    # relationship. One and only one can be used at a time.
+    subscriber_service = models.ForeignKey(Service, related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_tenant = models.ForeignKey("Tenant", related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_user = models.ForeignKey("User", related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_root = models.ForeignKey("TenantRoot", related_name="subscribed_tenants", blank=True, null=True)
+
+    # Service_specific_attribute and service_specific_id are opaque to XOS
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
+
+    # Connect_method is only used by Coarse tenants
+    connect_method = models.CharField(null=False, blank=False, max_length=30, choices=CONNECTIVITY_CHOICES, default="na")
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(Tenant, self).__init__(*args, **kwargs)
+
+    def __unicode__(self):
+        return u"%s-tenant-%s" % (str(self.kind), str(self.id))
+
+    @classmethod
+    def get_tenant_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_tenant_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def get_deleted_tenant_objects(cls):
+        return cls.deleted_objects.filter(kind = cls.KIND)
+
+    @property
+    def tenantattribute_dict(self):
+        attrs = {}
+        for attr in self.tenantattributes.all():
+            attrs[attr.name] = attr.value
+        return attrs
+
+    # helper function to be used in subclasses that want to ensure service_specific_id is unique
+    def validate_unique_service_specific_id(self):
+        if self.pk is None:
+            if self.service_specific_id is None:
+                raise XOSMissingField("subscriber_specific_id is None, and it's a required field", fields={"service_specific_id": "cannot be none"})
+
+            conflicts = self.get_tenant_objects().filter(service_specific_id=self.service_specific_id)
+            if conflicts:
+                raise XOSDuplicateKey("service_specific_id %s already exists" % self.service_specific_id, fields={"service_specific_id": "duplicate key"})
+
+    def save(self, *args, **kwargs):
+        subCount = sum( [1 for e in [self.subscriber_service, self.subscriber_tenant, self.subscriber_user, self.subscriber_root] if e is not None])
+        if (subCount > 1):
+            raise XOSConflictingField("Only one of subscriber_service, subscriber_tenant, subscriber_user, subscriber_root should be set")
+
+        super(Tenant, self).save(*args, **kwargs)
+
+    def get_subscribed_tenants(self, tenant_class):
+        ids = self.subscribed_tenants.filter(kind=tenant_class.KIND)
+        return tenant_class.objects.filter(id__in = ids)
+
+    def get_newest_subscribed_tenant(self, kind):
+        st = list(self.get_subscribed_tenants(kind))
+        if not st:
+            return None
+        return sorted(st, key=attrgetter('id'))[0]
+
+class Scheduler(object):
+    # XOS Scheduler Abstract Base Class
+    # Used to implement schedulers that pick which node to put instances on
+
+    def __init__(self, slice):
+        self.slice = slice
+
+    def pick(self):
+        # this method should return a tuple (node, parent)
+        #    node is the node to instantiate on
+        #    parent is for container_vm instances only, and is the VM that will
+        #      hold the container
+
+        raise Exception("Abstract Base")
+
+class LeastLoadedNodeScheduler(Scheduler):
+    # This scheduler always return the node with the fewest number of instances.
+
+    def __init__(self, slice):
+        super(LeastLoadedNodeScheduler, self).__init__(slice)
+
+    def pick(self):
+        from core.models import Node
+#        nodes = list(Node.objects.all())
+        if not self.slice.default_node:
+            nodes = list(Node.objects.all())
+            nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        else:
+            nodes = list(Node.objects.filter(name = self.slice.default_node))
+        # TODO: logic to filter nodes by which nodes are up, and which
+        #   nodes the slice can instantiate on.
+#        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        return [nodes[0], None]
+
+class ContainerVmScheduler(Scheduler):
+    # This scheduler picks a VM in the slice with the fewest containers inside
+    # of it. If no VMs are suitable, then it creates a VM.
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    MAX_VM_PER_CONTAINER = 10
+
+    def __init__(self, slice):
+        super(ContainerVmScheduler, self).__init__(slice)
+
+    @property
+    def image(self):
+        from core.models import Image
+
+        look_for_images = self.LOOK_FOR_IMAGES
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No ContainerVM image (looked for %s)" % str(look_for_images))
+
+    def make_new_instance(self):
+        from core.models import Instance, Flavor
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise XOSConfigurationError("No m1.small flavor")
+
+        (node,parent) = LeastLoadedNodeScheduler(self.slice).pick()
+
+        instance = Instance(slice = self.slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.slice.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = "vm",
+                        parent = parent)
+        instance.save()
+        # We rely on a special naming convention to identify the VMs that will
+        # hold containers.
+        instance.name = "%s-outer-%s" % (instance.slice.name, instance.id)
+        instance.save()
+        return instance
+
+    def pick(self):
+        from core.models import Instance, Flavor
+
+        for vm in self.slice.instances.filter(isolation="vm"):
+            avail_vms = []
+            if (vm.name.startswith("%s-outer-" % self.slice.name)):
+                container_count = Instance.objects.filter(parent=vm).count()
+                if (container_count < self.MAX_VM_PER_CONTAINER):
+                    avail_vms.append( (vm, container_count) )
+            # sort by least containers-per-vm
+            avail_vms = sorted(avail_vms, key = lambda x: x[1])
+            print "XXX", avail_vms
+            if avail_vms:
+                instance = avail_vms[0][0]
+                return (instance.node, instance)
+
+        instance = self.make_new_instance()
+        return (instance.node, instance)
+
+class TenantWithContainer(Tenant):
+    """ A tenant that manages a container """
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    LOOK_FOR_CONTAINER_IMAGES=["docker-vcpe"]
+
+    class Meta:
+        proxy = True
+
+    def __init__(self, *args, **kwargs):
+        super(TenantWithContainer, self).__init__(*args, **kwargs)
+        self.cached_instance=None
+        self.orig_instance_id = self.get_initial_attribute("instance_id")
+
+    @property
+    def instance(self):
+        from core.models import Instance
+        if getattr(self, "cached_instance", None):
+            return self.cached_instance
+        instance_id=self.get_attribute("instance_id")
+        if not instance_id:
+            return None
+        instances=Instance.objects.filter(id=instance_id)
+        if not instances:
+            return None
+        instance=instances[0]
+        instance.caller = self.creator
+        self.cached_instance = instance
+        return instance
+
+    @instance.setter
+    def instance(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("instance_id", None)):
+            self.cached_instance=None
+        self.set_attribute("instance_id", value)
+
+    @property
+    def external_hostname(self):
+        return self.get_attribute("external_hostname", "")
+
+    @external_hostname.setter
+    def external_hostname(self, value):
+        self.set_attribute("external_hostname", value)
+
+    @property
+    def external_container(self):
+        return self.get_attribute("external_container", "")
+
+    @external_container.setter
+    def external_container(self, value):
+        self.set_attribute("external_container", value)
+
+    @property
+    def creator(self):
+        from core.models import User
+        if getattr(self, "cached_creator", None):
+            return self.cached_creator
+        creator_id=self.get_attribute("creator_id")
+        if not creator_id:
+            return None
+        users=User.objects.filter(id=creator_id)
+        if not users:
+            return None
+        user=users[0]
+        self.cached_creator = users[0]
+        return user
+
+    @creator.setter
+    def creator(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("creator_id", None)):
+            self.cached_creator=None
+        self.set_attribute("creator_id", value)
+
+    @property
+    def image(self):
+        from core.models import Image
+        # Implement the logic here to pick the image that should be used when
+        # instantiating the VM that will hold the container.
+
+        slice = self.provider_service.slices.all()
+        if not slice:
+            raise XOSProgrammingError("provider service has no slice")
+        slice = slice[0]
+
+        if slice.default_isolation in ["container", "container_vm"]:
+            look_for_images = self.LOOK_FOR_CONTAINER_IMAGES
+        else:
+            look_for_images = self.LOOK_FOR_IMAGES
+
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No VPCE image (looked for %s)" % str(look_for_images))
+
+    def save_instance(self, instance):
+        # Override this function to do custom pre-save or post-save processing,
+        # such as creating ports for containers.
+        instance.save()
+
+    def pick_least_loaded_instance_in_slice(self, slices):
+        for slice in slices:
+            if slice.instances.all().count() > 0:
+                for instance in slice.instances.all():
+                     #Pick the first instance that has fewer than 5 tenants
+                     if self.count_of_tenants_of_an_instance(instance) < 5:
+                         return instance
+        return None
+
+    #TODO: Ideally the tenant count for an instance should be maintained using a
+    #many-to-one relationship attribute; however, since this model is a proxy it
+    #does not permit any new attributes to be defined. Find a better solution.
+    def count_of_tenants_of_an_instance(self, instance):
+        tenant_count = 0
+        for tenant in self.get_tenant_objects().all():
+            if tenant.get_attribute("instance_id", None) == instance.id:
+                tenant_count += 1
+        return tenant_count
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        if (self.instance is not None): #  and (self.instance.image != self.image):
+            self.instance.delete()
+            self.instance = None
+
+        if self.instance is None:
+            if not self.provider_service.slices.count():
+                raise XOSConfigurationError("The service has no slices")
+
+            new_instance_created = False
+            instance = None
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                #Find if any existing instances can be used for this tenant
+                slices = self.provider_service.slices.all()
+                instance = self.pick_least_loaded_instance_in_slice(slices)
+
+            if not instance:
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
+
+                slice = self.provider_service.slices.all()[0]
+
+                if slice.default_isolation == "container_vm":
+                    (node, parent) = ContainerVmScheduler(slice).pick()
+                else:
+                    (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+
+                instance = Instance(slice = slice,
+                                node = node,
+#                                image = slice.default_image,
+                                image = self.image,
+                                creator = self.creator,
+                                deployment = node.site_deployment.deployment,
+#                                flavor = flavors[0],
+                                flavor = slice.default_flavor,
+                                isolation = slice.default_isolation,
+                                parent = parent)
+                self.save_instance(instance)
+                new_instance_created = True
+
+            try:
+                self.instance = instance
+                super(TenantWithContainer, self).save()
+            except:
+                if new_instance_created:
+                    instance.delete()
+                raise
+
+    def cleanup_container(self):
+        if self.instance:
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                #Delete the instance only if this is last tenant in that instance
+                tenant_count = self.count_of_tenants_of_an_instance(self.instance)
+                if tenant_count == 0:
+                    self.instance.delete()
+            else:
+                self.instance.delete()
+            self.instance = None
+
+    def save(self, *args, **kwargs):
+        if (not self.creator) and (hasattr(self, "caller")) and (self.caller):
+            self.creator = self.caller
+        super(TenantWithContainer, self).save(*args, **kwargs)
+
+class CoarseTenant(Tenant):
+    """ TODO: rename "CoarseTenant" --> "StaticTenant" """
+    class Meta:
+        proxy = True
+
+    KIND = COARSE_KIND
+
+    def save(self, *args, **kwargs):
+        if (not self.subscriber_service):
+            raise XOSValidationError("subscriber_service cannot be null")
+        if (self.subscriber_tenant or self.subscriber_user):
+            raise XOSValidationError("subscriber_tenant and subscriber_user must be null")
+
+        super(CoarseTenant,self).save()
+
+class Subscriber(TenantRoot):
+    """ Intermediate class for TenantRoots that are to be Subscribers """
+
+    class Meta:
+        proxy = True
+
+    KIND = "Subscriber"
+
+class Provider(TenantRoot):
+    """ Intermediate class for TenantRoots that are to be Providers """
+
+    class Meta:
+        proxy = True
+
+    KIND = "Provider"
+
+class TenantAttribute(PlCoreBase):
+    name = models.CharField(help_text="Attribute Name", max_length=128)
+    value = models.TextField(help_text="Attribute Value")
+    tenant = models.ForeignKey(Tenant, related_name='tenantattributes', help_text="The Tenant this attribute is associated with")
+
+class TenantRootRole(PlCoreBase):
+    ROLE_CHOICES = (('admin','Admin'), ('access','Access'))
+
+    role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+
+    def __unicode__(self):  return u'%s' % (self.role)
+
+class TenantRootPrivilege(PlCoreBase):
+    user = models.ForeignKey('User', related_name="tenant_root_privileges")
+    tenant_root = models.ForeignKey('TenantRoot', related_name="tenant_root_privileges")
+    role = models.ForeignKey('TenantRootRole', related_name="tenant_root_privileges")
+
+    class Meta:
+        unique_together = ('user', 'tenant_root', 'role')
+
+    def __unicode__(self):  return u'%s %s %s' % (self.tenant_root, self.user, self.role)
+
+    def save(self, *args, **kwds):
+        if not self.user.is_active:
+            raise PermissionDenied, "Cannot modify role(s) of a disabled user"
+        super(TenantRootPrivilege, self).save(*args, **kwds)
+
+    def can_update(self, user):
+        return user.can_update_tenant_root_privilege(self)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            # User can see his own privilege
+            trp_ids = [trp.id for trp in cls.objects.filter(user=user)]
+
+            # A slice admin can see the SlicePrivileges for his Slice
+            for priv in cls.objects.filter(user=user, role__role="admin"):
+                trp_ids.extend( [trp.id for trp in cls.objects.filter(tenant_root=priv.tenant_root)] )
+
+            return cls.objects.filter(id__in=trp_ids)
+
+
diff --git a/xos/core/models/service.py.old b/xos/core/models/service.py.old
new file mode 100644
index 0000000..1a1b6ef
--- /dev/null
+++ b/xos/core/models/service.py.old
@@ -0,0 +1,785 @@
+from django.db import models
+from core.models import PlCoreBase,SingletonModel,PlCoreBaseManager
+from core.models.plcorebase import StrippedCharField
+from xos.exceptions import *
+from operator import attrgetter
+import json
+
+COARSE_KIND="coarse"
+
+class AttributeMixin(object):
+    # helper for extracting things from a json-encoded service_specific_attribute
+    def get_attribute(self, name, default=None):
+        if self.service_specific_attribute:
+            attributes = json.loads(self.service_specific_attribute)
+        else:
+            attributes = {}
+        return attributes.get(name, default)
+
+    def set_attribute(self, name, value):
+        if self.service_specific_attribute:
+            attributes = json.loads(self.service_specific_attribute)
+        else:
+            attributes = {}
+        attributes[name]=value
+        self.service_specific_attribute = json.dumps(attributes)
+
+    def get_initial_attribute(self, name, default=None):
+        if self._initial["service_specific_attribute"]:
+            attributes = json.loads(self._initial["service_specific_attribute"])
+        else:
+            attributes = {}
+        return attributes.get(name, default)
+
+    @classmethod
+    def setup_simple_attributes(cls):
+        for (attrname, default) in cls.simple_attributes:
+            setattr(cls, attrname, property(lambda self, attrname=attrname, default=default: self.get_attribute(attrname, default),
+                                            lambda self, value, attrname=attrname: self.set_attribute(attrname, value),
+                                            None,
+                                            attrname))
+
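A minimal sketch of how the mixin's JSON-backed attributes behave (illustrative only; the class below is hypothetical and assumes this module's context, i.e. json imported and AttributeMixin defined). In practice the pattern is used by proxy models such as Tenant subclasses that declare simple_attributes and call setup_simple_attributes() once at module load.

    # Hypothetical example, not part of this commit
    class ExampleThing(AttributeMixin):
        service_specific_attribute = None
        simple_attributes = (("display_message", "hello"),)

    ExampleThing.setup_simple_attributes()

    t = ExampleThing()
    print t.display_message              # -> "hello" (the declared default)
    t.display_message = "hi"             # stored as JSON in service_specific_attribute
    print t.service_specific_attribute   # -> '{"display_message": "hi"}'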
+class Service(PlCoreBase, AttributeMixin):
+    # when subclassing a service, redefine KIND to describe the new service
+    KIND = "generic"
+
+    description = models.TextField(max_length=254,null=True, blank=True,help_text="Description of Service")
+    enabled = models.BooleanField(default=True)
+    kind = StrippedCharField(max_length=30, help_text="Kind of service", default=KIND)
+    name = StrippedCharField(max_length=30, help_text="Service Name")
+    versionNumber = StrippedCharField(max_length=30, help_text="Version of Service Definition")
+    published = models.BooleanField(default=True)
+    view_url = StrippedCharField(blank=True, null=True, max_length=1024)
+    icon_url = StrippedCharField(blank=True, null=True, max_length=1024)
+    public_key = models.TextField(null=True, blank=True, max_length=1024, help_text="Public key string")
+    private_key_fn = StrippedCharField(blank=True, null=True, max_length=1024)
+
+    # Service_specific_attribute and service_specific_id are opaque to XOS
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(Service, self).__init__(*args, **kwargs)
+
+    @classmethod
+    def get_service_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_deleted_service_objects(cls):
+        return cls.deleted_objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_service_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            service_ids = [sp.service.id for sp in ServicePrivilege.objects.filter(user=user)]
+            return cls.objects.filter(id__in=service_ids)
+
+    @property
+    def serviceattribute_dict(self):
+        attrs = {}
+        for attr in self.serviceattributes.all():
+            attrs[attr.name] = attr.value
+        return attrs
+
+    def __unicode__(self): return u'%s' % (self.name)
+
+    def can_update(self, user):
+        return user.can_update_service(self, allow=['admin'])
+
+    def get_scalable_nodes(self, slice, max_per_node=None, exclusive_slices=[]):
+        """
+             Get a list of nodes that can be used to scale up a slice.
+
+                slice - slice to scale up
+                max_per_node - maximum numbers of instances that 'slice' can have on a single node
+                exclusive_slices - list of slices that must have no nodes in common with 'slice'.
+        """
+
+        from core.models import Node, Instance # late import to get around order-of-imports constraint in __init__.py
+
+        nodes = list(Node.objects.all())
+
+        conflicting_instances = Instance.objects.filter(slice__in = exclusive_slices)
+        conflicting_nodes = Node.objects.filter(instances__in = conflicting_instances)
+
+        nodes = [x for x in nodes if x not in conflicting_nodes]
+
+        # If max_per_node is set, then limit the number of instances this slice
+        # can have on a single node.
+        if max_per_node:
+            acceptable_nodes = []
+            for node in nodes:
+                existing_count = node.instances.filter(slice=slice).count()
+                if existing_count < max_per_node:
+                    acceptable_nodes.append(node)
+            nodes = acceptable_nodes
+
+        return nodes
+
+    def pick_node(self, slice, max_per_node=None, exclusive_slices=[]):
+        # Pick the best node to scale up a slice.
+
+        nodes = self.get_scalable_nodes(slice, max_per_node, exclusive_slices)
+        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        if not nodes:
+            return None
+        return nodes[0]
+
+    def adjust_scale(self, slice_hint, scale, max_per_node=None, exclusive_slices=[]):
+        from core.models import Instance # late import to get around order-of-imports constraint in __init__.py
+
+        slices = [x for x in self.slices.all() if slice_hint in x.name]
+        for slice in slices:
+            while slice.instances.all().count() > scale:
+                s = slice.instances.all()[0]
+                # print "drop instance", s
+                s.delete()
+
+            while slice.instances.all().count() < scale:
+                node = self.pick_node(slice, max_per_node, exclusive_slices)
+                if not node:
+                    # no more available nodes
+                    break
+
+                image = slice.default_image
+                if not image:
+                    raise XOSConfigurationError("No default_image for slice %s" % slice.name)
+
+                flavor = slice.default_flavor
+                if not flavor:
+                    raise XOSConfigurationError("No default_flavor for slice %s" % slice.name)
+
+                s = Instance(slice=slice,
+                           node=node,
+                           creator=slice.creator,
+                           image=image,
+                           flavor=flavor,
+                           deployment=node.site_deployment.deployment)
+                s.save()
+
+                # print "add instance", s
+
+    def get_vtn_src_nets(self):
+        nets=[]
+        for slice in self.slices.all():
+            for ns in slice.networkslices.all():
+                if not ns.network:
+                    continue
+#                if ns.network.template.access in ["direct", "indirect"]:
+#                    # skip access networks; we want to use the private network
+#                    continue
+                if ns.network.name in ["wan_network", "lan_network"]:
+                    # we don't want to attach to the vCPE's lan or wan network
+                    # we only want to attach to its private network
+                    # TODO: fix hard-coding of network name
+                    continue
+                for cn in ns.network.controllernetworks.all():
+                    if cn.net_id:
+                        net = {"name": ns.network.name, "net_id": cn.net_id}
+                        nets.append(net)
+        return nets
+
+    def get_vtn_nets(self):
+        nets=[]
+        for slice in self.slices.all():
+            for ns in slice.networkslices.all():
+                if not ns.network:
+                    continue
+                if ns.network.template.access not in ["direct", "indirect"]:
+                    # skip anything that's not an access network
+                    continue
+                for cn in ns.network.controllernetworks.all():
+                    if cn.net_id:
+                        net = {"name": ns.network.name, "net_id": cn.net_id}
+                        nets.append(net)
+        return nets
+
+    def get_vtn_dependencies_nets(self):
+        provider_nets = []
+        for tenant in self.subscribed_tenants.all():
+            if tenant.provider_service:
+                for net in tenant.provider_service.get_vtn_nets():
+                    if not net in provider_nets:
+                        provider_nets.append(net)
+        return provider_nets
+
+    def get_vtn_dependencies_ids(self):
+        return [x["net_id"] for x in self.get_vtn_dependencies_nets()]
+
+    def get_vtn_dependencies_names(self):
+        return [x["name"]+"_"+x["net_id"] for x in self.get_vtn_dependencies_nets()]
+
+    def get_vtn_src_ids(self):
+        return [x["net_id"] for x in self.get_vtn_src_nets()]
+
+    def get_vtn_src_names(self):
+        return [x["name"]+"_"+x["net_id"] for x in self.get_vtn_src_nets()]
+
+
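As a quick illustration of the kind/proxy pattern that Service is built around (the subclass below is hypothetical; the concrete example in this commit is MCORDService in xos/services/mcordservice/models.py): a subclass overrides KIND and sets proxy = True, so its rows live in the common Service table and get_service_objects() filters them by kind.

    # Hypothetical subclass, for illustration only
    class ExampleService(Service):
        KIND = "exampleservice"

        class Meta:
            proxy = True

    # Rows created through ExampleService() are stored with kind="exampleservice"
    # (set by Service.__init__ above), so:
    #   ExampleService.get_service_objects()          -> only example services
    #   ExampleService.get_service_objects_by_user(u) -> the same, ACL-filtered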
+class ServiceAttribute(PlCoreBase):
+    name = models.CharField(help_text="Attribute Name", max_length=128)
+    value = StrippedCharField(help_text="Attribute Value", max_length=1024)
+    service = models.ForeignKey(Service, related_name='serviceattributes', help_text="The Service this attribute is associated with")
+
+class ServiceRole(PlCoreBase):
+    ROLE_CHOICES = (('admin','Admin'),)
+    role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+
+    def __unicode__(self):  return u'%s' % (self.role)
+
+class ServicePrivilege(PlCoreBase):
+    user = models.ForeignKey('User', related_name='serviceprivileges')
+    service = models.ForeignKey('Service', related_name='serviceprivileges')
+    role = models.ForeignKey('ServiceRole',related_name='serviceprivileges')
+
+    class Meta:
+        unique_together =  ('user', 'service', 'role')
+
+    def __unicode__(self):  return u'%s %s %s' % (self.service, self.user, self.role)
+
+    def can_update(self, user):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        return self.service.can_update(user)
+
+    def save(self, *args, **kwds):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        super(ServicePrivilege, self).save(*args, **kwds)
+
+    def delete(self, *args, **kwds):
+        if not self.service.enabled:
+            raise PermissionDenied, "Cannot modify permission(s) of a disabled service"
+        super(ServicePrivilege, self).delete(*args, **kwds)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            qs = cls.objects.all()
+        else:
+            qs = cls.objects.filter(user=user)
+        return qs
+
+class TenantRoot(PlCoreBase, AttributeMixin):
+    """ A tenantRoot is one of the things that can sit at the root of a chain
+        of tenancy. This object represents a node.
+    """
+
+    KIND= "generic"
+    kind = StrippedCharField(max_length=30, default=KIND)
+    name = StrippedCharField(max_length=255, help_text="name", blank=True, null=True)
+
+    service_specific_attribute = models.TextField(blank=True, null=True)
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(TenantRoot, self).__init__(*args, **kwargs)
+
+    def __unicode__(self):
+        if not self.name:
+            return u"%s-tenant_root-#%s" % (str(self.kind), str(self.id))
+        else:
+            return self.name
+
+    def can_update(self, user):
+        return user.can_update_tenant_root(self, allow=['admin'])
+
+    def get_subscribed_tenants(self, tenant_class):
+        ids = self.subscribed_tenants.filter(kind=tenant_class.KIND)
+        return tenant_class.objects.filter(id__in = ids)
+
+    def get_newest_subscribed_tenant(self, kind):
+        st = list(self.get_subscribed_tenants(kind))
+        if not st:
+            return None
+        return sorted(st, key=attrgetter('id'))[-1]
+
+    @classmethod
+    def get_tenant_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_tenant_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            tr_ids = [trp.tenant_root.id for trp in TenantRootPrivilege.objects.filter(user=user)]
+            return cls.objects.filter(id__in=tr_ids)
+
+class Tenant(PlCoreBase, AttributeMixin):
+    """ A tenant is a relationship between two entities, a subscriber and a
+        provider. This object represents an edge.
+
+        The subscriber can be a User, a Service, or a Tenant.
+
+        The provider is always a Service.
+
+        TODO: rename "Tenant" to "Tenancy"
+    """
+
+    CONNECTIVITY_CHOICES = (('public', 'Public'), ('private', 'Private'), ('na', 'Not Applicable'))
+
+    # when subclassing a service, redefine KIND to describe the new service
+    KIND = "generic"
+
+    kind = StrippedCharField(max_length=30, default=KIND)
+    provider_service = models.ForeignKey(Service, related_name='provided_tenants')
+
+    # The next four things are the various type of objects that can be subscribers of this Tenancy
+    # relationship. One and only one can be used at a time.
+    subscriber_service = models.ForeignKey(Service, related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_tenant = models.ForeignKey("Tenant", related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_user = models.ForeignKey("User", related_name='subscribed_tenants', blank=True, null=True)
+    subscriber_root = models.ForeignKey("TenantRoot", related_name="subscribed_tenants", blank=True, null=True)
+
+    # Service_specific_attribute and service_specific_id are opaque to XOS
+    service_specific_id = StrippedCharField(max_length=30, blank=True, null=True)
+    service_specific_attribute = models.TextField(blank=True, null=True)
+
+    # Connect_method is only used by Coarse tenants
+    connect_method = models.CharField(null=False, blank=False, max_length=30, choices=CONNECTIVITY_CHOICES, default="na")
+
+    def __init__(self, *args, **kwargs):
+        # for subclasses, set the default kind appropriately
+        self._meta.get_field("kind").default = self.KIND
+        super(Tenant, self).__init__(*args, **kwargs)
+
+    def __unicode__(self):
+        return u"%s-tenant-%s" % (str(self.kind), str(self.id))
+
+    @classmethod
+    def get_tenant_objects(cls):
+        return cls.objects.filter(kind = cls.KIND)
+
+    @classmethod
+    def get_tenant_objects_by_user(cls, user):
+        return cls.select_by_user(user).filter(kind = cls.KIND)
+
+    @classmethod
+    def get_deleted_tenant_objects(cls):
+        return cls.deleted_objects.filter(kind = cls.KIND)
+
+    @property
+    def tenantattribute_dict(self):
+        attrs = {}
+        for attr in self.tenantattributes.all():
+            attrs[attr.name] = attr.value
+        return attrs
+
+    # helper function to be used in subclasses that want to ensure service_specific_id is unique
+    def validate_unique_service_specific_id(self):
+        if self.pk is None:
+            if self.service_specific_id is None:
+                raise XOSMissingField("subscriber_specific_id is None, and it's a required field", fields={"service_specific_id": "cannot be none"})
+
+            conflicts = self.get_tenant_objects().filter(service_specific_id=self.service_specific_id)
+            if conflicts:
+                raise XOSDuplicateKey("service_specific_id %s already exists" % self.service_specific_id, fields={"service_specific_id": "duplicate key"})
+
+    def save(self, *args, **kwargs):
+        subCount = sum( [1 for e in [self.subscriber_service, self.subscriber_tenant, self.subscriber_user, self.subscriber_root] if e is not None])
+        if (subCount > 1):
+            raise XOSConflictingField("Only one of subscriber_service, subscriber_tenant, subscriber_user, subscriber_root should be set")
+
+        super(Tenant, self).save(*args, **kwargs)
+
+    def get_subscribed_tenants(self, tenant_class):
+        ids = self.subscribed_tenants.filter(kind=tenant_class.KIND)
+        return tenant_class.objects.filter(id__in = ids)
+
+    def get_newest_subscribed_tenant(self, kind):
+        st = list(self.get_subscribed_tenants(kind))
+        if not st:
+            return None
+        return sorted(st, key=attrgetter('id'))[-1]
+
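To make the subscriber/provider wiring concrete, a hedged sketch of creating a Tenant edge between two existing services (the service names are assumptions, not part of this commit); Tenant.save() above enforces that at most one of the subscriber_* fields is set.

    # Illustration only -- run inside the XOS Django shell with two services present
    provider = Service.objects.get(name="provider_svc")       # assumed name
    subscriber = Service.objects.get(name="subscriber_svc")   # assumed name

    edge = Tenant(provider_service=provider, subscriber_service=subscriber)
    edge.save()   # also setting subscriber_tenant or subscriber_user here
                  # would raise XOSConflictingField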
+class Scheduler(object):
+    # XOS Scheduler Abstract Base Class
+    # Used to implement schedulers that pick which node to put instances on
+
+    def __init__(self, slice):
+        self.slice = slice
+
+    def pick(self):
+        # this method should return a tuple (node, parent)
+        #    node is the node to instantiate on
+        #    parent is for container_vm instances only, and is the VM that will
+        #      hold the container
+
+        raise Exception("Abstract Base")
+
+class LeastLoadedNodeScheduler(Scheduler):
+    # This scheduler always returns the node with the fewest instances.
+
+    def __init__(self, slice):
+        super(LeastLoadedNodeScheduler, self).__init__(slice)
+
+    def pick(self):
+        from core.models import Node
+        nodes = list(Node.objects.all())
+
+        # TODO: logic to filter nodes by which nodes are up, and which
+        #   nodes the slice can instantiate on.
+        nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        return (nodes[0], None)
+
+class ContainerVmScheduler(Scheduler):
+    # This scheduler picks a VM in the slice with the fewest containers inside
+    # of it. If no VMs are suitable, then it creates a VM.
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    # maximum number of containers to pack into a single VM
+    MAX_VM_PER_CONTAINER = 10
+
+    def __init__(self, slice):
+        super(ContainerVmScheduler, self).__init__(slice)
+
+    @property
+    def image(self):
+        from core.models import Image
+
+        look_for_images = self.LOOK_FOR_IMAGES
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No ContainerVM image (looked for %s)" % str(look_for_images))
+
+    def make_new_instance(self):
+        from core.models import Instance, Flavor
+
+        flavors = Flavor.objects.filter(name="m1.small")
+        if not flavors:
+            raise XOSConfigurationError("No m1.small flavor")
+
+        (node,parent) = LeastLoadedNodeScheduler(self.slice).pick()
+
+        instance = Instance(slice = self.slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.slice.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = "vm",
+                        parent = parent)
+        instance.save()
+        # We rely on a special naming convention to identify the VMs that will
+        # hold containers.
+        instance.name = "%s-outer-%s" % (instance.slice.name, instance.id)
+        instance.save()
+        return instance
+
+    def pick(self):
+        from core.models import Instance, Flavor
+
+        # Collect every suitable "outer" VM in the slice, then choose the one
+        # holding the fewest containers.
+        avail_vms = []
+        for vm in self.slice.instances.filter(isolation="vm"):
+            if (vm.name.startswith("%s-outer-" % self.slice.name)):
+                container_count = Instance.objects.filter(parent=vm).count()
+                if (container_count < self.MAX_VM_PER_CONTAINER):
+                    avail_vms.append( (vm, container_count) )
+
+        # sort by least containers-per-vm
+        avail_vms = sorted(avail_vms, key = lambda x: x[1])
+        if avail_vms:
+            instance = avail_vms[0][0]
+            return (instance.node, instance)
+
+        instance = self.make_new_instance()
+        return (instance.node, instance)
+
+class TenantWithContainer(Tenant):
+    """ A tenant that manages a container """
+
+    # this is a hack and should be replaced by something smarter...
+    LOOK_FOR_IMAGES=["ubuntu-vcpe4",        # ONOS demo machine -- preferred vcpe image
+                     "Ubuntu 14.04 LTS",    # portal
+                     "Ubuntu-14.04-LTS",    # ONOS demo machine
+                     "trusty-server-multi-nic", # CloudLab
+                    ]
+
+    LOOK_FOR_CONTAINER_IMAGES=["docker-vcpe"]
+
+    class Meta:
+        proxy = True
+
+    def __init__(self, *args, **kwargs):
+        super(TenantWithContainer, self).__init__(*args, **kwargs)
+        self.cached_instance=None
+        self.orig_instance_id = self.get_initial_attribute("instance_id")
+
+    @property
+    def instance(self):
+        from core.models import Instance
+        if getattr(self, "cached_instance", None):
+            return self.cached_instance
+        instance_id=self.get_attribute("instance_id")
+        if not instance_id:
+            return None
+        instances=Instance.objects.filter(id=instance_id)
+        if not instances:
+            return None
+        instance=instances[0]
+        instance.caller = self.creator
+        self.cached_instance = instance
+        return instance
+
+    @instance.setter
+    def instance(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("instance_id", None)):
+            self.cached_instance=None
+        self.set_attribute("instance_id", value)
+
+    @property
+    def external_hostname(self):
+        return self.get_attribute("external_hostname", "")
+
+    @external_hostname.setter
+    def external_hostname(self, value):
+        self.set_attribute("external_hostname", value)
+
+    @property
+    def external_container(self):
+        return self.get_attribute("external_container", "")
+
+    @external_container.setter
+    def external_container(self, value):
+        self.set_attribute("external_container", value)
+
+    @property
+    def creator(self):
+        from core.models import User
+        if getattr(self, "cached_creator", None):
+            return self.cached_creator
+        creator_id=self.get_attribute("creator_id")
+        if not creator_id:
+            return None
+        users=User.objects.filter(id=creator_id)
+        if not users:
+            return None
+        user=users[0]
+        self.cached_creator = users[0]
+        return user
+
+    @creator.setter
+    def creator(self, value):
+        if value:
+            value = value.id
+        if (value != self.get_attribute("creator_id", None)):
+            self.cached_creator=None
+        self.set_attribute("creator_id", value)
+
+    @property
+    def image(self):
+        from core.models import Image
+        # Implement the logic here to pick the image that should be used when
+        # instantiating the VM that will hold the container.
+
+        slice = self.provider_service.slices.all()
+        if not slice:
+            raise XOSProgrammingError("provider service has no slice")
+        slice = slice[0]
+
+        if slice.default_isolation in ["container", "container_vm"]:
+            look_for_images = self.LOOK_FOR_CONTAINER_IMAGES
+        else:
+            look_for_images = self.LOOK_FOR_IMAGES
+
+        for image_name in look_for_images:
+            images = Image.objects.filter(name = image_name)
+            if images:
+                return images[0]
+
+        raise XOSProgrammingError("No VPCE image (looked for %s)" % str(look_for_images))
+
+    def save_instance(self, instance):
+        # Override this function to do custom pre-save or post-save processing,
+        # such as creating ports for containers.
+        instance.save()
+
+    def pick_least_loaded_instance_in_slice(self, slices):
+        for slice in slices:
+            if slice.instances.all().count() > 0:
+                for instance in slice.instances.all():
+                     # Pick the first instance that has fewer than 5 tenants
+                     if self.count_of_tenants_of_an_instance(instance) < 5:
+                         return instance
+        return None
+
+    # TODO: Ideally the tenant count for an instance should be maintained using a
+    # many-to-one relationship attribute; however, since this model is a proxy, it
+    # does not permit any new attributes to be defined. Look for a better solution.
+    def count_of_tenants_of_an_instance(self, instance):
+        tenant_count = 0
+        for tenant in self.get_tenant_objects().all():
+            if tenant.get_attribute("instance_id", None) == instance.id:
+                tenant_count += 1
+        return tenant_count
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        if (self.instance is not None) and (self.instance.image != self.image):
+            self.instance.delete()
+            self.instance = None
+
+        if self.instance is None:
+            if not self.provider_service.slices.count():
+                raise XOSConfigurationError("The service has no slices")
+
+            new_instance_created = False
+            instance = None
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                # Check whether any existing instance can be reused for this tenant
+                slices = self.provider_service.slices.all()
+                instance = self.pick_least_loaded_instance_in_slice(slices)
+
+            if not instance:
+                flavors = Flavor.objects.filter(name="m1.small")
+                if not flavors:
+                    raise XOSConfigurationError("No m1.small flavor")
+
+                slice = self.provider_service.slices.all()[0]
+
+                if slice.default_isolation == "container_vm":
+                    (node, parent) = ContainerVmScheduler(slice).pick()
+                else:
+                    (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+
+                instance = Instance(slice = slice,
+                                node = node,
+                                image = self.image,
+                                creator = self.creator,
+                                deployment = node.site_deployment.deployment,
+                                flavor = flavors[0],
+                                isolation = slice.default_isolation,
+                                parent = parent)
+                self.save_instance(instance)
+                new_instance_created = True
+
+            try:
+                self.instance = instance
+                super(TenantWithContainer, self).save()
+            except:
+                if new_instance_created:
+                    instance.delete()
+                raise
+
+    def cleanup_container(self):
+        if self.instance:
+            if self.get_attribute("use_same_instance_for_multiple_tenants", default=False):
+                # Delete the instance only if this is the last tenant on that instance
+                tenant_count = self.count_of_tenants_of_an_instance(self.instance)
+                if tenant_count == 0:
+                    self.instance.delete()
+            else:
+                self.instance.delete()
+            self.instance = None
+
+    def save(self, *args, **kwargs):
+        if (not self.creator) and (hasattr(self, "caller")) and (self.caller):
+            self.creator = self.caller
+        super(TenantWithContainer, self).save(*args, **kwargs)
+
+class CoarseTenant(Tenant):
+    """ TODO: rename "CoarseTenant" --> "StaticTenant" """
+    class Meta:
+        proxy = True
+
+    KIND = COARSE_KIND
+
+    def save(self, *args, **kwargs):
+        if (not self.subscriber_service):
+            raise XOSValidationError("subscriber_service cannot be null")
+        if (self.subscriber_tenant or self.subscriber_user):
+            raise XOSValidationError("subscriber_tenant and subscriber_user must be null")
+
+        super(CoarseTenant,self).save()
+
+class Subscriber(TenantRoot):
+    """ Intermediate class for TenantRoots that are to be Subscribers """
+
+    class Meta:
+        proxy = True
+
+    KIND = "Subscriber"
+
+class Provider(TenantRoot):
+    """ Intermediate class for TenantRoots that are to be Providers """
+
+    class Meta:
+        proxy = True
+
+    KIND = "Provider"
+
+class TenantAttribute(PlCoreBase):
+    name = models.CharField(help_text="Attribute Name", max_length=128)
+    value = models.TextField(help_text="Attribute Value")
+    tenant = models.ForeignKey(Tenant, related_name='tenantattributes', help_text="The Tenant this attribute is associated with")
+
+class TenantRootRole(PlCoreBase):
+    ROLE_CHOICES = (('admin','Admin'), ('access','Access'))
+
+    role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
+
+    def __unicode__(self):  return u'%s' % (self.role)
+
+class TenantRootPrivilege(PlCoreBase):
+    user = models.ForeignKey('User', related_name="tenant_root_privileges")
+    tenant_root = models.ForeignKey('TenantRoot', related_name="tenant_root_privileges")
+    role = models.ForeignKey('TenantRootRole', related_name="tenant_root_privileges")
+
+    class Meta:
+        unique_together = ('user', 'tenant_root', 'role')
+
+    def __unicode__(self):  return u'%s %s %s' % (self.tenant_root, self.user, self.role)
+
+    def save(self, *args, **kwds):
+        if not self.user.is_active:
+            raise PermissionDenied, "Cannot modify role(s) of a disabled user"
+        super(TenantRootPrivilege, self).save(*args, **kwds)
+
+    def can_update(self, user):
+        return user.can_update_tenant_root_privilege(self)
+
+    @classmethod
+    def select_by_user(cls, user):
+        if user.is_admin:
+            return cls.objects.all()
+        else:
+            # User can see his own privilege
+            trp_ids = [trp.id for trp in cls.objects.filter(user=user)]
+
+            # A tenant_root admin can see the TenantRootPrivileges for his TenantRoot
+            for priv in cls.objects.filter(user=user, role__role="admin"):
+                trp_ids.extend( [trp.id for trp in cls.objects.filter(tenant_root=priv.tenant_root)] )
+
+            return cls.objects.filter(id__in=trp_ids)
+
+
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index 12c1ea2..5645709 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -12,6 +12,7 @@
 from django.contrib.contenttypes import generic
 from core.models import Service
 from core.models import Controller
+from core.models.node import Node
 from core.models import Flavor, Image
 from core.models.plcorebase import StrippedCharField
 from django.core.exceptions import PermissionDenied, ValidationError
@@ -40,6 +41,7 @@
     # for tenant view
     default_flavor = models.ForeignKey(Flavor, related_name = "slices", null=True, blank=True)
     default_image = models.ForeignKey(Image, related_name = "slices", null=True, blank=True);
+    default_node = models.ForeignKey(Node, related_name = "slices", null=True, blank=True)
     mount_data_sets = StrippedCharField(default="GenBank",null=True, blank=True, max_length=256)
 
     default_isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
diff --git a/xos/core/views/mCordServiceGrid.py b/xos/core/views/mCordServiceGrid.py
index 866d8a9..56c820a 100644
--- a/xos/core/views/mCordServiceGrid.py
+++ b/xos/core/views/mCordServiceGrid.py
@@ -45,10 +45,11 @@
                 image_url = service.icon_url
                 if (not image_url):
                     image_url = "/static/mCordServices/service_common.png"
-                if service.view_url.startswith("http"):
-                    target = 'target="_blank"'
-                else:
-                    target = ''
+                #if service.view_url.startswith("http"):
+                #    target = 'target="_blank"'
+                #else:
+                #    target = ''
+                target = ''
 
                 html = html + '<div class="col-xs-4 text-center service-container">'
                 html = html + '<a href="%s" %s>' % (service.view_url, target)
diff --git a/xos/services/mcordservice/__init__.py b/xos/services/mcordservice/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/services/mcordservice/__init__.py
diff --git a/xos/services/mcordservice/admin.py b/xos/services/mcordservice/admin.py
new file mode 100644
index 0000000..6205d9b
--- /dev/null
+++ b/xos/services/mcordservice/admin.py
@@ -0,0 +1,141 @@
+
+from core.admin import ReadOnlyAwareAdmin, SliceInline
+from core.middleware import get_request
+from core.models import User
+from django import forms
+from django.contrib import admin
+from services.mcordservice.models import MCORDService, MCORDServiceComponent, MCORD_KIND
+
+# The class to provide an admin interface on the web for the service.
+# We do only configuration here and don't change any logic because the logic
+# is taken care of for us by ReadOnlyAwareAdmin
+class MCORDServiceAdmin(ReadOnlyAwareAdmin):
+    # We must set the model so that the admin knows what fields to use
+    model = MCORDService
+    verbose_name = "MCORD Service"
+    verbose_name_plural = "MCORD Services"
+
+    # Setting list_display creates columns on the admin page, each value here
+    # is a column; the column is populated for every instance of the model.
+    list_display = ("backend_status_icon", "name", "enabled")
+
+    # Used to indicate which values in the columns of the admin form are links.
+    list_display_links = ('backend_status_icon', 'name', )
+
+    # Denotes the sections of the form, the fields in the section, and the
+    # CSS classes used to style them. We represent this as a set of tuples, each
+    # tuple has a name (or None) and a set of fields and classes.
+    # Here the first section does not have a name so we use None. That first
+    # section has several fields indicated in the 'fields' attribute, and styled
+    # by the classes indicated in the 'classes' attribute. The classes given
+    # here are important for rendering the tabs on the form. To give the tabs
+    # we must assign the classes suit-tab and suit-tab-<name>, where
+    # <name> will be used later.
+    fieldsets = [(None, {'fields': ['backend_status_text', 'name', 'enabled',
+                                    'versionNumber', 'description', "view_url"],
+                         'classes':['suit-tab suit-tab-general']})]
+
+    # Denotes the fields that are readonly and cannot be changed.
+    readonly_fields = ('backend_status_text', )
+
+    # Inlines are used to denote other models that can be edited on the same
+    # form as this one. In this case the service form also allows changes
+    # to slices.
+    inlines = [SliceInline]
+
+    extracontext_registered_admins = True
+
+    # Denotes the fields that can be changed by an admin but not by all users
+    user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
+
+    # Associates fieldsets from this form and from the inlines.
+    # The format here is a set of tuples of (<name>, tab title). <name> comes from the
+    # <name> in the fieldsets.
+    suit_form_tabs = (('general', 'MCORD Service Details'),
+                      ('administration', 'Components'),
+                      ('slices', 'Slices'),)
+
+    # Used to include a template for a tab. Here we include the
+    # mcordserviceadmin template in the top position for the administration
+    # tab.
+    suit_form_includes = (('mcordserviceadmin.html',
+                           'top',
+                           'administration'),)
+
+    # Used to get the objects for this model that are associated with the
+    # requesting user.
+    def queryset(self, request):
+        return MCORDService.get_service_objects_by_user(request.user)
+
+# Class to represent the form to add and edit tenants.
+# We need to define this instead of just using an admin like we did for the
+# service because tenants vary more than services and there isn't a common form.
+# This allows us to change the python behavior for the admin form to save extra
+# fields and control defaults.
+class MCORDServiceComponentForm(forms.ModelForm):
+    # Defines a field for the creator of this component. It is a dropdown which
+    # is populated with all of the users.
+    creator = forms.ModelChoiceField(queryset=User.objects.all())
+    # Defines a text field for the display message, it is not required.
+    display_message = forms.CharField(required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(MCORDServiceComponentForm, self).__init__(*args, **kwargs)
+        # Set the kind field to readonly
+        self.fields['kind'].widget.attrs['readonly'] = True
+        # Define the logic for obtaining the objects for the provider_service
+        # dropdown of the tenant form.
+        self.fields[
+            'provider_service'].queryset = MCORDService.get_service_objects().all()
+        # Set the initial kind to MCORD_KIND for this tenant.
+        self.fields['kind'].initial = MCORD_KIND
+        # If there is an instance of this model then we can set the initial
+        # form values to the existing values.
+        if self.instance:
+            self.fields['creator'].initial = self.instance.creator
+            self.fields[
+                'display_message'].initial = self.instance.display_message
+
+        # If there is not an instance then we need to set initial values.
+        if (not self.instance) or (not self.instance.pk):
+            self.fields['creator'].initial = get_request().user
+            if MCORDService.get_service_objects().exists():
+                self.fields["provider_service"].initial = MCORDService.get_service_objects().all()[0]
+
+    # This function describes what happens when the save button is pressed on
+    # the tenant form. In this case we set the values that were entered onto
+    # the model instance.
+    def save(self, commit=True):
+        self.instance.creator = self.cleaned_data.get("creator")
+        self.instance.display_message = self.cleaned_data.get(
+            "display_message")
+        return super(MCORDServiceComponentForm, self).save(commit=commit)
+
+    class Meta:
+        model = MCORDServiceComponent
+
+# Define the admin form for the tenant. This uses a similar structure to the
+# service but uses MCORDServiceComponentForm to change the python behavior.
+
+
+class MCORDServiceComponentAdmin(ReadOnlyAwareAdmin):
+    verbose_name = "MCORD Component"
+    verbose_name_plural = "MCORD Components"
+    list_display = ('id', 'backend_status_icon', 'instance', 'display_message')
+    list_display_links = ('backend_status_icon', 'instance', 'display_message',
+                          'id')
+    fieldsets = [(None, {'fields': ['backend_status_text', 'kind',
+                                    'provider_service', 'instance', 'creator',
+                                    'display_message'],
+                         'classes': ['suit-tab suit-tab-general']})]
+    readonly_fields = ('backend_status_text', 'instance',)
+    form = MCORDServiceComponentForm
+
+    suit_form_tabs = (('general', 'Details'),)
+
+    def queryset(self, request):
+        return MCORDServiceComponent.get_tenant_objects_by_user(request.user)
+
+# Associate the admin forms with the models.
+admin.site.register(MCORDService, MCORDServiceAdmin)
+admin.site.register(MCORDServiceComponent, MCORDServiceComponentAdmin)
diff --git a/xos/services/mcordservice/models.py b/xos/services/mcordservice/models.py
new file mode 100644
index 0000000..369dd89
--- /dev/null
+++ b/xos/services/mcordservice/models.py
@@ -0,0 +1,156 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+import traceback
+from xos.exceptions import *
+from core.models import SlicePrivilege, SitePrivilege
+from sets import Set
+
+MCORD_KIND = "mcordservice"
+
+# The class to represent the service. Most of the service logic is given for us
+# in the Service class, but we have some configuration that is specific to
+# this example.
+class MCORDService(Service):
+    KIND = MCORD_KIND
+
+    class Meta:
+        # When the proxy field is set to True the model is represented as
+        # its superclass in the database, but we can still change the python
+        # behavior. In this case MCORDService is a Service in the
+        # database.
+        proxy = True
+        # The name used to find this service; all directories are named this
+        app_label = "mcordservice"
+        verbose_name = "MCORD Service"
+
+# This is the class to represent the tenant. Most of the logic is given to us
+# in TenantWithContainer, however there is some configuration and logic that
+# we need to define for this example.
+class MCORDServiceComponent(TenantWithContainer):
+
+    class Meta:
+        # Same as above, MCORDServiceComponent is represented as a
+        # TenantWithContainer, but we change the python behavior.
+        proxy = True
+        verbose_name = "MCORD Service Component"
+
+    # The kind of the service is used on forms to differentiate this service
+    # from the other services.
+    KIND = MCORD_KIND
+
+    # Ansible requires that the sync_attributes field contain nat_ip and nat_mac;
+    # these will be used to determine where to SSH to for ansible.
+    # Getters must be defined for every attribute specified here.
+    sync_attributes = ("private_ip", "private_mac",
+                       "mcord_ip", "mcord_mac",
+                       "nat_ip", "nat_mac",)
+
+    # default_attributes is used to cleanly indicate what the default values for
+    # the fields are.
+    default_attributes = {'display_message': 'Hello MCORD!'}
+
+    def __init__(self, *args, **kwargs):
+        mcord_services = MCORDService.get_service_objects().all()
+        # When the tenant is created the default service in the form is set
+        # to be the first created MCORDService
+        if mcord_services:
+            self._meta.get_field(
+                "provider_service").default = mcord_services[0].id
+        super(MCORDServiceComponent, self).__init__(*args, **kwargs)
+
+    def can_update(self, user):
+        # Allow non-admin users to create instances of this model as well
+        return True
+
+    def save(self, *args, **kwargs):
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a service component, since it
+                # becomes the creator of the component
+                raise XOSProgrammingError("ServiceComponent's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("ServiceComponent's self.creator was not set")
+
+        super(MCORDServiceComponent, self).save(*args, **kwargs)
+        # This call needs to happen so that an instance is created for this
+        # tenant in the slice. One instance is created per tenant.
+        model_policy_mcord_servicecomponent(self.pk)
+
+    def delete(self, *args, **kwargs):
+        # Delete the instance that was created for this tenant
+        self.cleanup_container()
+        super(MCORDServiceComponent, self).delete(*args, **kwargs)
+
+    # Getter for the message that will appear on the webpage
+    # By default it is "Hello MCORD!"
+    @property
+    def display_message(self):
+        return self.get_attribute(
+            "display_message",
+            self.default_attributes['display_message'])
+
+    # Setter for the message that will appear on the webpage
+    @display_message.setter
+    def display_message(self, value):
+        self.set_attribute("display_message", value)
+
+    @property
+    def addresses(self):
+        if (not self.id) or (not self.instance):
+            return {}
+
+        addresses = {}
+        for ns in self.instance.ports.all():
+            if "private" in ns.network.name.lower():
+                addresses["private"] = (ns.ip, ns.mac)
+            elif "nat" in ns.network.name.lower():
+                addresses["nat"] = (ns.ip, ns.mac)
+            elif "mcord_service_internal_net" in ns.network.labels.lower():
+                addresses["mcordservice"] = (ns.ip, ns.mac)
+        return addresses
+
+    # This getter is necessary because nat_ip is a sync_attribute
+    @property
+    def nat_ip(self):
+        return self.addresses.get("nat", (None, None))[0]
+
+    # This getter is necessary because nat_mac is a sync_attribute
+    @property
+    def nat_mac(self):
+        return self.addresses.get("nat", (None, None))[1]
+
+    @property
+    def private_ip(self):
+        return self.addresses.get("private", (None, None))[0]
+
+    @property
+    def private_mac(self):
+        return self.addresses.get("private", (None, None))[1]
+
+    @property
+    def mcord_ip(self):
+        return self.addresses.get("mcordservice", (None, None))[0]
+
+    @property
+    def mcord_mac(self):
+        return self.addresses.get("mcordservice", (None, None))[1]
+
+
+
+def model_policy_mcord_servicecomponent(pk):
+    # This section of code is atomic to prevent race conditions
+    with transaction.atomic():
+        # Look up the component by primary key, locking the row for update
+        component = MCORDServiceComponent.objects.select_for_update().filter(pk=pk)
+        if not component:
+            return
+        # The queryset holds at most one component, so use the first match
+        component = component[0]
+        component.manage_container()
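A hedged end-to-end sketch of how a component is created (intended for the XOS Django shell; the user e-mail and queryset indexing are assumptions): saving the component runs the model policy above, which calls manage_container() to allocate an instance in the service's slice.

    from core.models import User
    from services.mcordservice.models import MCORDService, MCORDServiceComponent

    service = MCORDService.get_service_objects()[0]       # assumes an MCORD service exists
    user = User.objects.get(email="padmin@vicci.org")     # assumed admin account

    component = MCORDServiceComponent(provider_service=service)
    component.caller = user                 # becomes the creator in save()
    component.display_message = "Hello MCORD!"
    component.save()                        # triggers model_policy_mcord_servicecomponent(),
                                            # which calls manage_container()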
diff --git a/xos/services/mcordservice/templates/mcordserviceadmin.html b/xos/services/mcordservice/templates/mcordserviceadmin.html
new file mode 100644
index 0000000..001af9a
--- /dev/null
+++ b/xos/services/mcordservice/templates/mcordserviceadmin.html
@@ -0,0 +1,10 @@
+<!-- Template used for the button leading to the MCORDServiceComponent form. -->
+<div class = "left-nav">
+  <ul>
+    <li>
+      <a href="/admin/mcordservice/mcordservicecomponent/">
+        MCORD Service Components
+      </a>
+    </li>
+  </ul>
+</div>
diff --git a/xos/synchronizers/helloworldservice_complete/helloworldservice_complete_private_key b/xos/synchronizers/helloworldservice_complete/helloworldservice_complete_private_key
new file mode 100644
index 0000000..427bf89
--- /dev/null
+++ b/xos/synchronizers/helloworldservice_complete/helloworldservice_complete_private_key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr7ezZV9wU4O9F/fMOsG9Zm0kIbjLGNsL/MJXuGlqw0SRRbkx
+YipvtP+pJmCzkHmnUFCE1BMVHcnCJRfhcwabF08c+t7H5mj6GPo/QKR7seLr2IKM
+jfG3846u3k2Oo8wNfy8HJ5g+bZFhBk+Z0scYkqQFDL0IW1JtWkl+Yu2VcZsiSJCq
+j+7XjjM1QoiDCyx3p6z/jHi5K1XIFxvQaeBddm3ICau2x6ezd5wnYjPaCANuwisJ
+KgmwSvX/lr8NZkjpETME+ghMPDq7KvXFZL9MCmv8IFe2fKVzIDqHkbgcZ/W0maed
+A/2y9p55B+SQmN3PXW1EhOXHH0SNP31ZS+N5dwIDAQABAoIBAGrudaN5ItgP0WDm
+kUgoYmQUgupqlF2531+fvNYigK/36BfwDRdaD8Sr2HncWynOfn0nos2UF0ObZiRA
+lhfzqynSISahsDCNLbVJhHiIICYum6uUNoii0njLGat6sxUGtifxrH7x7Pusfsji
+ZA+azV9fpRsNZip8zMMm+lyljE4nQbVZv09aExq0Mh2n+mH6OWS43mZ1N7TxWtgd
+MmtoLBAPoMiqXlCxZOcznptVR9hY7CSG0uOQUSui44DOXOyqEI7z57eoYM1hWmng
+Ery07Qr9BbEVl4epLaEyLIGXcUsUbcQz3kbXCg0NbXHiFtr0kdIVwJXHg5M9MAsf
+fDaxJZECgYEA29oLRkI+0L9rSErteaf4spo4FJAARWbsVM3uj1gKEClUUHklI97A
+UVTcOFC7Drr/rwqfHy8fQq9ASIMDnj+FulYQSMna3SLdkgsbNSDuesbG4wp6+chQ
+uSzZP1YtaYrjMxz6s8s/zmFkqAssyOuscsOhgF16945hc63GLro4GwUCgYEAzJv4
+eqWrY6Il7p/Oir4AJxbdfO50Oj1dECsFNZ1KhtA280FslW6Za+oUfD1+Xv13XRZP
+O62IvXXJT67NOq0rKVUixPJJFXQqSRU1QljLgREM6dqr4pS4NirkaPvGwuuej8I4
+dKLqVPcNxDSAXfMwR0KQu7+IVEdvzrw5hrsgg0sCgYB21YUClQwfCViT2uxBtelX
+oMRvWObMnLVhoW4xTQUjdzN7y/+nQ9/wFk5yojB55doOY09fK7lZ8iBtEWQDRZKj
+BaIHthP3M8FQD3DFZueAtbELR77xBLWdYgCLm6kwQ0JLfn6EcHgstbgSnPe4Iqsz
+3UqOd/jflrZWMLfOyhlJgQKBgCGCRa5oZWo6yvWKjHviZAoCz6E/OB+1nwEf2omO
+Sf9MKEOsakkKxOuMeXBjbcfGwP6owa8nW2aT3LVFDm1WoOPzAm+4sklmLeqsI33L
+JwDrNu8xlcbUzlpoqeGbolCX3+7xQuevKqthjoqcgo1gX368IxHsazpKPMBhyRYM
+nWWDAoGBANOG/59508uQqZvWtByA092ARXjEUYLgNTwDo1N4kM5zgV8NETtv7qs/
+P/ze2e88sI230jzbU3iq2OGjk6S1c6LHVG9QohZPwtnwTCeKRhSG+CYHMcXSLK7D
+xf4C0kAbPsaG5F0w3vbGTTF4uuGXyijOQSXMhiG4756VaMEGvb9k
+-----END RSA PRIVATE KEY-----
diff --git a/xos/synchronizers/helloworldservice_complete/helloworldservice_private_key b/xos/synchronizers/helloworldservice_complete/helloworldservice_private_key
new file mode 100644
index 0000000..427bf89
--- /dev/null
+++ b/xos/synchronizers/helloworldservice_complete/helloworldservice_private_key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr7ezZV9wU4O9F/fMOsG9Zm0kIbjLGNsL/MJXuGlqw0SRRbkx
+YipvtP+pJmCzkHmnUFCE1BMVHcnCJRfhcwabF08c+t7H5mj6GPo/QKR7seLr2IKM
+jfG3846u3k2Oo8wNfy8HJ5g+bZFhBk+Z0scYkqQFDL0IW1JtWkl+Yu2VcZsiSJCq
+j+7XjjM1QoiDCyx3p6z/jHi5K1XIFxvQaeBddm3ICau2x6ezd5wnYjPaCANuwisJ
+KgmwSvX/lr8NZkjpETME+ghMPDq7KvXFZL9MCmv8IFe2fKVzIDqHkbgcZ/W0maed
+A/2y9p55B+SQmN3PXW1EhOXHH0SNP31ZS+N5dwIDAQABAoIBAGrudaN5ItgP0WDm
+kUgoYmQUgupqlF2531+fvNYigK/36BfwDRdaD8Sr2HncWynOfn0nos2UF0ObZiRA
+lhfzqynSISahsDCNLbVJhHiIICYum6uUNoii0njLGat6sxUGtifxrH7x7Pusfsji
+ZA+azV9fpRsNZip8zMMm+lyljE4nQbVZv09aExq0Mh2n+mH6OWS43mZ1N7TxWtgd
+MmtoLBAPoMiqXlCxZOcznptVR9hY7CSG0uOQUSui44DOXOyqEI7z57eoYM1hWmng
+Ery07Qr9BbEVl4epLaEyLIGXcUsUbcQz3kbXCg0NbXHiFtr0kdIVwJXHg5M9MAsf
+fDaxJZECgYEA29oLRkI+0L9rSErteaf4spo4FJAARWbsVM3uj1gKEClUUHklI97A
+UVTcOFC7Drr/rwqfHy8fQq9ASIMDnj+FulYQSMna3SLdkgsbNSDuesbG4wp6+chQ
+uSzZP1YtaYrjMxz6s8s/zmFkqAssyOuscsOhgF16945hc63GLro4GwUCgYEAzJv4
+eqWrY6Il7p/Oir4AJxbdfO50Oj1dECsFNZ1KhtA280FslW6Za+oUfD1+Xv13XRZP
+O62IvXXJT67NOq0rKVUixPJJFXQqSRU1QljLgREM6dqr4pS4NirkaPvGwuuej8I4
+dKLqVPcNxDSAXfMwR0KQu7+IVEdvzrw5hrsgg0sCgYB21YUClQwfCViT2uxBtelX
+oMRvWObMnLVhoW4xTQUjdzN7y/+nQ9/wFk5yojB55doOY09fK7lZ8iBtEWQDRZKj
+BaIHthP3M8FQD3DFZueAtbELR77xBLWdYgCLm6kwQ0JLfn6EcHgstbgSnPe4Iqsz
+3UqOd/jflrZWMLfOyhlJgQKBgCGCRa5oZWo6yvWKjHviZAoCz6E/OB+1nwEf2omO
+Sf9MKEOsakkKxOuMeXBjbcfGwP6owa8nW2aT3LVFDm1WoOPzAm+4sklmLeqsI33L
+JwDrNu8xlcbUzlpoqeGbolCX3+7xQuevKqthjoqcgo1gX368IxHsazpKPMBhyRYM
+nWWDAoGBANOG/59508uQqZvWtByA092ARXjEUYLgNTwDo1N4kM5zgV8NETtv7qs/
+P/ze2e88sI230jzbU3iq2OGjk6S1c6LHVG9QohZPwtnwTCeKRhSG+CYHMcXSLK7D
+xf4C0kAbPsaG5F0w3vbGTTF4uuGXyijOQSXMhiG4756VaMEGvb9k
+-----END RSA PRIVATE KEY-----
diff --git a/xos/synchronizers/mcordservice/__init__.py b/xos/synchronizers/mcordservice/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizers/mcordservice/__init__.py
diff --git a/xos/synchronizers/mcordservice/mcordservice-synchronizer.py b/xos/synchronizers/mcordservice/mcordservice-synchronizer.py
new file mode 100644
index 0000000..95f4081
--- /dev/null
+++ b/xos/synchronizers/mcordservice/mcordservice-synchronizer.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+# This imports and runs the standard XOS synchronizer
+# (../../synchronizers/base/xos-synchronizer.py)
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(
+    os.path.realpath(__file__)), "../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizers/mcordservice/mcordservice_config b/xos/synchronizers/mcordservice/mcordservice_config
new file mode 100644
index 0000000..56fb6b8
--- /dev/null
+++ b/xos/synchronizers/mcordservice/mcordservice_config
@@ -0,0 +1,36 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the observer
+[observer]
+# Optional name
+name=mcordservice
+# This is the location of the dependency graph you generate
+dependency_graph=/opt/xos/synchronizers/mcordservice/model-deps
+# The location of your SyncSteps
+steps_dir=/opt/xos/synchronizers/mcordservice/steps
+# A temporary directory that will be used by ansible
+sys_dir=/opt/xos/synchronizers/mcordservice/sys
+# Location of the file to save logging messages to; the backend log is often used
+logfile=/var/log/xos_backend.log
+# If this option is true, then nothing will change; we simply pretend to run
+pretend=False
+# If this is False then XOS will use an exponential backoff when the observer
+# fails; since we will be waiting for an instance, we don't want that here.
+backoff_disabled=True
+# We want the output from ansible to be logged
+save_ansible_output=True
+# This determines how we SSH to a client. If this is set to True then we try
+# to SSH using the instance name as a proxy; if this is disabled we SSH using
+# the NAT IP of the instance. On CloudLab the first option will fail, so we must
+# set this to False.
+proxy_ssh=False
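The file above is plain INI, so a rough ConfigParser sketch shows approximately what the synchronizer framework reads from it (XOS's own Config wrapper may differ; this is illustrative only).

    import ConfigParser   # Python 2, matching this codebase

    cp = ConfigParser.ConfigParser()
    cp.read("/opt/xos/synchronizers/mcordservice/mcordservice_config")
    print cp.get("observer", "name")        # -> mcordservice
    print cp.get("observer", "steps_dir")   # -> /opt/xos/synchronizers/mcordservice/steps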
diff --git a/xos/synchronizers/mcordservice/mcordservice_private_key b/xos/synchronizers/mcordservice/mcordservice_private_key
new file mode 100644
index 0000000..427bf89
--- /dev/null
+++ b/xos/synchronizers/mcordservice/mcordservice_private_key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr7ezZV9wU4O9F/fMOsG9Zm0kIbjLGNsL/MJXuGlqw0SRRbkx
+YipvtP+pJmCzkHmnUFCE1BMVHcnCJRfhcwabF08c+t7H5mj6GPo/QKR7seLr2IKM
+jfG3846u3k2Oo8wNfy8HJ5g+bZFhBk+Z0scYkqQFDL0IW1JtWkl+Yu2VcZsiSJCq
+j+7XjjM1QoiDCyx3p6z/jHi5K1XIFxvQaeBddm3ICau2x6ezd5wnYjPaCANuwisJ
+KgmwSvX/lr8NZkjpETME+ghMPDq7KvXFZL9MCmv8IFe2fKVzIDqHkbgcZ/W0maed
+A/2y9p55B+SQmN3PXW1EhOXHH0SNP31ZS+N5dwIDAQABAoIBAGrudaN5ItgP0WDm
+kUgoYmQUgupqlF2531+fvNYigK/36BfwDRdaD8Sr2HncWynOfn0nos2UF0ObZiRA
+lhfzqynSISahsDCNLbVJhHiIICYum6uUNoii0njLGat6sxUGtifxrH7x7Pusfsji
+ZA+azV9fpRsNZip8zMMm+lyljE4nQbVZv09aExq0Mh2n+mH6OWS43mZ1N7TxWtgd
+MmtoLBAPoMiqXlCxZOcznptVR9hY7CSG0uOQUSui44DOXOyqEI7z57eoYM1hWmng
+Ery07Qr9BbEVl4epLaEyLIGXcUsUbcQz3kbXCg0NbXHiFtr0kdIVwJXHg5M9MAsf
+fDaxJZECgYEA29oLRkI+0L9rSErteaf4spo4FJAARWbsVM3uj1gKEClUUHklI97A
+UVTcOFC7Drr/rwqfHy8fQq9ASIMDnj+FulYQSMna3SLdkgsbNSDuesbG4wp6+chQ
+uSzZP1YtaYrjMxz6s8s/zmFkqAssyOuscsOhgF16945hc63GLro4GwUCgYEAzJv4
+eqWrY6Il7p/Oir4AJxbdfO50Oj1dECsFNZ1KhtA280FslW6Za+oUfD1+Xv13XRZP
+O62IvXXJT67NOq0rKVUixPJJFXQqSRU1QljLgREM6dqr4pS4NirkaPvGwuuej8I4
+dKLqVPcNxDSAXfMwR0KQu7+IVEdvzrw5hrsgg0sCgYB21YUClQwfCViT2uxBtelX
+oMRvWObMnLVhoW4xTQUjdzN7y/+nQ9/wFk5yojB55doOY09fK7lZ8iBtEWQDRZKj
+BaIHthP3M8FQD3DFZueAtbELR77xBLWdYgCLm6kwQ0JLfn6EcHgstbgSnPe4Iqsz
+3UqOd/jflrZWMLfOyhlJgQKBgCGCRa5oZWo6yvWKjHviZAoCz6E/OB+1nwEf2omO
+Sf9MKEOsakkKxOuMeXBjbcfGwP6owa8nW2aT3LVFDm1WoOPzAm+4sklmLeqsI33L
+JwDrNu8xlcbUzlpoqeGbolCX3+7xQuevKqthjoqcgo1gX368IxHsazpKPMBhyRYM
+nWWDAoGBANOG/59508uQqZvWtByA092ARXjEUYLgNTwDo1N4kM5zgV8NETtv7qs/
+P/ze2e88sI230jzbU3iq2OGjk6S1c6LHVG9QohZPwtnwTCeKRhSG+CYHMcXSLK7D
+xf4C0kAbPsaG5F0w3vbGTTF4uuGXyijOQSXMhiG4756VaMEGvb9k
+-----END RSA PRIVATE KEY-----
diff --git a/xos/synchronizers/mcordservice/model-deps b/xos/synchronizers/mcordservice/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizers/mcordservice/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizers/mcordservice/run.sh b/xos/synchronizers/mcordservice/run.sh
new file mode 100644
index 0000000..9e8516e
--- /dev/null
+++ b/xos/synchronizers/mcordservice/run.sh
@@ -0,0 +1,3 @@
+# Runs the XOS synchronizer using mcordservice_config
+export XOS_DIR=/opt/xos
+python mcordservice-synchronizer.py  -C $XOS_DIR/synchronizers/mcordservice/mcordservice_config
diff --git a/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.py b/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.py
new file mode 100644
index 0000000..6e0590e
--- /dev/null
+++ b/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.py
@@ -0,0 +1,37 @@
+import os
+import sys
+from django.db.models import Q, F
+from services.mcordservice.models import MCORDService, MCORDServiceComponent
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+
+parentdir = os.path.join(os.path.dirname(__file__), "..")
+sys.path.insert(0, parentdir)
+
+class SyncMCORDServiceComponent(SyncInstanceUsingAnsible):
+
+    provides = [MCORDServiceComponent]
+
+    observes = MCORDServiceComponent
+
+    requested_interval = 0
+
+    template_name = "sync_mcordservicecomponent.yaml"
+
+    service_key_name = "/opt/xos/synchronizers/mcordservice/mcordservice_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncMCORDServiceComponent, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+
+        if (not deleted):
+            objs = MCORDServiceComponent.get_tenant_objects().filter(
+                Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
+        else:
+
+            objs = MCORDServiceComponent.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_extra_attributes(self, o):
+        return {"display_message": o.display_message}
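The fetch_pending() filter above selects tenant components that either have never been enacted or have been updated since they were last enacted, and skips lazy-blocked ones. A plain-Python restatement of that predicate, using a hypothetical stand-in object, just to make the Q/F expression explicit:

# Illustrative restatement of the fetch_pending() predicate, outside the ORM.
# 'component' is a hypothetical stand-in with enacted/updated timestamps and a
# lazy_blocked flag, mirroring the fields the query above relies on.
def is_pending(component):
    never_enacted = component.enacted is None
    updated_since_enacted = (component.enacted is not None and
                             component.enacted < component.updated)
    return (never_enacted or updated_since_enacted) and not component.lazy_blocked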
diff --git a/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.yaml b/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.yaml
new file mode 100644
index 0000000..719c75f
--- /dev/null
+++ b/xos/synchronizers/mcordservice/steps/sync_mcordservicecomponent.yaml
@@ -0,0 +1,18 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+  - name: install apache
+    apt: name=apache2 state=present update_cache=yes
+
+  - name: write message
+    shell: echo "{{ display_message }}" > /var/www/html/index.html
+
+  - name: stop apache
+    service: name=apache2 state=stopped
+
+  - name: start apache
+    service: name=apache2 state=started
diff --git a/xos/synchronizers/mcordservice/stop.sh b/xos/synchronizers/mcordservice/stop.sh
new file mode 100644
index 0000000..9127d5f
--- /dev/null
+++ b/xos/synchronizers/mcordservice/stop.sh
@@ -0,0 +1,2 @@
+# Kill the observer
+pkill -9 -f mcordservice-synchronizer.py
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.py b/xos/synchronizers/openstack/steps/sync_controller_networks.py
index f8b2292..df8f70b 100644
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.py
+++ b/xos/synchronizers/openstack/steps/sync_controller_networks.py
@@ -40,6 +40,18 @@
             raise Exception("Invalid subnet %s" % subnet)
         return ".".join(parts[:3]) + ".1"
 
+    def alloc_start_ip(self, subnet):
+        parts = subnet.split(".")
+        if len(parts)!=4:
+            raise Exception("Invalid subnet %s" % subnet)
+        return ".".join(parts[:3]) + ".3"
+
+    def alloc_end_ip(self, subnet):
+        parts = subnet.split(".")
+        if len(parts)!=4:
+            raise Exception("Invalid subnet %s" % subnet)
+        return ".".join(parts[:3]) + ".254"
+
     def save_controller_network(self, controller_network):
         network_name = controller_network.network.name
         subnet_name = '%s-%d'%(network_name,controller_network.pk)
@@ -47,9 +59,27 @@
             # If a subnet is already specified (pass in by the creator), then
             # use that rather than auto-generating one.
             cidr = controller_network.subnet.strip()
+            print "CIDR_MS", cidr
         else:
             cidr = self.alloc_subnet(controller_network.pk)
+            print "CIDR_AMS", cidr
+
+        if controller_network.network.start_ip and controller_network.network.start_ip.strip():
+            start_ip = controller_network.network.start_ip.strip()
+            print "DEF_START_IP", start_ip
+        else:
+            start_ip = self.alloc_start_ip(cidr) 
+            print "DEF_START_AIP", start_ip
+
+        if controller_network.network.end_ip and controller_network.network.end_ip.strip():
+            end_ip = controller_network.network.end_ip.strip()
+            print "DEF_END_IP", end_ip
+        else:
+            end_ip = self.alloc_end_ip(cidr) 
+            print "DEF_END_AIP", end_ip
+        
         self.cidr=cidr
+        self.start_ip=start_ip
         slice = controller_network.network.owner
 
         network_fields = {'endpoint':controller_network.controller.auth_url,
@@ -63,6 +93,8 @@
                     'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
                     'cidr':cidr,
                     'gateway':self.alloc_gateway(cidr),
+                    'start_ip':start_ip,
+                    'end_ip':end_ip,
                     'use_vtn':getattr(Config(), "networking_use_vtn", False),
                     'delete':False
                     }
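The new alloc_start_ip()/alloc_end_ip() helpers reuse the first three octets of the subnet string, the same trick alloc_gateway() already uses, so a dotted /24 such as 192.168.6.0/24 yields an allocation pool of .3 through .254. A standalone copy with a couple of sanity checks, for illustration only:

# Standalone copies of the helpers added above, to show the values they produce.
def alloc_start_ip(subnet):
    parts = subnet.split(".")
    if len(parts) != 4:
        raise Exception("Invalid subnet %s" % subnet)
    return ".".join(parts[:3]) + ".3"

def alloc_end_ip(subnet):
    parts = subnet.split(".")
    if len(parts) != 4:
        raise Exception("Invalid subnet %s" % subnet)
    return ".".join(parts[:3]) + ".254"

assert alloc_start_ip("192.168.6.0/24") == "192.168.6.3"
assert alloc_end_ip("192.168.6.0/24") == "192.168.6.254"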
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml b/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
index b885516..070a050 100644
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
+++ b/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
@@ -35,5 +35,7 @@
         {% endif %}
         dns_nameservers=8.8.8.8
         cidr={{ cidr }}
+        allocation_pool_start={{ start_ip }}
+        allocation_pool_end={{ end_ip }}
         {% endif %}
   {% endif %}
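The start_ip/end_ip values reach this playbook through the network_fields dictionary built in sync_controller_networks.py, and the template is rendered with Jinja2 before Ansible runs it. A minimal render check of just the two new lines, assuming jinja2 is available; the sample values are the ones used in MCORDServiceN.yaml below:

# Minimal check of how the two new template variables expand (jinja2 assumed installed).
from jinja2 import Template

snippet = Template("allocation_pool_start={{ start_ip }}\n"
                   "allocation_pool_end={{ end_ip }}")
print(snippet.render(start_ip="172.16.16.2", end_ip="172.16.16.5"))
# allocation_pool_start=172.16.16.2
# allocation_pool_end=172.16.16.5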
diff --git a/xos/tosca/MCORDService.yaml b/xos/tosca/MCORDService.yaml
new file mode 100644
index 0000000..185a314
--- /dev/null
+++ b/xos/tosca/MCORDService.yaml
@@ -0,0 +1,86 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+#          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+#      artifacts:
+#          pubkey: /opt/xos/observers/mcord/mcord_public_key
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mcord-server-image-s1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: trusty-server-multi-nic
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: ip-10-0-10-125
+    
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
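For a quick syntax check, the new blueprint can be parsed directly with the same toscalib import the resource plugins further down in this diff rely on. A sketch, assuming the bundled parser exposes ToscaTemplate(path) and .nodetemplates the way those plugins expect; the paths are the ones used elsewhere in this diff:

# Illustrative parse check of the new blueprint.
import sys
sys.path.append("/opt/tosca")
from translator.toscalib.tosca_template import ToscaTemplate

template = ToscaTemplate("/opt/xos/tosca/MCORDService.yaml")
for node in template.nodetemplates:
    print("%s: %s" % (node.name, node.type))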
diff --git a/xos/tosca/MCORDServiceN.yaml b/xos/tosca/MCORDServiceN.yaml
new file mode 100644
index 0000000..0229e43
--- /dev/null
+++ b/xos/tosca/MCORDServiceN.yaml
@@ -0,0 +1,200 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+
+    # Setup
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    compute9:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    # SUBSCRIBER
+
+    # Let's add a user who can be administrator of the household
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+    
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+
+    Topology:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosMcordTopology
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - mcord_dashboard:
+              node: Topology
+              relationship: tosca.relationships.UsesDashboard
+
+    # SERVICES
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          view_url: /admin/mcordservice/
+          kind: mcordservice
+
+    # Network Templates
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    # Network
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.2
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    ubuntu-14.04-server-cloudimg-amd64-disk1:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ubuntu-14.04-server-cloudimg-amd64-disk1
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: compute9
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index 15e9710..e811dd7 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -130,9 +130,6 @@
             no_container:
                 type: boolean
                 default: false
-            node_key:
-                type: string
-                required: false
 
 
     tosca.nodes.ONOSApp:
@@ -205,9 +202,9 @@
                 type: string
                 required: false
 
-    tosca.nodes.VSGService:
+    tosca.nodes.VCPEService:
         description: >
-            CORD: The vSG Service.
+            CORD: The vCPE Service.
         derived_from: tosca.nodes.Root
         capabilities:
             xos_base_service_caps
@@ -433,7 +430,6 @@
             This is a variant of the TOSCA Network object that includes additional
             XOS-specific properties.
           properties:
-            xos_base_props
             ip_version:
               type: integer
               required: no
@@ -535,17 +531,6 @@
                 required: false
                 description: Comma-separated list of flavors that this deployment supports.
 
-    tosca.nodes.AddressPool:
-        derived_from: tosca.nodes.Root
-        description: >
-            A pool of addresses
-        properties:
-            xos_base_props
-            addresses:
-                type: string
-                required: false
-                description: space-separated list of addresses
-
     tosca.nodes.Image:
         derived_from: tosca.nodes.Root
         description: >
@@ -695,6 +680,10 @@
                 type: string
                 required: false
                 description: default flavor to use for slice
+            default_node:
+                type: string
+                required: false
+                description: default node to use for this slice
             network:
                 type: string
                 required: false
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 88b3388..be5ab7d 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -160,9 +160,6 @@
             no_container:
                 type: boolean
                 default: false
-            node_key:
-                type: string
-                required: false
 
 
     tosca.nodes.ONOSApp:
@@ -263,9 +260,9 @@
                 type: string
                 required: false
 
-    tosca.nodes.VSGService:
+    tosca.nodes.VCPEService:
         description: >
-            CORD: The vSG Service.
+            CORD: The vCPE Service.
         derived_from: tosca.nodes.Root
         capabilities:
             scalable:
@@ -646,18 +643,6 @@
             This is a variant of the TOSCA Network object that includes additional
             XOS-specific properties.
           properties:
-            no-delete:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to delete this object
-            no-create:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to create this object
-            no-update:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to update this object
             ip_version:
               type: integer
               required: no
@@ -770,28 +755,6 @@
                 required: false
                 description: Comma-separated list of flavors that this deployment supports.
 
-    tosca.nodes.AddressPool:
-        derived_from: tosca.nodes.Root
-        description: >
-            A pool of addresses
-        properties:
-            no-delete:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to delete this object
-            no-create:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to create this object
-            no-update:
-                type: boolean
-                default: false
-                description: Do not allow Tosca to update this object
-            addresses:
-                type: string
-                required: false
-                description: space-separated list of addresses
-
     tosca.nodes.Image:
         derived_from: tosca.nodes.Root
         description: >
@@ -974,6 +937,10 @@
                 type: string
                 required: false
                 description: default flavor to use for slice
+            default_node:
+                type: string
+                required: false
+                description: default node to use for this slice
             network:
                 type: string
                 required: false
diff --git a/xos/tosca/resources/mcordcomponent.py b/xos/tosca/resources/mcordcomponent.py
new file mode 100644
index 0000000..401dba3
--- /dev/null
+++ b/xos/tosca/resources/mcordcomponent.py
@@ -0,0 +1,39 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+import pdb
+
+from services.mcordservice.models import MCORDServiceComponent, MCORDService
+
+from xosresource import XOSResource
+
+class XOSMCORDComponent(XOSResource):
+    provides = "tosca.nodes.MCORDComponent"
+    xos_model = MCORDServiceComponent
+    name_field = None
+
+    def get_xos_args(self, throw_exception=True):
+        args = super(XOSMCORDComponent, self).get_xos_args()
+
+        provider_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(MCORDService, throw_exception=throw_exception, name=provider_name)
+
+        return args
+
+    def get_existing_objs(self):
+        args = self.get_xos_args(throw_exception=False)
+        provider_service = args.get("provider_service", None)
+        if provider_service:
+            return [ self.get_xos_object(provider_service=provider_service) ]
+        return []
+
+    def postprocess(self, obj):
+        pass
+
+    def can_delete(self, obj):
+        return super(XOSMCORDComponent, self).can_delete(obj)
+
diff --git a/xos/tosca/resources/network.py b/xos/tosca/resources/network.py
index 7b513c3..2d22bf0 100644
--- a/xos/tosca/resources/network.py
+++ b/xos/tosca/resources/network.py
@@ -42,6 +42,20 @@
         cidr = self.get_property_default("cidr", None)
         if cidr:
             args["subnet"] = cidr
+        print "DEF_RES_CIDR", cidr 
+
+        start_ip = self.get_property_default("start_ip", None)
+        if start_ip:
+            args["start_ip"] = start_ip 
+        print "DEF_RES_IP", start_ip 
+
+        end_ip = self.get_property_default("end_ip", None)
+        if end_ip:
+            args["end_ip"] = end_ip 
+
+#        gateway_ip = self.get_property_default("gateway_ip", None)
+#        if gateway_ip:
+#            args["gateway_ip"] = gateway_ip
 
         return args
 
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 48e5eb0..724957f 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -5,7 +5,7 @@
 sys.path.append("/opt/tosca")
 from translator.toscalib.tosca_template import ToscaTemplate
 
-from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service,Image,Flavor
+from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service,Image,Flavor,Node
 
 from xosresource import XOSResource
 
@@ -36,6 +36,11 @@
             default_flavor = self.get_xos_object(Flavor, name=default_flavor_name, throw_exception=True)
             args["default_flavor"] = default_flavor
 
+        default_node_name = self.get_property_default("default_node", None)
+        if default_node_name:
+            default_node = self.get_xos_object(Node, name=default_node_name, throw_exception=True)
+            args["default_node"] = default_node
+
         return args
 
     def postprocess(self, obj):
diff --git a/xos/tosca/vBBU.yaml b/xos/tosca/vBBU.yaml
new file mode 100644
index 0000000..69aacb8
--- /dev/null
+++ b/xos/tosca/vBBU.yaml
@@ -0,0 +1,148 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord_bbu:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          view_url: /admin/mcordservice/
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    net-BBU1-private:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: vBBU1_internal_net
+          cidr: 192.168.6.0/24
+          start_ip: 192.168.6.20
+          end_ip: 192.168.6.25
+          gateway_ip: 192.168.6.1
+      
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    net-BBU1-private2:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: vBBU1_internal_net2
+          cidr: 192.168.7.0/24
+          start_ip: 192.168.7.20
+          end_ip: 192.168.7.25
+          gateway_ip: 192.168.7.1
+      
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    net-BBU1-private3:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: vBBU1_internal_net3
+          cidr: 192.168.8.0/24
+          start_ip: 192.168.8.20
+          end_ip: 192.168.8.25
+          gateway_ip: 192.168.8.1
+      
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    net-Internet:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: Internet
+          cidr: 10.128.13.0/24
+          start_ip: 10.128.13.20
+          end_ip: 10.128.13.25
+          gateway_ip: 10.128.13.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    ubuntu-arm64-bbu-snapshot.img:
+      type: tosca.nodes.Image
+
+    mysite_mcord_bbu_slice1:
+      description: MCORD BBU Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_bbu_service:
+              node: service_mcord_bbu
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ubuntu-arm64-bbu-snapshot.img
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.xlarge
+          default_node: computeBBU1
+
+    mcord_service_bbu_component1:
+      description: MCORD Service BBU Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord_bbu
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_bbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index 04b54aa..a1c2f60 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -177,6 +177,7 @@
     'services.hpc',
     'services.cord',
     'services.helloworldservice_complete',
+    'services.mcordservice',
     'services.onos',
     'services.ceilometer',
     'services.requestrouter',
diff --git a/xos/xos_configuration/xos_common_config b/xos/xos_configuration/xos_common_config
index 2855816..3b5177d 100755
--- a/xos/xos_configuration/xos_common_config
+++ b/xos/xos_configuration/xos_common_config
@@ -43,3 +43,6 @@
 branding_name=Open Cloud
 branding_icon=/static/logo.png
 branding_favicon=/static/favicon.png
+
+[networking]
+use_vtn=True