Merge branch 'mcord' into master
diff --git a/.gitignore b/.gitignore
index 7813bf4..9095902 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
 profile
 *.moved-aside
 .idea
+setup/*
 xos/configurations/frontend/Dockerfile
 xos/core/xoslib/karma-*
 xos/core/xoslib/docs
diff --git a/views/ngXosViews/mcordTopology/.bowerrc b/views/ngXosViews/mcordTopology/.bowerrc
new file mode 100644
index 0000000..e491038
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/.bowerrc
@@ -0,0 +1,3 @@
+{
+  "directory": "src/vendor/"
+}
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/.eslintrc b/views/ngXosViews/mcordTopology/.eslintrc
new file mode 100644
index 0000000..c852748
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/.eslintrc
@@ -0,0 +1,42 @@
+{
+    "ecmaFeatures": {
+        "blockBindings": true,
+        "forOf": true,
+        "destructuring": true,
+        "arrowFunctions": true,
+        "templateStrings": true
+    },
+    "env": { 
+        "browser": true,
+        "node": true,
+        "es6": true
+    },
+    "plugins": [
+        //"angular"
+    ],
+    "rules": {
+        "quotes": [2, "single"],
+        "camelcase": [1, {"properties": "always"}],
+        "no-underscore-dangle": 1,
+        "eqeqeq": [2, "smart"],
+        "no-alert": 1,
+        "key-spacing": [1, { "beforeColon": false, "afterColon": true }],
+        "indent": [2, 2],
+        "no-irregular-whitespace": 1,
+        "eol-last": 0,
+        "max-nested-callbacks": [2, 4],
+        "comma-spacing": [1, {"before": false, "after": true}],
+        "no-trailing-spaces": [1, { skipBlankLines: true }],
+        "no-unused-vars": [1, {"vars": "all", "args": "after-used"}],
+        "new-cap": 0,
+
+        //"angular/ng_module_name": [2, '/^xos\.*[a-z]*$/'],
+        //"angular/ng_controller_name": [2, '/^[a-z].*Ctrl$/'],
+        //"angular/ng_service_name": [2, '/^[A-Z].*Service$/'],
+        //"angular/ng_directive_name": [2, '/^[a-z]+[[A-Z].*]*$/'],
+        //"angular/ng_di": [0, "function or array"]
+    },
+    "globals" :{
+        "angular": true
+    } 
+}
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/.gitignore b/views/ngXosViews/mcordTopology/.gitignore
new file mode 100644
index 0000000..567aee4
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/.gitignore
@@ -0,0 +1,6 @@
+dist/
+src/vendor
+.tmp
+node_modules
+npm-debug.log
+dist/
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/bower.json b/views/ngXosViews/mcordTopology/bower.json
new file mode 100644
index 0000000..cb9b543
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/bower.json
@@ -0,0 +1,30 @@
+{
+  "name": "xos-mcordTopology",
+  "version": "0.0.0",
+  "authors": [
+    "Matteo Scandolo <teo@onlab.us>"
+  ],
+  "description": "The mcordTopology view",
+  "license": "MIT",
+  "ignore": [
+    "**/.*",
+    "node_modules",
+    "bower_components",
+    "static/js/vendor/",
+    "test",
+    "tests"
+  ],
+  "dependencies": {
+    "d3": "~3.5.16"
+  },
+  "devDependencies": {
+    "jquery": "2.1.4",
+    "angular-mocks": "1.4.7",
+    "angular": "1.4.7",
+    "angular-ui-router": "0.2.15",
+    "angular-cookies": "1.4.7",
+    "angular-resource": "1.4.7",
+    "ng-lodash": "0.3.0",
+    "bootstrap-css": "2.3.2"
+  }
+}
diff --git a/views/ngXosViews/mcordTopology/env/default.js b/views/ngXosViews/mcordTopology/env/default.js
new file mode 100644
index 0000000..df86216
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/env/default.js
@@ -0,0 +1,13 @@
+// This is a default configuration for your development environment.
+// You can duplicate this configuration for any of your Backend Environments.
+// Different configurations are loaded by setting a NODE_ENV variable that contains the config file name.
+// `NODE_ENV=local npm start`
+//
+// If xoscsrftoken or xossessionid are not specified, the browser values are used
+// (works only for the local environment, as both applications are served on the same domain)
+
+module.exports = {
+  host: 'http://clnode067.clemson.cloudlab.us:9999/',
+  xoscsrftoken: 'Pkq9PqoAsaMvrEiFAgxfw47IxTOtd0Y5',
+  xossessionid: 'qa1t49qeecdehofjkndqvxik71iwzfvf'
+};
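
As the comments in env/default.js explain, gulp/server.js picks a configuration file from env/ based on the NODE_ENV variable. A minimal sketch of an extra backend environment, assuming a hypothetical env/remote.js whose host and tokens are placeholders, not values from this change:

    // env/remote.js (hypothetical) - a copy of env/default.js pointing at another backend
    module.exports = {
      host: 'http://xos.example.org:9999/',           // placeholder backend URL
      xoscsrftoken: '<csrf-token-from-the-browser>',  // placeholder, copy from the browser cookies
      xossessionid: '<session-id-from-the-browser>'   // placeholder, copy from the browser cookies
    };

It would then be loaded with `NODE_ENV=remote npm start`, mirroring the `NODE_ENV=local npm start` example in the comments above.
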
diff --git a/views/ngXosViews/mcordTopology/env/mock.js b/views/ngXosViews/mcordTopology/env/mock.js
new file mode 100644
index 0000000..610ad78
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/env/mock.js
@@ -0,0 +1,5 @@
+module.exports = {
+  host: 'http://localhost:4000',
+  xoscsrftoken: 'Pkq9PqoAsaMvrEiFAgxfw47IxTOtd0Y5',
+  xossessionid: 'qa1t49qeecdehofjkndqvxik71iwzfvf'
+};
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/gulp/build.js b/views/ngXosViews/mcordTopology/gulp/build.js
new file mode 100644
index 0000000..80ba4be
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/gulp/build.js
@@ -0,0 +1,147 @@
+'use strict';
+
+// BUILD
+//
+// The only purpose of this gulpfile is to build an XOS view and copy the correct files into
+// .html => dashboards
+// .js (minified and concat) => static/js
+//
+// The templates are parsed and added to the JS bundle with Angular's $templateCache
+
+var gulp = require('gulp');
+var ngAnnotate = require('gulp-ng-annotate');
+var uglify = require('gulp-uglify');
+var templateCache = require('gulp-angular-templatecache');
+var runSequence = require('run-sequence');
+var concat = require('gulp-concat-util');
+var del = require('del');
+var wiredep = require('wiredep');
+var angularFilesort = require('gulp-angular-filesort');
+var _ = require('lodash');
+var eslint = require('gulp-eslint');
+var inject = require('gulp-inject');
+var rename = require('gulp-rename');
+var replace = require('gulp-replace');
+var postcss = require('gulp-postcss');
+var autoprefixer = require('autoprefixer');
+var mqpacker = require('css-mqpacker');
+var csswring = require('csswring');
+
+var TEMPLATE_FOOTER = `
+angular.module('xos.mcordTopology').run(function($location){$location.path('/')});
+angular.bootstrap(angular.element('#xosMcordTopology'), ['xos.mcordTopology']);`;
+
+module.exports = function(options){
+  
+  // delete the previously built file
+  gulp.task('clean', function(){
+    return del(
+      [options.dashboards + 'xosMcordTopology.html'],
+      {force: true}
+    );
+  });
+
+  // minify css
+  gulp.task('css', function () {
+    var processors = [
+      autoprefixer({browsers: ['last 1 version']}),
+      mqpacker,
+      csswring
+    ];
+    gulp.src([
+      `${options.css}**/*.css`,
+      `!${options.css}dev.css`
+    ])
+    .pipe(postcss(processors))
+    .pipe(gulp.dest(options.tmp + '/css/'));
+  });
+
+  gulp.task('copyCss', ['css'], function(){
+    return gulp.src([`${options.tmp}/css/*.css`])
+    .pipe(concat('xosMcordTopology.css'))
+    .pipe(gulp.dest(options.static + 'css/'))
+  });
+
+  // compile and minify scripts
+  gulp.task('scripts', function() {
+    return gulp.src([
+      options.tmp + '**/*.js'
+    ])
+    .pipe(ngAnnotate())
+    .pipe(angularFilesort())
+    .pipe(concat('xosMcordTopology.js'))
+    .pipe(concat.header('//Autogenerated, do not edit!!!\n'))
+    .pipe(concat.footer(TEMPLATE_FOOTER))
+    // .pipe(uglify())
+    .pipe(gulp.dest(options.static + 'js/'));
+  });
+
+  // set templates in cache
+  gulp.task('templates', function(){
+    return gulp.src('./src/templates/*.html')
+      .pipe(templateCache({
+        module: 'xos.mcordTopology',
+        root: 'templates/',
+        templateFooter: TEMPLATE_FOOTER
+      }))
+      .pipe(gulp.dest(options.tmp));
+  });
+
+  // copy html index to Django Folder
+  gulp.task('copyHtml', ['clean'], function(){
+    return gulp.src(options.src + 'index.html')
+      // remove dev dependencies from html
+      .pipe(replace(/<!-- bower:css -->(\n.*)*\n<!-- endbower --><!-- endcss -->/, ''))
+      .pipe(replace(/<!-- bower:js -->(\n.*)*\n<!-- endbower --><!-- endjs -->/, ''))
+      .pipe(replace(/ng-app=".*"\s/, ''))
+      // injecting minified files
+      .pipe(
+        inject(
+          gulp.src([
+            options.static + 'js/vendor/xosMcordTopologyVendor.js',
+            options.static + 'js/xosMcordTopology.js',
+            options.static + 'css/xosMcordTopology.css'
+          ]),
+          {ignorePath: '/../../../xos/core/xoslib'}
+        )
+      )
+      .pipe(rename('xosMcordTopology.html'))
+      .pipe(gulp.dest(options.dashboards));
+  });
+
+  // minify vendor js files
+  gulp.task('wiredep', function(){
+    var bowerDeps = wiredep().js;
+    if(!bowerDeps){
+      return;
+    }
+
+    // remove angular (it's already loaded)
+    _.remove(bowerDeps, function(dep){
+      return dep.indexOf('angular/angular.js') !== -1;
+    });
+
+    return gulp.src(bowerDeps)
+      .pipe(concat('xosMcordTopologyVendor.js'))
+      .pipe(uglify())
+      .pipe(gulp.dest(options.static + 'js/vendor/'));
+  });
+
+  gulp.task('lint', function () {
+    return gulp.src(['src/js/**/*.js'])
+      .pipe(eslint())
+      .pipe(eslint.format())
+      .pipe(eslint.failAfterError());
+  });
+
+  gulp.task('build', function() {
+    runSequence(
+      'templates',
+      'copyCss',
+      'babel',
+      'scripts',
+      'wiredep',
+      'copyHtml'
+    );
+  });
+};
\ No newline at end of file
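
The templates task above registers every file from src/templates/ in Angular's $templateCache, keyed under the `templates/` root passed to gulp-angular-templatecache, so the built bundle can reference markup without extra HTTP requests. A minimal sketch of how a directive could resolve such a cached entry, assuming a hypothetical templates/node-legend.html that is not part of this change:

    // hypothetical directive: 'templates/node-legend.html' is resolved from $templateCache
    // after the build, because the templates task stores keys under the 'templates/' root
    angular.module('xos.mcordTopology')
    .directive('nodeLegend', function(){
      return {
        restrict: 'E',
        templateUrl: 'templates/node-legend.html'
      };
    });
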
diff --git a/views/ngXosViews/mcordTopology/gulp/server.js b/views/ngXosViews/mcordTopology/gulp/server.js
new file mode 100644
index 0000000..162bc97
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/gulp/server.js
@@ -0,0 +1,154 @@
+'use strict';
+
+var gulp = require('gulp');
+var browserSync = require('browser-sync').create();
+var inject = require('gulp-inject');
+var runSequence = require('run-sequence');
+var angularFilesort = require('gulp-angular-filesort');
+var babel = require('gulp-babel');
+var wiredep = require('wiredep').stream;
+var httpProxy = require('http-proxy');
+var del = require('del');
+
+const environment = process.env.NODE_ENV;
+
+if (environment){
+  var conf = require(`../env/${environment}.js`);
+}
+else{
+  var conf = require('../env/default.js')
+}
+
+var proxy = httpProxy.createProxyServer({
+  target: conf.host || 'http://0.0.0.0:9999'
+});
+
+var traffic = httpProxy.createProxyServer({
+  target: 'http://10.128.13.3'
+});
+
+
+proxy.on('error', function(error, req, res) {
+  res.writeHead(500, {
+    'Content-Type': 'text/plain'
+  });
+
+  console.error('[Proxy]', error);
+});
+
+module.exports = function(options){
+
+  // open in browser with sync and proxy to 0.0.0.0
+  gulp.task('browser', function() {
+    browserSync.init({
+      // reloadDelay: 500,
+      // logLevel: 'debug',
+      // logConnections: true,
+      startPath: '#/',
+      snippetOptions: {
+        rule: {
+          match: /<!-- browserSync -->/i
+        }
+      },
+      server: {
+        baseDir: options.src,
+        routes: {
+          '/api': options.api,
+          '/xosHelpers/src': options.helpers
+        },
+        middleware: function(req, res, next){
+          if(
+            req.url.indexOf('/xos/') !== -1 ||
+            req.url.indexOf('/xoslib/') !== -1 ||
+            req.url.indexOf('/hpcapi/') !== -1
+          ){
+            if(conf.xoscsrftoken && conf.xossessionid){
+              req.headers.cookie = `xoscsrftoken=${conf.xoscsrftoken}; xossessionid=${conf.xossessionid}`;
+              req.headers['x-csrftoken'] = conf.xoscsrftoken;
+            }
+            proxy.web(req, res);
+          }
+          else if(req.url.indexOf('videoLocal') !== -1){
+            console.log('traffic: ', req.url);
+            traffic.web(req, res);
+          }
+          else{
+            next();
+          }
+        }
+      }
+    });
+
+    gulp.watch(options.src + 'js/**/*.js', ['js-watch']);
+    gulp.watch(options.src + 'vendor/**/*.js', ['bower'], function(){
+      browserSync.reload();
+    });
+    gulp.watch(options.src + '**/*.html', function(){
+      browserSync.reload();
+    });
+  });
+
+  // transpile js with sourceMaps
+  gulp.task('babel', function(){
+    return gulp.src(options.scripts + '**/*.js')
+      .pipe(babel({sourceMaps: true}))
+      .pipe(gulp.dest(options.tmp));
+  });
+
+  // inject scripts
+  gulp.task('injectScript', ['cleanTmp', 'babel'], function(){
+    return gulp.src(options.src + 'index.html')
+      .pipe(
+        inject(
+          gulp.src([
+            options.tmp + '**/*.js',
+            options.api + '*.js',
+            options.helpers + '**/*.js'
+          ])
+          .pipe(angularFilesort()),
+          {
+            ignorePath: [options.src, '/../../ngXosLib']
+          }
+        )
+      )
+      .pipe(gulp.dest(options.src));
+  });
+
+  // inject CSS
+  gulp.task('injectCss', function(){
+    return gulp.src(options.src + 'index.html')
+      .pipe(
+        inject(
+          gulp.src(options.src + 'css/*.css'),
+          {
+            ignorePath: [options.src]
+          }
+          )
+        )
+      .pipe(gulp.dest(options.src));
+  });
+
+  // inject bower dependencies with wiredep
+  gulp.task('bower', function () {
+    return gulp.src(options.src + 'index.html')
+    .pipe(wiredep({devDependencies: true}))
+    .pipe(gulp.dest(options.src));
+  });
+
+  gulp.task('js-watch', ['injectScript'], function(){
+    browserSync.reload();
+  });
+
+  gulp.task('cleanTmp', function(){
+    return del([options.tmp + '**/*']);
+  });
+
+  gulp.task('serve', function() {
+    runSequence(
+      'bower',
+      'injectScript',
+      'injectCss',
+      ['browser']
+    );
+  });
+};
diff --git a/views/ngXosViews/mcordTopology/gulpfile.js b/views/ngXosViews/mcordTopology/gulpfile.js
new file mode 100644
index 0000000..a3523ee
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/gulpfile.js
@@ -0,0 +1,25 @@
+'use strict';
+
+var gulp = require('gulp');
+var wrench = require('wrench');
+
+var options = {
+  src: 'src/',
+  css: 'src/css/',
+  scripts: 'src/js/',
+  tmp: 'src/.tmp',
+  dist: 'dist/',
+  api: '../../ngXosLib/api/',
+  helpers: '../../ngXosLib/xosHelpers/src/',
+  static: '../../../xos/core/xoslib/static/', // this is the django static folder
+  dashboards: '../../../xos/core/xoslib/dashboards/' // this is the django html folder
+};
+
+wrench.readdirSyncRecursive('./gulp')
+.map(function(file) {
+  require('./gulp/' + file)(options);
+});
+
+gulp.task('default', function () {
+  gulp.start('build');
+});
diff --git a/views/ngXosViews/mcordTopology/karma.conf.js b/views/ngXosViews/mcordTopology/karma.conf.js
new file mode 100644
index 0000000..83d3f63
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/karma.conf.js
@@ -0,0 +1,88 @@
+// Karma configuration
+// Generated on Tue Oct 06 2015 09:27:10 GMT+0000 (UTC)
+
+/* eslint indent: [2,2], quotes: [2, "single"]*/
+
+/*eslint-disable*/
+var wiredep = require('wiredep');
+var path = require('path');
+
+var bowerComponents = wiredep( {devDependencies: true} )[ 'js' ].map(function( file ){
+  return path.relative(process.cwd(), file);
+});
+
+module.exports = function(config) {
+/*eslint-enable*/
+  config.set({
+
+    // base path that will be used to resolve all patterns (eg. files, exclude)
+    basePath: '',
+
+
+    // frameworks to use
+    // available frameworks: https://npmjs.org/browse/keyword/karma-adapter
+    frameworks: ['jasmine'],
+
+
+    // list of files / patterns to load in the browser
+    files: bowerComponents.concat([
+      '../../static/js/xosApi.js',
+      '../../static/js/vendor/ngXosHelpers.js',
+      'src/js/**/*.js',
+      'spec/**/*.mock.js',
+      'spec/**/*.test.js',
+      'src/**/*.html'
+    ]),
+
+
+    // list of files to exclude
+    exclude: [
+    ],
+
+
+    // preprocess matching files before serving them to the browser
+    // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor
+    preprocessors: {
+      'src/js/**/*.js': ['babel'],
+      'spec/**/*.test.js': ['babel'],
+      'src/**/*.html': ['ng-html2js']
+    },
+
+    ngHtml2JsPreprocessor: {
+      stripPrefix: 'src/', //strip the src path from template url (http://stackoverflow.com/questions/22869668/karma-unexpected-request-when-testing-angular-directive-even-with-ng-html2js)
+      moduleName: 'templates' // define the template module name
+    },
+
+    // test results reporter to use
+    // possible values: 'dots', 'progress'
+    // available reporters: https://npmjs.org/browse/keyword/karma-reporter
+    reporters: ['mocha'],
+
+
+    // web server port
+    port: 9876,
+
+
+    // enable / disable colors in the output (reporters and logs)
+    colors: true,
+
+
+    // level of logging
+    // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
+    logLevel: config.LOG_INFO,
+
+
+    // enable / disable watching file and executing tests whenever any file changes
+    autoWatch: true,
+
+
+    // start these browsers
+    // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
+    browsers: ['PhantomJS'],
+
+
+    // Continuous Integration mode
+    // if true, Karma captures browsers, runs the tests and exits
+    singleRun: false
+  });
+};
diff --git a/views/ngXosViews/mcordTopology/mocks/data/instances.json b/views/ngXosViews/mcordTopology/mocks/data/instances.json
new file mode 100644
index 0000000..0f86876
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/mocks/data/instances.json
@@ -0,0 +1,29 @@
+[
+  {
+    "id": 1,
+    "humanReadableName": "BBU_service_instance-1",
+    "instance_id": "instance-00000001", 
+    "instance_uuid": "42b75cb7-7205-4a68-9100-b2c1e3ea69b5", 
+    "name": "BBU_service_instance-1", 
+    "instance_name": "mysite_BBU-1", 
+    "ip": "130.127.133.91"
+  },
+  {
+    "id": 2,
+    "humanReadableName": "BBU_service_instance-2",
+    "instance_id": "instance-00000002", 
+    "instance_uuid": "42b75cb7-7205-4a68-9200-b2c2e3ea69b5", 
+    "name": "BBU_service_instance-2", 
+    "instance_name": "mysite_BBU-2", 
+    "ip": "130.127.133.92"
+  },
+  {
+    "id": 5,
+    "humanReadableName": "vsg_service_instance-4",
+    "instance_id": "instance-00000004", 
+    "instance_uuid": "44b75cb7-7405-4a68-9400-b4c4e3ea69b5", 
+    "name": "vsg_service_instance-4", 
+    "instance_name": "mysite_vsg-4", 
+    "ip": "130.127.133.94"
+  }
+]
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/mocks/mcord.conf.json b/views/ngXosViews/mcordTopology/mocks/mcord.conf.json
new file mode 100644
index 0000000..7246775
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/mocks/mcord.conf.json
@@ -0,0 +1,8 @@
+[
+  {
+    "url": "instances",
+    "base": "xos/",
+    "methods": ["GET", "POST"],
+    "param": "id"
+  }
+]
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/package.json b/views/ngXosViews/mcordTopology/package.json
new file mode 100644
index 0000000..9826895
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/package.json
@@ -0,0 +1,53 @@
+{
+  "name": "xos-mcordTopology",
+  "version": "1.0.0",
+  "description": "Angular Application for XOS, created with generator-xos",
+  "scripts": {
+    "prestart": "npm install && bower install",
+    "start": "gulp serve",
+    "prebuild": "npm install && bower install",
+    "build": "gulp",
+    "test": "karma start",
+    "lint": "eslint src/js/",
+    "mocks": "easy-mocker -c ./mocks/mcord.conf.json -d ./mocks/data",
+    "dev": "NODE_ENV=mock gulp serve"
+  },
+  "keywords": [
+    "XOS",
+    "Angular",
+    "XOSlib"
+  ],
+  "author": "Matteo Scandolo",
+  "license": "MIT",
+  "dependencies": {},
+  "devDependencies": {
+    "autoprefixer": "^6.3.3",
+    "browser-sync": "^2.9.11",
+    "css-mqpacker": "^4.0.0",
+    "csswring": "^4.2.1",
+    "del": "^2.0.2",
+    "easy-mocker": "^1.2.0",
+    "eslint": "^1.8.0",
+    "eslint-plugin-angular": "linkmesrl/eslint-plugin-angular",
+    "gulp": "^3.9.0",
+    "gulp-angular-filesort": "^1.1.1",
+    "gulp-angular-templatecache": "^1.8.0",
+    "gulp-babel": "^5.3.0",
+    "gulp-concat": "^2.6.0",
+    "gulp-concat-util": "^0.5.5",
+    "gulp-eslint": "^1.0.0",
+    "gulp-inject": "^3.0.0",
+    "gulp-minify-html": "^1.0.4",
+    "gulp-ng-annotate": "^1.1.0",
+    "gulp-postcss": "^6.0.1",
+    "gulp-rename": "^1.2.2",
+    "gulp-replace": "^0.5.4",
+    "gulp-uglify": "^1.4.2",
+    "http-proxy": "^1.12.0",
+    "lodash": "^3.10.1",
+    "proxy-middleware": "^0.15.0",
+    "run-sequence": "^1.1.4",
+    "wiredep": "^3.0.0-beta",
+    "wrench": "^1.5.8"
+  }
+}
diff --git a/views/ngXosViews/mcordTopology/spec/sample.test.js b/views/ngXosViews/mcordTopology/spec/sample.test.js
new file mode 100644
index 0000000..ecc17a0
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/spec/sample.test.js
@@ -0,0 +1,37 @@
+'use strict';
+
+describe('The User List', () => {
+  
+  var scope, element, isolatedScope, httpBackend;
+
+  beforeEach(module('xos.mcordTopology'));
+  beforeEach(module('templates'));
+
+  beforeEach(inject(function($httpBackend, $compile, $rootScope){
+    
+    httpBackend = $httpBackend;
+    // Setting up mock request
+    $httpBackend.expectGET('/xos/users/?no_hyperlinks=1').respond([
+      {
+        email: 'teo@onlab.us',
+        firstname: 'Matteo',
+        lastname: 'Scandolo' 
+      }
+    ]);
+  
+    scope = $rootScope.$new();
+    element = angular.element('<users-list></users-list>');
+    $compile(element)(scope);
+    scope.$digest();
+    isolatedScope = element.isolateScope().vm;
+  }));
+
+  it('should load 1 user', () => {
+    httpBackend.flush();
+    expect(isolatedScope.users.length).toBe(1);
+    expect(isolatedScope.users[0].email).toEqual('teo@onlab.us');
+    expect(isolatedScope.users[0].firstname).toEqual('Matteo');
+    expect(isolatedScope.users[0].lastname).toEqual('Scandolo');
+  });
+
+});
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/src/css/dev.css b/views/ngXosViews/mcordTopology/src/css/dev.css
new file mode 100644
index 0000000..32d915d
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/css/dev.css
@@ -0,0 +1,7 @@
+#xosMcordTopology{
+  position: absolute;
+  /*top: 100px;
+  left: 200px;*/
+  width: 100%;
+  height: 100%;
+}
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/src/css/mcord.css b/views/ngXosViews/mcordTopology/src/css/mcord.css
new file mode 100644
index 0000000..8fb7540
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/css/mcord.css
@@ -0,0 +1,81 @@
+#xosMcordTopology {
+  height: 700px;
+}
+
+[ui-view],
+m-cord-topology{
+  width: 100%;
+  height: 100%;
+  display: block;
+}
+
+line {
+  stroke: blue;
+  /*stroke-width: 1;*/
+}
+
+line.big{
+  stroke-width: 2;
+}
+
+circle,
+rect {
+  fill: #fff;
+  stroke-width: 1; 
+}
+
+.fabric {
+  stroke: none;
+  fill: #123456;
+  fill-rule: evenodd;
+}
+
+.fabric-container {
+  fill: transparent;
+  stroke: #000;
+  stroke-width: 1;
+}
+
+.bbu {
+  stroke: black;
+  fill: #FF7F0E;
+}
+
+.rru {
+  stroke: #000;
+  fill: #FFBB78;
+}
+
+.rru.antenna {
+  stroke: #000;
+  fill: brown;
+}
+
+.rru-shadow {
+  fill: #FFBB78;
+  opacity: .4
+}
+
+.MME, .SGW, .PGW, .Vid {
+  fill: purple;
+  stroke: #000;
+}
+
+rect.MME,
+rect.SGW,
+rect.PGW,
+rect.bbu,
+rect.Vid {
+  fill: #fff;
+  stroke: #fff;
+}
+
+.bbu text, 
+.MME text,
+.SGW text,
+.PGW text, 
+.Vid text {
+  font-size: 10px;
+  stroke-width: 0;
+  fill: #000;
+}
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/src/index.html b/views/ngXosViews/mcordTopology/src/index.html
new file mode 100644
index 0000000..691a869
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/index.html
@@ -0,0 +1,36 @@
+<!-- browserSync -->
+<!-- bower:css -->
+<link rel="stylesheet" href="vendor/bootstrap-css/css/bootstrap.css" />
+<!-- endbower --><!-- endcss -->
+<!-- inject:css -->
+<link rel="stylesheet" href="/css/dev.css">
+<link rel="stylesheet" href="/css/mcord.css">
+<!-- endinject -->
+
+<div ng-app="xos.mcordTopology" id="xosMcordTopology">
+    <div ui-view></div>
+</div>
+
+<!-- bower:js -->
+<script src="vendor/d3/d3.js"></script>
+<script src="vendor/jquery/dist/jquery.js"></script>
+<script src="vendor/angular/angular.js"></script>
+<script src="vendor/angular-mocks/angular-mocks.js"></script>
+<script src="vendor/angular-ui-router/release/angular-ui-router.js"></script>
+<script src="vendor/angular-cookies/angular-cookies.js"></script>
+<script src="vendor/angular-resource/angular-resource.js"></script>
+<script src="vendor/ng-lodash/build/ng-lodash.js"></script>
+<script src="vendor/bootstrap-css/js/bootstrap.js"></script>
+<!-- endbower --><!-- endjs -->
+<!-- inject:js -->
+<script src="/xosHelpers/src/xosHelpers.module.js"></script>
+<script src="/xosHelpers/src/services/noHyperlinks.interceptor.js"></script>
+<script src="/xosHelpers/src/services/csrfToken.interceptor.js"></script>
+<script src="/xosHelpers/src/services/api.services.js"></script>
+<script src="/api/ng-xoslib.js"></script>
+<script src="/api/ng-xos.js"></script>
+<script src="/api/ng-hpcapi.js"></script>
+<script src="/.tmp/main.js"></script>
+<script src="/.tmp/static.data.js"></script>
+<script src="/.tmp/node_drawer.js"></script>
+<!-- endinject -->
diff --git a/views/ngXosViews/mcordTopology/src/js/main.js b/views/ngXosViews/mcordTopology/src/js/main.js
new file mode 100644
index 0000000..a479717
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/js/main.js
@@ -0,0 +1,348 @@
+'use strict';
+
+angular.module('xos.mcordTopology', [
+  'ngResource',
+  'ngCookies',
+  'ngLodash',
+  'ui.router',
+  'xos.helpers'
+])
+.config(($stateProvider) => {
+  $stateProvider
+  .state('topology', {
+    url: '/',
+    template: '<m-cord-topology></m-cord-topology>'
+  });
+})
+.config(function($httpProvider){
+  $httpProvider.interceptors.push('NoHyperlinks');
+})
+.service('Traffic', function($http, $q){
+  this.get = () => {
+    var deferred = $q.defer();
+    $http.get('videoLocal.txt')
+    .then(res => {
+      deferred.resolve(res.data);
+    });
+    return deferred.promise;
+  }
+})
+.directive('mCordTopology', function(){
+  return {
+    restrict: 'E',
+    scope: {},
+    bindToController: true,
+    controllerAs: 'vm',
+    template: '',
+    controller: function($element, $interval, $rootScope, XosApi, lodash, TopologyElements, NodeDrawer, Traffic){
+
+      const el = $element[0];
+
+      let nodes = [];
+      let links = [];
+      let traffic = 0;
+      let linkWidth = 1;
+      let trafficCorrection = 5;
+
+      const filterBBU = (instances) => {
+        return lodash.filter(instances, i => i.name.indexOf('BBU') >= 0);
+      };
+
+      const filterOthers = (instances) => {
+        return TopologyElements.fakedInstance;
+      };
+
+      // retrieving instances list
+      const getData = () => {
+
+        d3.select('svg')
+          .style('width', `${el.clientWidth}px`)
+          .style('height', `${el.clientHeight}px`);
+
+        nodes = TopologyElements.nodes;
+        links = TopologyElements.links;
+
+        Traffic.get()
+        .then((newTraffic) => {
+
+          // calculating link size
+          // it should change between 1 and 10
+          if(!traffic){
+            linkWidth = 2;
+          }
+          else if(newTraffic === traffic){
+            linkWidth = linkWidth;
+          }
+          else{
+            let delta = newTraffic - traffic;
+
+            if(delta > 0){
+              linkWidth = linkWidth + (delta / trafficCorrection);
+            }
+            else{
+              linkWidth = linkWidth - ((delta * -1) / trafficCorrection);
+            }
+
+          }
+
+          if(linkWidth < 0.2){
+            linkWidth = 0.2;
+          }
+
+          traffic = newTraffic;
+
+          return XosApi.Instance_List_GET()
+        })
+        .then((instances) => {
+          addBbuNodes(filterBBU(instances));
+          addOtherNodes(filterOthers(instances));
+
+          draw(svg, nodes, links);
+        })
+        .catch((e) => {
+          throw new Error(e);
+        });
+      };
+
+      const force = d3.layout.force();
+
+      // create svg elements
+      const svg = d3.select(el)
+        .append('svg')
+        .style('width', `${el.clientWidth}px`)
+        .style('height', `${el.clientHeight}px`);
+
+      const linkContainer = svg.append('g')
+        .attr({
+          class: 'link-container'
+        });
+
+      const nodeContainer = svg.append('g')
+        .attr({
+          class: 'node-container'
+        });
+
+      // replace human readable ids with d3 ids
+      // NOTE now ids are not maintained on update...
+      const buildLinks = (links, nodes) => {
+        return links.map((l) => {
+
+
+          let source = lodash.findIndex(nodes, {id: l.source});
+          let target = lodash.findIndex(nodes, {id: l.target});
+          // console.log(`link-${source}-${target}`, source, target);
+          return {
+            source: source,
+            target: target,
+            value: 1,
+            id: `link-${source}-${target}`,
+            type: l.source.indexOf('fabric') >= 0 ? 'big':'small'
+          };
+
+        });
+      };
+
+      // find fabric nodes and center horizontally
+      const positionFabricNodes = (nodes) => {
+        return lodash.map(nodes, n => {
+          if(n.type !== 'fabric'){
+            return n;
+          }
+
+          n.x = n.x * hStep;
+          n.y = n.y * vStep;
+
+          return n;
+        });
+      };
+
+      const addBbuNodes = (instances) => {
+
+        // calculate bbu hStep
+        let bbuHStep = ((el.clientWidth / 2) / (instances.length + 1));
+
+        // create nodes
+        let bbuNodes = instances.map((n, i) => {
+          return {
+            type: 'bbu',
+            name: n.name,
+            id: `bbu-${n.id}`,
+            fixed: true,
+            y: vStep * 3,
+            x: bbuHStep * (i + 1)
+          };
+        });
+
+        // create links
+        let bbuLinks = bbuNodes.map(n => {
+          return {
+            source: n.id,
+            target: 'fabric4'
+          };
+        });
+
+        // fake RRU nodes and links
+        instances.forEach((n, i) => {
+          bbuNodes.push({
+            type: 'rru',
+            name: 'rru',
+            id: `rru-${n.id}`,
+            fixed: true,
+            y: vStep * 4,
+            x: bbuHStep * (i + 1)
+          });
+
+          bbuLinks.push({
+            source: `rru-${n.id}`,
+            target: `bbu-${n.id}`
+          });
+        })
+
+        nodes = nodes.concat(bbuNodes);
+
+
+        links = links.concat(bbuLinks);
+      };
+
+      // add MME, PGW, SGW nodes
+      const addOtherNodes = (instances) => {
+        let hStep = ((el.clientWidth / 2) / (instances.length + 1));
+
+        // create nodes
+        let otherNodes = instances.map((n, i) => {
+          return {
+            type: n.name.substring(0, 3),
+            name: n.name,
+            id: `${n.name.substring(0, 3)}-${n.id}`,
+            fixed: true,
+            y: vStep * 3,
+            x: (el.clientWidth / 2) + (hStep * (i + 1))
+          };
+        });
+
+        // create links
+        let otherLinks = otherNodes.map(n => {
+          return {
+            source: n.id,
+            target: 'fabric4'
+          };
+        });
+
+
+        nodes = nodes.concat(otherNodes);
+        links = links.concat(otherLinks);
+      }
+
+      let hStep, vStep;
+
+      hStep = el.clientWidth / 3;
+      vStep = el.clientHeight / 5;
+
+      const draw = (svg, nodes, links) => {
+
+        hStep = el.clientWidth / 3;
+        vStep = el.clientHeight / 5;
+
+        links = buildLinks(links, nodes);
+
+        nodes = positionFabricNodes(nodes);
+
+        console.log(nodes);
+        // start force layout
+        force
+          .nodes(nodes)
+          .links(links)
+          .size([el.clientWidth, el.clientHeight])
+          .charge(-20)
+          .chargeDistance(200)
+          .linkDistance(80)
+          .linkStrength(0.1)
+          .start();
+
+
+        const linkContainer = d3.select('.link-container');
+        const nodeContainer = d3.select('.node-container');
+
+        NodeDrawer.drawFabricBox(nodeContainer, hStep, vStep);
+
+        // draw links
+        var link = linkContainer.selectAll('.link')
+          .data(links, d => d.id);
+        
+        link.enter().append('line')
+          .attr({
+            class: d => `link ${d.type}`,
+            'stroke-width': linkWidth,
+            id: d => d.id,
+            opacity: 0
+          })
+          .transition()
+          .duration(1000)
+          .attr({
+            opacity: 1
+          });
+
+        link
+          .transition()
+          .duration(1000)
+          .attr({
+            'stroke-width': linkWidth,
+            opacity: 1
+          });
+
+        link.exit()
+        .remove();
+
+        //draw nodes
+        var node = nodeContainer.selectAll('.node')
+          .data(nodes, d => {
+            return d.id
+          });
+        
+        // append a group for any new node
+        var enter = node.enter()
+          .append('g', d => d.interfaceCfgIdentifier)
+          .attr({
+            class: d => `${d.type} node`,
+            transform: d => `translate(${d.x}, ${d.y})`
+          });
+
+        // draw nodes
+        NodeDrawer.drawBbus(enter.filter('.bbu'))
+        NodeDrawer.drawRrus(enter.filter('.rru'))
+        NodeDrawer.drawFabric(enter.filter('.fabric'))
+        NodeDrawer.drawOthers(enter.filter(d => {
+          console.log(d.type);
+          return (
+            d.type  === 'MME' ||
+            d.type === 'SGW' ||
+            d.type === 'PGW' ||
+            d.type === 'Vid'
+          )
+        }));
+
+        // remove nodes
+        var exit = node.exit();
+
+        NodeDrawer.removeElements(exit);
+
+        force.on('tick', function() {
+          link
+            .attr('x1', d => d.source.x )
+            .attr('y1', d => d.source.y )
+            .attr('x2', d => d.target.x )
+            .attr('y2', d => d.target.y );
+
+          node.attr('transform', (d) => `translate(${d.x},${d.y})`);
+        });
+      };
+      
+      // $interval(() => {
+      //   getData();
+      // }, 3000);
+      getData();
+
+      
+    }
+  };
+});
\ No newline at end of file
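
The Traffic polling logic in getData() above adjusts linkWidth by delta / trafficCorrection on each sample and clamps it to a 0.2 floor. The same update rule, extracted here as a standalone function purely for illustration (the function name is invented; the constants mirror the directive's trafficCorrection = 5 and 0.2 minimum):

    // illustrative extraction of the link-width update rule from getData()
    function nextLinkWidth(currentWidth, previousTraffic, newTraffic, correction = 5) {
      if (!previousTraffic) {
        return 2;                     // no previous sample: start from the default width
      }
      if (newTraffic === previousTraffic) {
        return currentWidth;          // traffic unchanged: keep the current width
      }
      const delta = newTraffic - previousTraffic;
      const width = currentWidth + (delta / correction); // grow or shrink with the traffic delta
      return width < 0.2 ? 0.2 : width;                  // same 0.2 floor as the directive
    }
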
diff --git a/views/ngXosViews/mcordTopology/src/js/node_drawer.js b/views/ngXosViews/mcordTopology/src/js/node_drawer.js
new file mode 100644
index 0000000..b2dae65
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/js/node_drawer.js
@@ -0,0 +1,213 @@
+'use strict';
+
+angular.module('xos.mcordTopology')
+.service('NodeDrawer', function(TopologyElements){
+
+  const duration = 500;
+
+  let isFabricDrawed = false;
+
+  this.drawFabricBox = (svg, hStep, vStep) => {
+
+    if(isFabricDrawed){
+      return;
+    }
+
+    let fabric = svg.append('g')
+    .attr({
+      transform: `translate(${hStep - 25}, ${vStep - 25})`
+    });
+
+    fabric.append('rect')
+      .attr({
+        width: hStep + 50,
+        height: vStep + 50,
+        class: 'fabric-container'
+      });
+
+    // fabric.append('text')
+    // .text('Fabric')
+    // .attr({
+    //   'text-anchor': 'middle',
+    //   x: ((hStep + 50) / 2),
+    //   y: -10
+    // });
+
+    isFabricDrawed = true;
+  };
+
+  this.drawBbus = (nodes) => {
+
+    nodes.append('rect')
+      .attr({
+        class: d => d.type,
+        width: 30,
+        height: 30,
+        x: -15,
+        y: -15,
+        opacity: 0
+      })
+      .transition()
+      .duration(duration)
+      .attr({
+        r: 15,
+        opacity: 1
+      });
+
+    nodes
+      .append('path')
+      .attr({
+        class: d => `${d.type} antenna`,
+        opacity: 0,
+        d: () => TopologyElements.icons.bbu,
+        transform: `translate(-18, -18)`
+      })
+      .transition()
+      .duration(duration)
+      .attr({
+        opacity: 1
+      });
+
+    nodes.append('text')
+    .attr({
+      'text-anchor': 'start',
+      y: 25,
+      x: 5,
+      opacity: 0
+    })
+    .text(d => `BBU ${d.name.substr(d.name.length - 1, 1)}`)
+    .transition()
+    .duration(duration * 2)
+    .attr({
+      opacity: 1
+    });
+  };
+
+  this.drawRrus = (nodes) => {
+
+    nodes.append('circle')
+      .attr({
+        class: d => `${d.type}-shadow`,
+        r: 0,
+        opacity: 0
+      })
+      .transition()
+      .duration(duration * 2)
+      // .delay((d, i) => i * (duration / 2))
+      .attr({
+        r: 40,
+        opacity: 1
+      });
+
+    nodes
+      .append('path')
+      .attr({
+        class: d => `${d.type} antenna`,
+        opacity: 0,
+        d: () => TopologyElements.icons.rru,
+        transform: `translate(-18, -18)`
+      })
+      .transition()
+      .duration(duration)
+      .attr({
+        opacity: 1
+      });
+  
+    // nodes.append('circle')
+    //   .attr({
+    //     class: d => d.type,
+    //     r: 0,
+    //     opacity: 0
+    //   })
+    //   .transition()
+    //   .duration(duration)
+    //   // .delay((d, i) => i * (duration / 2))
+    //   .attr({
+    //     r: 10,
+    //     opacity: 1
+    //   });
+  };
+
+  this.drawFabric = (nodes) => {
+    nodes
+      .append('rect')
+      .attr({
+        width: 30,
+        height: 30,
+        x: -15,
+        y: -15
+      });
+
+    nodes
+      .append('path')
+      .attr({
+        class: d => d.type,
+        opacity: 0,
+        d: () => TopologyElements.icons.switch,
+        transform: `translate(-22, -22), scale(0.4)`
+      })
+      .transition()
+      .duration(duration)
+      // .delay((d, i) => i * (duration / 2))
+      .attr({
+        opacity: 1
+      });
+  };
+
+  this.drawOthers = (nodes) => {
+    nodes.append('rect')
+      .attr({
+        class: d => d.type,
+        width: 30,
+        height: 30,
+        x: -15,
+        y: -15,
+        opacity: 0
+      })
+      .transition()
+      .duration(duration)
+      .attr({
+        r: 15,
+        opacity: 1
+      });
+
+    nodes
+      .append('path')
+      .attr({
+        class: d => `${d.type} antenna`,
+        opacity: 0,
+        d: () => TopologyElements.icons.bbu,
+        transform: `translate(-18, -18)`
+      })
+      .transition()
+      .duration(duration)
+      .attr({
+        opacity: 1
+      });
+
+    nodes.append('text')
+    .attr({
+      'text-anchor': 'start',
+      y: 25,
+      x: -12,
+      opacity: 0
+    })
+    .text(d => d.name.toUpperCase())
+    .transition()
+    .duration(duration * 2)
+    .attr({
+      opacity: 1
+    });
+
+  };
+
+  this.removeElements = (nodes) => {
+    nodes
+    .transition()
+    .duration(duration)
+    .attr({
+      opacity: 0
+    })
+    .remove();
+  };
+});
\ No newline at end of file
diff --git a/views/ngXosViews/mcordTopology/src/js/static.data.js b/views/ngXosViews/mcordTopology/src/js/static.data.js
new file mode 100644
index 0000000..97ccf9d
--- /dev/null
+++ b/views/ngXosViews/mcordTopology/src/js/static.data.js
@@ -0,0 +1,85 @@
+'use strict';
+
+angular.module('xos.mcordTopology')
+.constant('TopologyElements', {
+  nodes: [
+    // {
+    //   id: 'fabric1',
+    //   type: 'fabric',
+    //   name: 'fabric1',
+    //   fixed: true,
+    //   x: 1,
+    //   y: 1
+    // },
+    // {
+    //   id: 'fabric2',
+    //   type: 'fabric',
+    //   name: 'fabric2',
+    //   fixed: true,
+    //   x: 1,
+    //   y: 2
+    // },
+    // {
+    //   id: 'fabric3',
+    //   type: 'fabric',
+    //   name: 'fabric3',
+    //   fixed: true,
+    //   x: 2,
+    //   y: 1
+    // },
+    {
+      id: 'fabric4',
+      type: 'fabric',
+      name: 'fabric4',
+      fixed: true,
+      x: 1.5,
+      y: 1.5
+    }
+  ],
+  links: [
+    // {
+    //   source: 'fabric1',
+    //   target: 'fabric2'
+    // },
+    // {
+    //   source: 'fabric1',
+    //   target: 'fabric4'
+    // },
+    // {
+    //   source: 'fabric3',
+    //   target: 'fabric4'
+    // },
+    // {
+    //   source: 'fabric3',
+    //   target: 'fabric2'
+    // }
+  ],
+  fakedInstance: [
+    {
+      humanReadableName: 'MME',
+      name: 'MME'
+    },
+    {
+      humanReadableName: 'PGW',
+      name: 'PGW'
+    },
+    {
+      humanReadableName: 'SGW',
+      name: 'SGW'
+    },
+    {
+      humanReadableName: 'Video Server',
+      name: 'Video Server'
+    }
+  ],
+  icons: {
+    bbu: `M11.08,4.66H24.76l6.81,6.82H4.23Z M4.24,18.34V13.21H31.6v5.13H4.24Zm25.64-1.72V14.94H28.19v1.69h1.68Zm-13.65-1.7v1.69h1.69V14.93H16.22Zm-3.42,0v1.69h1.68V14.93H12.8Zm-3.42,0v1.69h1.68V14.93H9.38ZM6,14.93v1.69H7.64V14.93H6Z M32.8,33.23H3V11.42l0,0c1.17-1.16,2.54-2.5,3.87-3.8S9.59,5,10.72,3.87l0,0H25.08l0,0C26.25,5,27.6,6.32,28.9,7.61s2.68,2.63,3.83,3.78l0,0v0.06ZM3.3,33H32.53l0-21.43C31.36,10.39,30,9.07,28.71,7.8S26.09,5.22,25,4.1H10.86C9.75,5.21,8.41,6.52,7.12,7.77s-2.67,2.61-3.83,3.76V33Z M4.24,25.18V20.05H31.6v5.13H4.24Zm24-1.73h1.68V21.78H28.19v1.67Zm-12,0H17.9V21.78H16.21v1.68Zm-1.73-1.68H12.81v1.67h1.68V21.78Zm-3.43,1.68V21.78H9.38v1.69h1.68ZM6,23.46H7.64V21.78H6v1.68Z M31.6,26.89V32H4.24V26.89H31.6Zm-3.4,1.72V30.3h1.68V28.61H28.19Zm-10.28,0H16.22V30.3h1.68V28.62Zm-3.43,1.69V28.62H12.8v1.69h1.68Zm-3.42,0V28.62H9.38v1.69h1.68ZM7.65,28.62H6v1.67H7.65V28.62Z`,
+    // bbu: `M15,100a5,5,0,0,1-5-5v-65a5,5,0,0,1,5-5h80a5,5,0,0,1,5,5v65a5,5,0,0,1-5,5zM14,22.5l11-11a10,3,0,0,1,10-2h40a10,3,0,0,1,10,2l11,11zM16,35a5,5,0,0,1,10,0a5,5,0,0,1-10,0z`,
+    switch: `M10,20a10,10,0,0,1,10-10h70a10,10,0,0,1,10,10v70a10,10,
+            0,0,1-10,10h-70a10,10,0,0,1-10-10zM60,26l12,0,0-8,18,13-18,13,0
+            -8-12,0zM60,60l12,0,0-8,18,13-18,13,0-8-12,0zM50,40l-12,0,0-8
+            -18,13,18,13,0-8,12,0zM50,74l-12,0,0-8-18,13,18,13,0-8,12,0z`,
+    // rru: `M85,71.2c-8.9,10.5-29.6,8.7-45.3-3.5C23.9,55.4,19.8,37,28.6,26.5C29.9,38.6,71.5,69.9,85,71.2z M92.7,76.2M16.2,15 M69.5,100.7v-4c0-1.4-1.2-2.2-2.6-2.2H19.3c-1.4,0-2.8,0.7-2.8,2.2v3.9c0,0.7,0.8,1,1.5,1h50.3C69,101.5,69.5,101.3,69.5,100.7z M77.3,7.5l0,3.7c9,0.1,16.3,7.1,16.2,15.7l3.9,0C97.5,16.3,88.5,7.6,77.3,7.5z M77.6,14.7l0,2.5c5.3,0,9.7,4.2,9.6,9.3l2.6,0C89.9,20,84.4,14.7,77.6,14.7z M82.3,22.2c-1.3-1.2-2.9-1.9-4.7-1.9l0,1.2c1.4,0,2.8,0.6,3.8,1.5c1,1,1.6,2.3,1.6,3.7l1.3,0C84.3,25.1,83.6,23.4,82.3,22.2z M38.9,69.5l-5.1,23h16.5l-2.5-17.2C44.1,73.3,38.9,69.5,38.9,69.5zM58.1,54.1c13.7,10.1,26.5,16.8,29.2,13.7c2.7-3.1-5.6-13-19.3-24.4 M62.9,34.2 M62,37.9C47.7,27.3,33.7,20,31,23.1c-2.7,3.2,7,14.2,20.6,26 M73.9,25.7c-2.9,0.1-5.2,2.3-5.1,4.8c0,0.7,0.2,1.4,0.6,2l0,0L53.8,49.7l3.3,2.5L72.7,35l-0.4-0.3c0.6,0.2,1.3,0.3,1.9,0.3c2.9-0.1,5.2-2.3,5.1-4.9C79.3,27.6,76.8,25.6,73.9,25.7z`,
+    rru: `M18.11,11a2.25,2.25,0,0,1,2.13,1.53A2.2,2.2,0,0,1,19.52,15a0.74,0.74,0,0,0-.3.61A7.49,7.49,0,0,0,20,19.35c2,4.55,3.94,9.13,5.89,13.7a1.14,1.14,0,0,1-.59,1.64A1.11,1.11,0,0,1,23.86,34q-0.53-1.2-1-2.41a0.38,0.38,0,0,0-.41-0.28H13.78a0.36,0.36,0,0,0-.39.26q-0.51,1.24-1.06,2.47a1.11,1.11,0,0,1-1.14.67,1.07,1.07,0,0,1-1-.89,1.47,1.47,0,0,1,.1-0.75q2.84-6.66,5.7-13.32a4.06,4.06,0,0,1,.18-0.42A6.39,6.39,0,0,0,17,15.53,0.58,0.58,0,0,0,16.74,15,2.21,2.21,0,0,1,16,12.5,2.26,2.26,0,0,1,18.11,11ZM21.74,29.1c-0.32-.74-0.61-1.43-0.92-2.12a0.35,0.35,0,0,0-.27-0.14H15.66a0.33,0.33,0,0,0-.26.11c-0.32.7-.62,1.41-0.93,2.15h7.26Zm-5.31-4.55h3.37L18.1,20.63Z M2.23,13.56A16,16,0,0,1,6.76,2.16a1.68,1.68,0,0,1,.8-0.46,1.06,1.06,0,0,1,1.18.59,1.16,1.16,0,0,1-.23,1.37A14.48,14.48,0,0,0,6.19,6.77a13.57,13.57,0,0,0,1.9,15.59l0.46,0.49a1.16,1.16,0,1,1-1.68,1.59,15.6,15.6,0,0,1-4.41-8.64C2.32,14.95,2.28,14.07,2.23,13.56Z M34,13.84a15.51,15.51,0,0,1-4.54,10.52,1.19,1.19,0,0,1-1.65.18,1.17,1.17,0,0,1,0-1.77,13.81,13.81,0,0,0,2.79-4.1,13.6,13.6,0,0,0-2.7-14.91A1.8,1.8,0,0,1,27.41,3,1.08,1.08,0,0,1,28,1.8,1.15,1.15,0,0,1,29.38,2a15.59,15.59,0,0,1,2.51,3.28A16.47,16.47,0,0,1,34,13.84Z M10.93,21.6A1.33,1.33,0,0,1,9.87,21a11.06,11.06,0,0,1-2.8-5.27A11.22,11.22,0,0,1,9.8,5.51l0.27-.28a1.16,1.16,0,1,1,1.64,1.63,8.62,8.62,0,0,0-2.06,3.22A8.87,8.87,0,0,0,11.18,19c0.18,0.23.4,0.44,0.59,0.66A1.13,1.13,0,0,1,11.95,21,1.08,1.08,0,0,1,10.93,21.6Z M29.47,13.57a11.11,11.11,0,0,1-3.27,7.64,1.18,1.18,0,0,1-1.51.21,1.13,1.13,0,0,1-.43-1.4,2.06,2.06,0,0,1,.39-0.54,8.85,8.85,0,0,0,2.49-5.89A9,9,0,0,0,24.64,7a1.85,1.85,0,0,1-.44-0.85A1,1,0,0,1,24.82,5a1.07,1.07,0,0,1,1.3.21,20.11,20.11,0,0,1,1.79,2.31A11.09,11.09,0,0,1,29.47,13.57Z M11.3,13.18a6.73,6.73,0,0,1,2-4.73,1.15,1.15,0,0,1,1.45-.2,1.12,1.12,0,0,1,.49,1.32,1.58,1.58,0,0,1-.33.53,4.49,4.49,0,0,0,0,6.26,1.16,1.16,0,1,1-1.7,1.57A6.81,6.81,0,0,1,11.3,13.18Z M24.94,13.14A6.9,6.9,0,0,1,23,18a1.16,1.16,0,1,1-1.7-1.58,4.5,4.5,0,0,0,0-6.29A1.16,1.16,0,1,1,23,8.5,6.75,6.75,0,0,1,24.94,13.14Z`
+  }
+})
\ No newline at end of file
diff --git a/xos/configurations/common/xos_common_config b/xos/configurations/common/xos_common_config
index 76ba747..312f1d9 100644
--- a/xos/configurations/common/xos_common_config
+++ b/xos/configurations/common/xos_common_config
@@ -37,11 +37,18 @@
 images_directory=/opt/xos/images
 dependency_graph=/opt/xos/model-deps
 logfile=/var/log/xos_backend.log
-save_ansible_output=True
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
 
 [gui]
 disable_minidashboard=True
-branding_name=Open Cloud
-branding_icon=/static/logo.png
-branding_favicon=/static/favicon.png
-branding_bg=/static/bg.jpg
+branding_name=M-CORD
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/mcord-bg2.jpg
+service_view_class=core.views.mCordServiceGrid.ServiceGridView
+
+[networking]
+use_vtn=True
+
diff --git a/xos/configurations/frontend/Makefile b/xos/configurations/frontend/Makefile
index ee2739c..dcc5af2 100644
--- a/xos/configurations/frontend/Makefile
+++ b/xos/configurations/frontend/Makefile
@@ -5,6 +5,9 @@
 	sudo docker-compose up -d
 	bash ../common/wait_for_xos.sh
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/frontend/sample.yaml
+	# sudo docker-compose run xos python manage.py makemigrations mcordservice
+	# sudo docker-compose run xos python manage.py syncdb
+	# sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/tosca/MCORDServiceN.yaml
 
 containers:
 	cd ../../../containers/xos; make devel
@@ -24,19 +27,14 @@
 enter-xos:
 	sudo docker exec -ti frontend_xos_1 bash
 
-django-restart:
-	sudo docker exec frontend_xos_1 touch /opt/xos/xos/settings.py
-
 mock-cord:
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/frontend/mocks/cord.yaml
 	sudo docker exec frontend_xos_1 cp /opt/xos/configurations/cord/xos_cord_config /opt/xos/xos_configuration/
 	sudo docker exec frontend_xos_1 touch /opt/xos/xos/settings.py
 
-mock-cord-pod:
-	echo "make sure to add '../vtn/files/xos_vtn_config:/opt/xos/xos_configuration/xos_vtn_config:ro' to volumes section of docker-compose.yml"
+mock-mcord:
 	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
-	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord-pod/mgmt-net.yaml
-	sudo docker-compose run xos bash -c "echo somekey > /opt/xos/synchronizers/vcpe/vcpe_public_key; python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/cord-pod/cord-vtn-vsg.yaml"
-	sudo docker exec frontend_xos_1 cp /opt/xos/configurations/cord/xos_cord_config /opt/xos/xos_configuration/
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/frontend/mocks/mcord.yaml
+	sudo docker exec frontend_xos_1 cp /opt/xos/configurations/frontend/mocks/xos_mcord_config /opt/xos/xos_configuration/
 	sudo docker exec frontend_xos_1 touch /opt/xos/xos/settings.py
diff --git a/xos/configurations/frontend/README.md b/xos/configurations/frontend/README.md
index c8f4097..b1a689c 100644
--- a/xos/configurations/frontend/README.md
+++ b/xos/configurations/frontend/README.md
@@ -42,6 +42,7 @@
 Sometimes while developing the GUI it is useful to have control over the DataModel. Sample `tosca` recipes for different configurations are defined in the `mocks` folder, and corresponding `make` commands are provided.
 
 - Bring up the **CORD** data model: `make mock-cord`
+- Bring up the **M-CORD** data model: `make mock-mcord`
 
 ## JS Styleguide
 
diff --git a/xos/configurations/frontend/docker-compose.yml b/xos/configurations/frontend/docker-compose.yml
index fae13ab..45893ee 100644
--- a/xos/configurations/frontend/docker-compose.yml
+++ b/xos/configurations/frontend/docker-compose.yml
@@ -21,9 +21,10 @@
     volumes:
       - ../common/xos_common_config:/opt/xos/xos_configuration/xos_common_config
       - ../../core/xoslib:/opt/xos/core/xoslib
+      - ../../tosca:/opt/xos/tosca
       - ../../core/static:/opt/xos/core/static
-      - ../../core/dashboard:/opt/xos/core/dashboard
-      - ../../core/templatetags:/opt/xos/core/templatetags
       - ../../templates/admin:/opt/xos/templates/admin
       - ../../configurations:/opt/xos/configurations
       - ../../xos:/opt/xos/xos
+      - ../../core/views:/opt/xos/core/views
+      - ../../services:/opt/xos/services
diff --git a/xos/configurations/frontend/mocks/MCORDServiceN.yaml b/xos/configurations/frontend/mocks/MCORDServiceN.yaml
new file mode 100644
index 0000000..bef7bb3
--- /dev/null
+++ b/xos/configurations/frontend/mocks/MCORDServiceN.yaml
@@ -0,0 +1,89 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.1
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    ubuntu-14.04-server-cloudimg-amd64-disk1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ubuntu-14.04-server-cloudimg-amd64-disk1
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: compute9
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
diff --git a/xos/configurations/frontend/mocks/MCORDServiceN.yml b/xos/configurations/frontend/mocks/MCORDServiceN.yml
new file mode 100644
index 0000000..8f764d0
--- /dev/null
+++ b/xos/configurations/frontend/mocks/MCORDServiceN.yml
@@ -0,0 +1,106 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.MCORDComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: A Service Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.Service
+      requirements:
+      properties:
+          kind: mcordservice
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    mcord_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: mcord_service_internal_net
+          cidr: 172.16.16.0/24
+          start_ip: 172.16.16.1
+          end_ip: 172.16.16.5
+          gateway_ip: 172.16.16.1
+
+      requirements:
+          - network_template:
+              node: Private
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+
+    mcord-server-image-s1:
+      type: tosca.nodes.Image
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_mcord_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: trusty-server-multi-nic
+#                node: mcord-server-image-s1
+                relationship: tosca.relationships.DefaultImage
+      properties:
+          default_flavor: m1.medium
+          default_node: ip-10-0-10-125
+
+    my_service_mcord_component1:
+      description: MCORD Service default Component
+      type: tosca.nodes.MCORDComponent
+      # properties:
+      #     view_url: /mcord/?service=vBBU
+      #     kind: RAN
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - mcord_slice:
+              node: mysite_mcord_slice1
+              relationship: tosca.relationships.MemberOfSlice
+
+    # Nodes
+    compute9:
+      type: tosca.nodes.Node
+      properties:
+          view_url: /mcord/?service=vBBU
+          kind: RAN
+      requirements:
+        - site:
+            node: mysite
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: MyDeployment
+            relationship: tosca.relationships.MemberOfDeployment
diff --git a/xos/configurations/frontend/mocks/cord.yaml b/xos/configurations/frontend/mocks/cord.yaml
index c448da5..8c84d8f 100644
--- a/xos/configurations/frontend/mocks/cord.yaml
+++ b/xos/configurations/frontend/mocks/cord.yaml
@@ -8,12 +8,6 @@
 topology_template:
   node_templates:
     # CORD Services
-    service_vtr:
-      type: tosca.nodes.Service
-      properties:
-          view_url: /admin/vtr/vtrservice/$id$/
-          kind: vTR
-
     service_volt:
       type: tosca.nodes.Service
       requirements:
@@ -538,20 +532,3 @@
             - subscriber:
                 node: My House
                 relationship: tosca.relationships.BelongsToSubscriber
-
-    # DASHBOARDS
-    Customer Care:
-      type: tosca.nodes.DashboardView
-      properties:
-          url: template:xosDiagnostic
-
-    padmin@vicci.org:
-      type: tosca.nodes.User
-      properties:
-          firstname: XOS
-          lastname: admin
-          is_admin: true
-      requirements:
-          - customer_care_dashboard:
-              node: Customer Care
-              relationship: tosca.relationships.UsesDashboard
diff --git a/xos/configurations/frontend/mocks/mcord.yaml b/xos/configurations/frontend/mocks/mcord.yaml
new file mode 100644
index 0000000..6c10ad3
--- /dev/null
+++ b/xos/configurations/frontend/mocks/mcord.yaml
@@ -0,0 +1,319 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup CORD-related services
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+    # M-CORD Services
+    
+    # RAN
+    vBBU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          kind: RAN
+
+    eSON:
+      type: tosca.nodes.Service
+      properties:
+          view_url: http://www.google.com
+          kind: RAN
+
+    # EPC
+    vMME:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vMME
+          kind: EPC
+
+    vSGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vSGW
+          kind: EPC
+
+    vPGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vPGW
+          kind: EPC
+
+    # EDGE
+    Cache:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Cache
+          icon_url: /static/mCordServices/service_cache.png
+          kind: EDGE
+
+    Firewall:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Firewall
+          icon_url: /static/mCordServices/service_firewall.png
+          kind: EDGE
+
+    Video Optimization:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Video%20Optimization
+          icon_url: /static/mCordServices/service_video.png
+          kind: EDGE
+          
+    # Images
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+      properties:
+         disk_format: QCOW2
+         container_format: BARE
+
+    # Deployments
+    StanfordDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+          - image:
+              node: trusty-server-multi-nic
+              relationship: tosca.relationships.SupportsImage
+
+    # Site
+    stanford:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Stanford University
+          site_url: https://www.stanford.edu/
+      requirements:
+          - deployment:
+               node: StanfordDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: CloudLab
+                       relationship: tosca.relationships.UsesController
+
+
+    # Nodes
+    node1.stanford.edu:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: stanford
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: StanfordDeployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    # Slices
+    stanford_slice:
+      description: Slice that contains sample instances
+      type: tosca.nodes.Slice
+      requirements:
+          - site:
+              node: stanford
+              relationship: tosca.relationships.MemberOfSite
+
+    # Instances
+    BBU_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    BBU_service_instance2:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    MME_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    SGW_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    PGW_service_instance1:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 1
+           disk_size: 10 GB
+           mem_size: 4 MB
+        # Guest Operating System properties
+        os:
+          properties:
+            # host Operating System image properties
+            architecture: x86_64
+            type: linux
+            distribution: ubuntu
+            version: 14.04
+      requirements:
+          - slice:
+                node: stanford_slice
+                relationship: tosca.relationships.MemberOfSlice
+
+    # Let's add a user who can be administrator of the household
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: stanford
+              relationship: tosca.relationships.MemberOfSite
+
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+
+    Topology:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosMcordTopology
+
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      properties:
+          firstname: XOS
+          lastname: admin
+          is_admin: true
+      requirements:
+          - mcord_dashboard:
+              node: Topology
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/xos/configurations/frontend/mocks/xos_mcord_config b/xos/configurations/frontend/mocks/xos_mcord_config
new file mode 100644
index 0000000..7730f3b
--- /dev/null
+++ b/xos/configurations/frontend/mocks/xos_mcord_config
@@ -0,0 +1,6 @@
+[gui]
+branding_name=M-CORD
+branding_icon=/static/cord-logo.png
+branding_favicon=/static/cord-favicon.png
+branding_bg=/static/mcord-bg2.jpg
+service_view_class=core.views.mCordServiceGrid.ServiceGridView
diff --git a/xos/configurations/mcord/Makefile b/xos/configurations/mcord/Makefile
new file mode 100644
index 0000000..37025de
--- /dev/null
+++ b/xos/configurations/mcord/Makefile
@@ -0,0 +1,45 @@
+.PHONY: xos
+xos: nodes.yaml images.yaml
+	sudo docker-compose up -d
+	../common/wait_for_xos_port.sh 8000
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /opt/xos/configurations/common/fixtures.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/setup.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/nodes.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/images.yaml
+	sudo docker-compose run xos python /opt/xos/tosca/run.py padmin@vicci.org /root/setup/mgmt-net.yaml
+	sudo docker exec mcord_xos_1 cp /opt/xos/configurations/frontend/mocks/xos_mcord_config /opt/xos/xos_configuration/
+
+nodes.yaml:
+	export SETUPDIR=.; bash ../common/make-nodes-yaml.sh
+
+images.yaml:
+	export SETUPDIR=.; bash ../common/make-images-yaml.sh
+
+.PHONY: local_containers
+local_containers:
+	cd xos; make devel
+	cd synchronizer; make
+enter-xos:
+	sudo docker exec -it mcord_xos_1 bash
+
+enter-os:
+	sudo docker exec -it mcord_xos_synchronizer_openstack_1 bash
+
+enter-vbbu:
+	sudo docker exec -it mcord_xos_synchronizer_vbbu_1 bash
+
+upgrade_pkgs:
+	sudo pip install httpie --upgrade
+
+rebuild_xos:
+	make -C ../../../containers/xos devel
+
+rebuild_synchronizer:
+	make -C ../../../containers/synchronizer
+
+stop:
+	docker-compose stop
+
+rm:
+	docker-compose stop; docker-compose rm
+
diff --git a/xos/configurations/mcord/admin-openrc.sh b/xos/configurations/mcord/admin-openrc.sh
new file mode 100644
index 0000000..24cd509
--- /dev/null
+++ b/xos/configurations/mcord/admin-openrc.sh
@@ -0,0 +1,8 @@
+# Replace with the OpenStack admin credentials for your cluster
+export OS_PROJECT_DOMAIN_ID=default
+export OS_USER_DOMAIN_ID=default
+export OS_TENANT_NAME=admin
+export OS_USERNAME=admin
+export OS_PASSWORD=mcord
+export OS_AUTH_URL=http://10.102.81.3:35357/v2.0
+
diff --git a/xos/configurations/mcord/docker-compose.yml b/xos/configurations/mcord/docker-compose.yml
new file mode 100644
index 0000000..8598396
--- /dev/null
+++ b/xos/configurations/mcord/docker-compose.yml
@@ -0,0 +1,59 @@
+xos_db:
+    image: xosproject/xos-postgres
+    expose:
+        - "5432"
+
+xos_synchronizer_openstack:
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/openstack/xos-synchronizer.py"
+    image: xosproject/xos-synchronizer-openstack
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: openstack
+    links:
+        - xos_db
+    volumes:
+        - .:/root/setup:ro
        - ../setup/id_rsa:/opt/xos/configurations/mcord/mcord_private_key:ro  # private key
+    extra_hosts:
+        - "controller:10.102.81.3"
+        - "computeBBU1:10.102.81.6"
+        - "computeBBU2:10.102.81.7"
+        - "compute9:10.102.81.9"
+        - "compute10:10.102.81.10"
+
+xos_synchronizer_vbbu:
+    image: xosproject/xos-synchronizer-openstack
+    command: bash -c "sleep 120; python /opt/xos/synchronizers/vbbu/vbbu-synchronizer.py -C /opt/xos/synchronizers/vbbu/vbbu_config"
+    labels:
+        org.xosproject.kind: synchronizer
+        org.xosproject.target: vbbu 
+    links:
+        - xos_db
+    volumes:
+        - ../setup/id_rsa_mcord:/opt/xos/configurations/mcord/mcord_private_key:ro  # private key
+        - ../setup/id_rsa_mcord.pub:/opt/xos/configurations/mcord/mcord_public_key:ro  # public key
+        - ../setup:/root/setup:ro
+    extra_hosts:
+        - "controller:10.102.81.3"
+        - "computeBBU1:10.102.81.6"
+        - "computeBBU2:10.102.81.7"
+        - "compute9:10.102.81.9"
+        - "compute10:10.102.81.10"
+
+# FUTURE
+#xos_swarm_synchronizer:
+#    image: xosproject/xos-swarm-synchronizer
+#    labels:
+#        org.xosproject.kind: synchronizer
+#        org.xosproject.target: swarm
+
+xos:
+    command: python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
+    image: xosproject/xos
+    links:
+        - xos_db
+    ports:
+        - "8000:8000"
+    volumes:
+        - .:/root/setup:ro
        - ../setup/id_rsa.pub:/opt/xos/configurations/mcord/mcord_public_key:ro  # public key
diff --git a/xos/configurations/mcord/fixtures2.yaml b/xos/configurations/mcord/fixtures2.yaml
new file mode 100644
index 0000000..ab16e85
--- /dev/null
+++ b/xos/configurations/mcord/fixtures2.yaml
@@ -0,0 +1,33 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+    tosca.nodes.VBBUComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: vBBU Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+            s1u_tag:
+                type: string
+                required: false
+                default: 201
+                description: VTN stag port-name
+            s1mme_tag:
+                type: string
+                required: false
+                default: 200
+                description: VTN stag port-name
+            rru_tag:
+                type: string
+                required: false
+                default: 199
+                description: VTN stag port-name
+
diff --git a/xos/configurations/mcord/images.yaml b/xos/configurations/mcord/images.yaml
new file mode 100644
index 0000000..2b62235
--- /dev/null
+++ b/xos/configurations/mcord/images.yaml
@@ -0,0 +1,18 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    mysite:
+        type: tosca.nodes.Site
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+      requirements:
+
diff --git a/xos/configurations/mcord/mcord.yaml b/xos/configurations/mcord/mcord.yaml
new file mode 100644
index 0000000..3c4d8eb
--- /dev/null
+++ b/xos/configurations/mcord/mcord.yaml
@@ -0,0 +1,234 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup MCORD-related services.
+
+imports:
+   - custom_types/xos.yaml
+
+node_types:
+
+    tosca.nodes.MCORDService:
+        derived_from: tosca.nodes.Root
+        description: >
+            An XOS Service object. Services may be listed in the Service
+            directory and may be linked together via Tenancy Relationships.
+        capabilities:
+            scalable:
+                type: tosca.capabilities.Scalable
+            service:
+                type: tosca.capabilities.xos.Service
+        properties:
+            no-delete:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to delete this object
+            no-create:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to create this object
+            no-update:
+                type: boolean
+                default: false
+                description: Do not allow Tosca to update this object
+            kind:
+                type: string
+                default: generic
+                description: Type of service.
+            view_url:
+                type: string
+                required: false
+                description: URL to follow when icon is clicked in the Service Directory.
+            icon_url:
+                type: string
+                required: false
+                description: ICON to display in the Service Directory.
+            enabled:
+                type: boolean
+                default: true
+            published:
+                type: boolean
+                default: true
+                description: If True then display this Service in the Service Directory.
+            public_key:
+                type: string
+                required: false
+                description: Public key to install into Instances to allow Services to SSH into them.
+            private_key_fn:
+                type: string
+                required: false
+                description: Location of private key file
+            versionNumber:
+                type: string
+                required: false
+                description: Version number of Service.
+
+    tosca.nodes.VBBUComponent:
+        derived_from: tosca.nodes.Root
+        description: >
+            CORD: vBBU Component of MCORD Service.
+        properties:
+            kind:
+                type: string
+                default: generic
+                description: Kind of component
+            s1u_tag:
+                type: string
+                required: false
+                default: 201
+                description: VTN stag port-name
+            s1mme_tag:
+                type: string
+                required: false
+                default: 200
+                description: VTN stag port-name
+            rru_tag:
+                type: string
+                required: false
+                default: 199
+                description: VTN stag port-name
+
+
+topology_template:
+  node_templates:
+    service_mcord:
+      type: tosca.nodes.MCORDService
+      requirements:
+      properties:
+          kind: MobileRAN
+          view_url: /admin/mcord/$id$/
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/configurations/mcord/mcord_private_key
+      artifacts:
+          pubkey: /opt/xos/configurations/mcord/mcord_public_key
+
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    External:
+      type: tosca.nodes.NetworkTemplate
+
+    management_template:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: none
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+#      properties:
+#          no-create: true
+#          no-delete: true
+#          no-update: true
+
+    lan_3gpp_s1mme_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: lan_3gpp_s1uc_net
+          cidr: 172.16.1.0/24
+          start_ip: 172.16.1.3
+          end_ip: 172.16.1.12
+          gateway_ip: 172.16.1.1
+      requirements:
+          - network_template:
+              node: External
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    lan_3gpp_s1u_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: lan_3gpp_s1u_net
+          cidr: 172.16.2.0/24
+          start_ip: 172.16.2.3
+          end_ip: 172.16.2.12
+          gateway_ip: 172.16.16.1
+      requirements:
+          - network_template:
+              node: External
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    lan_rru_network:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          ip_version: 4
+          labels: lan_rru_net
+          cidr: 172.16.0.0/24
+          start_ip: 172.16.0.3
+          end_ip: 172.16.0.12
+          gateway_ip: 172.16.0.1
+      requirements:
+          - network_template:
+              node: External
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+          - connection:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.ConnectsToSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mcord-bbu-multi-nic:
+      type: tosca.nodes.Image
+
+    mysite_management:
+      description: This slice exists solely to own the management network
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+
+    mysite_vbbu_slice1:
+      description: MCORD Service Slice 1
+      type: tosca.nodes.Slice
+      requirements:
+          - mcord_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: mcord-bbu-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+      properties:
+          network: noauto
+          default_flavor: m1.xlarge
+          default_node: computeBBU2
+
+    mysite_VBBU_Component:
+      description: MCORD Service default Component
+      type: tosca.nodes.VBBUComponent
+      requirements:
+          - provider_service:
+              node: service_mcord
+              relationship: tosca.relationships.MemberOfService
+          - vbbu_slice:
+              node: mysite_vbbu_slice1
+              relationship: tosca.relationships.MemberOfSlice
+      properties:
+          s1u_tag: 201
+          s1mme_tag: 200
+          rru_tag: 199
diff --git a/xos/configurations/mcord/mgmt-net.yaml b/xos/configurations/mcord/mgmt-net.yaml
new file mode 100644
index 0000000..ac8ad7e
--- /dev/null
+++ b/xos/configurations/mcord/mgmt-net.yaml
@@ -0,0 +1,40 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Set up management network for CORD POD
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    management_template:
+      type: tosca.nodes.NetworkTemplate
+      properties:
+          visibility: private
+          translation: none
+
+    management:
+      type: tosca.nodes.network.Network
+      properties:
+          ip_version: 4
+          cidr: 10.102.83.0/24
+      requirements:
+          - network_template:
+              node: management_template
+              relationship: tosca.relationships.UsesNetworkTemplate
+          - owner:
+              node: mysite_management
+              relationship: tosca.relationships.MemberOfSlice
+
+    mysite:
+      type: tosca.nodes.Site
+
+    mysite_management:
+      description: This slice exists solely to own the management network
+      type: tosca.nodes.Slice
+      properties:
+          network: noauto
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
diff --git a/xos/configurations/mcord/migrations/0001_initial.py b/xos/configurations/mcord/migrations/0001_initial.py
new file mode 100644
index 0000000..c53e548
--- /dev/null
+++ b/xos/configurations/mcord/migrations/0001_initial.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import models, migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0001_initial'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='MCORDService',
+            fields=[
+            ],
+            options={
+                'verbose_name': 'MCORD Service',
+                'proxy': True,
+            },
+            bases=('core.service',),
+        ),
+        migrations.CreateModel(
+            name='VBBUComponent',
+            fields=[
+            ],
+            options={
+                'verbose_name': 'VBBU MCORD Service Component',
+                'proxy': True,
+            },
+            bases=('core.tenantwithcontainer',),
+        ),
+    ]
diff --git a/xos/configurations/mcord/migrations/__init__.py b/xos/configurations/mcord/migrations/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/configurations/mcord/migrations/__init__.py
diff --git a/xos/configurations/mcord/nodes.yaml b/xos/configurations/mcord/nodes.yaml
new file mode 100644
index 0000000..ae22112
--- /dev/null
+++ b/xos/configurations/mcord/nodes.yaml
@@ -0,0 +1,24 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+imports:
+   - custom_types/xos.yaml
+
+description: autogenerated nodes file
+
+topology_template:
+  node_templates:
+    MyDeployment:
+        type: tosca.nodes.Deployment
+    mysite:
+        type: tosca.nodes.Site
+
+    computeBBU2:
+        type: tosca.nodes.Node
+        requirements:
+          - site:
+              node: mysite 
+              relationship: tosca.relationships.MemberOfSite
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.MemberOfDeployment
+
diff --git a/xos/configurations/mcord/postgresql/Dockerfile b/xos/configurations/mcord/postgresql/Dockerfile
new file mode 100644
index 0000000..4d4ebfd
--- /dev/null
+++ b/xos/configurations/mcord/postgresql/Dockerfile
@@ -0,0 +1,35 @@
+FROM ubuntu
+
+RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8
+
+RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y --force-yes\
+    python-software-properties \
+    software-properties-common \
+    postgresql-9.3 \
+    postgresql-client-9.3 \
+    postgresql-contrib-9.3
+
+# Workaround for AUFS issue
+# https://github.com/docker/docker/issues/783#issuecomment-56013588
+RUN mkdir /etc/ssl/private-copy; mv /etc/ssl/private/* /etc/ssl/private-copy/; rm -r /etc/ssl/private; mv /etc/ssl/private-copy /etc/ssl/private; chmod -R 0700 /etc/ssl/private; chown -R postgres /etc/ssl/private
+
+USER postgres
+
+RUN /etc/init.d/postgresql start && \
+    psql --command "ALTER USER postgres WITH SUPERUSER PASSWORD 'password' " && \
+    psql --command "CREATE DATABASE xos"
+
+# Allow remote connections. 
+RUN echo "host all  all    0.0.0.0/0  md5" >> /etc/postgresql/9.3/main/pg_hba.conf
+RUN echo "host all  all    0.0.0.0/0  password" >> /etc/postgresql/9.3/main/pg_hba.conf
+
+RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf
+
+# Expose the PostgreSQL port
+EXPOSE 5432
+
+VOLUME  ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"]
+
+CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"]
diff --git a/xos/configurations/mcord/postgresql/Makefile b/xos/configurations/mcord/postgresql/Makefile
new file mode 100644
index 0000000..c50923e
--- /dev/null
+++ b/xos/configurations/mcord/postgresql/Makefile
@@ -0,0 +1,25 @@
+IMAGE_NAME:=xosproject/xos-postgres
+CONTAINER_NAME:=xos-db-postgres
+NO_DOCKER_CACHE?=false
+
+.PHONY: build
+build: ; docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
+
+.PHONY: run
+run: ; docker run -d -p 5432:5432 --name ${CONTAINER_NAME} ${IMAGE_NAME}
+
+.PHONY: stop
+stop: ; docker stop ${CONTAINER_NAME}
+
+.PHONY: rm
+rm: ; docker rm ${CONTAINER_NAME}
+
+.PHONY: rmi
+rmi: ; docker rmi ${IMAGE_NAME}
+
+.PHONY: backupvol
+backupvol: ; docker run --volumes-from ${CONTAINER_NAME} -v /backup:/backup postgres tar cvf /backup/backup-postgres.tar /var/lib/postgresql
+
+.PHONY: restorevol
+restorevol: ; docker run --volumes-from ${CONTAINER_NAME} -v /backup:/backup postgres bash -c "cd /var/lib/postgresql && tar xvf /backup/backup-postgres.tar"
+
diff --git a/xos/configurations/mcord/setup.yaml b/xos/configurations/mcord/setup.yaml
new file mode 100644
index 0000000..4f2da15
--- /dev/null
+++ b/xos/configurations/mcord/setup.yaml
@@ -0,0 +1,73 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+#    m1.xlarge:
+#      type: tosca.nodes.Flavor
+
+
+    MyOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: MyOpenStack
+                       relationship: tosca.relationships.UsesController
+    Topology:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosMcordTopology
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+          - mcord_dashboard:
+              node: Topology
+              relationship: tosca.relationships.UsesDashboard
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
diff --git a/xos/configurations/mcord/synchronizer/Dockerfile b/xos/configurations/mcord/synchronizer/Dockerfile
new file mode 100644
index 0000000..011e8dd
--- /dev/null
+++ b/xos/configurations/mcord/synchronizer/Dockerfile
@@ -0,0 +1,48 @@
+FROM       xosproject/xos
+
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
+    openssh-client \
+    python-crypto \
+    python-jinja2 \
+    python-paramiko \
+    python-yaml \
+    python-httplib2 \
+    rsync \
+    supervisor
+
+RUN pip install -U \
+    jinja2
+
+# Install custom Ansible
+RUN \
+    git clone -b release1.8.2 git://github.com/ansible/ansible.git /opt/ansible && \
+    git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/lib/ansible/modules/extras && \
+    git clone -b release1.8.2 git://github.com/ansible/ansible-modules-extras.git /opt/ansible/v2/ansible/modules/extras && \
+    git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/lib/ansible/modules/core && \
+    git clone git://github.com/sb98052/ansible-modules-core.git /opt/ansible/v2/ansible/modules/core && \
+    # git clone uses cached copy, doesn't pick up latest
+    git -C /opt/ansible pull && \
+    git -C /opt/ansible/lib/ansible/modules/core pull && \
+    git -C /opt/ansible/v2/ansible/modules/core pull
+
+
+# For Observer
+RUN mkdir -p /usr/local/share /bin /etc/ansible
+
+COPY conf/ansible-hosts /etc/ansible/hosts
+
+ADD http://phantomjs.googlecode.com/files/phantomjs-1.7.0-linux-x86_64.tar.bz2 /usr/local/share/
+
+RUN git clone git://git.planet-lab.org/fofum.git /tmp/fofum && \
+    cd /tmp/fofum; python setup.py install && \
+    rm -rf /tmp/fofum && \
+    tar jxvf /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2 -C /usr/local/share/ && \
+    rm -f /usr/local/share/phantomjs-1.7.0-linux-x86_64.tar.bz2 && \
+    ln -s /usr/local/share/phantomjs-1.7.0-linux-x86_64 /usr/local/share/phantomjs && \
+    ln -s /usr/local/share/phantomjs/bin/phantomjs /bin/phantomjs
+
+
+# Supervisor
+COPY conf/synchronizer.conf /etc/supervisor/conf.d/
+
+CMD update-ca-certificates && /usr/bin/supervisord -c /etc/supervisor/conf.d/synchronizer.conf
diff --git a/xos/configurations/mcord/synchronizer/Makefile b/xos/configurations/mcord/synchronizer/Makefile
new file mode 100644
index 0000000..352616a
--- /dev/null
+++ b/xos/configurations/mcord/synchronizer/Makefile
@@ -0,0 +1,15 @@
+IMAGE_NAME:=xosproject/xos-synchronizer-openstack
+CONTAINER_NAME:=xos-synchronizer
+NO_DOCKER_CACHE?=false
+
+.PHONY: build
+build: ; sudo docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
+
+.PHONY: run
+run: ; sudo docker run -d --name ${CONTAINER_NAME} -v /usr/local/share/ca-certificates:/usr/local/share/ca-certificates:ro ${IMAGE_NAME}
+
+.PHONY: stop
+stop: ; sudo docker stop ${CONTAINER_NAME}
+
+.PHONY: rm
+rm: ; sudo docker rm ${CONTAINER_NAME}
diff --git a/xos/configurations/mcord/synchronizer/conf/ansible-hosts b/xos/configurations/mcord/synchronizer/conf/ansible-hosts
new file mode 100644
index 0000000..0dd74f1
--- /dev/null
+++ b/xos/configurations/mcord/synchronizer/conf/ansible-hosts
@@ -0,0 +1,2 @@
+[localhost]
+127.0.0.1
diff --git a/xos/configurations/mcord/synchronizer/conf/synchronizer.conf b/xos/configurations/mcord/synchronizer/conf/synchronizer.conf
new file mode 100644
index 0000000..2131a25
--- /dev/null
+++ b/xos/configurations/mcord/synchronizer/conf/synchronizer.conf
@@ -0,0 +1,9 @@
+[supervisord]
+logfile=/var/log/supervisord.log ; (main log file;default $CWD/supervisord.log)
+pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
+nodaemon=true
+
+[program:synchronizer]
+command=python /opt/xos/synchronizers/openstack/xos-synchronizer.py
+stderr_logfile=/var/log/supervisor/synchronizer.err.log
+stdout_logfile=/var/log/supervisor/synchronizer.out.log
diff --git a/xos/configurations/mcord/tosca_tmp/setup.yaml.new b/xos/configurations/mcord/tosca_tmp/setup.yaml.new
new file mode 100644
index 0000000..3c7bf8b
--- /dev/null
+++ b/xos/configurations/mcord/tosca_tmp/setup.yaml.new
@@ -0,0 +1,212 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    Stanford_Deployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+    computeBBU1:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: Stanford_University
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: Stanford_Deployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    computeBBU2:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: Stanford_University
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: Stanford_Deployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    McordOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: Stanford_Deployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    Stanford_University:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Stanford University
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: Stanford_Deployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: McordOpenStack
+                       relationship: tosca.relationships.UsesController
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: Stanford_University
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
+
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: Stanford_University
+              relationship: tosca.relationships.MemberOfSite
+    
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    RRU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          icon_url: /static/mCordServices/service_rru.png
+          kind: RAN
+    vBBU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          icon_url: /static/mCordServices/service_server.png
+          kind: RAN
+
+    eSON:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: http://www.google.com
+          kind: RAN
+
+    # EPC
+    vMME:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: /mcord/?service=vMME
+          kind: vEPC
+
+    vSGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vSGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    vPGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vPGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    # EDGE
+    Cache:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Cache
+          icon_url: /static/mCordServices/service_cache.png
+          kind: EDGE
+
+    Firewall:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Firewall
+          icon_url: /static/mCordServices/service_firewall.png
+          kind: EDGE
+
+    Video Optimization:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Video%20Optimization
+          icon_url: /static/mCordServices/service_video.png
+          kind: EDGE
+
+
diff --git a/xos/configurations/mcord/tosca_tmp/setup.yaml.old b/xos/configurations/mcord/tosca_tmp/setup.yaml.old
new file mode 100644
index 0000000..c13f0eb
--- /dev/null
+++ b/xos/configurations/mcord/tosca_tmp/setup.yaml.old
@@ -0,0 +1,61 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    MyDeployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+    MyOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: MyDeployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    mysite:
+      type: tosca.nodes.Site
+      properties:
+          display_name: MySite
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: MyDeployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: MyOpenStack
+                       relationship: tosca.relationships.UsesController
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
diff --git a/xos/configurations/mcord/tosca_tmp/tmp.yaml b/xos/configurations/mcord/tosca_tmp/tmp.yaml
new file mode 100644
index 0000000..32f2a12
--- /dev/null
+++ b/xos/configurations/mcord/tosca_tmp/tmp.yaml
@@ -0,0 +1,130 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: mysite
+              relationship: tosca.relationships.MemberOfSite
+    
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    RRU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          icon_url: /static/mCordServices/service_rru.png
+          kind: RAN
+
+    eSON:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: http://www.google.com
+          kind: RAN
+
+    # EPC
+    vMME:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: /mcord/?service=vMME
+          kind: vEPC
+
+    vSGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vSGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    vPGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vPGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    # EDGE
+    Cache:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Cache
+          icon_url: /static/mCordServices/service_cache.png
+          kind: EDGE
+
+    Firewall:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Firewall
+          icon_url: /static/mCordServices/service_firewall.png
+          kind: EDGE
+
+    Video Optimization:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Video%20Optimization
+          icon_url: /static/mCordServices/service_video.png
+          kind: EDGE
diff --git a/xos/configurations/mcord/tosca_tmp/tmp2.yaml b/xos/configurations/mcord/tosca_tmp/tmp2.yaml
new file mode 100644
index 0000000..3c7bf8b
--- /dev/null
+++ b/xos/configurations/mcord/tosca_tmp/tmp2.yaml
@@ -0,0 +1,212 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: >
+    * Adds OpenCloud Sites, Deployments, and Controllers.
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    Stanford_Deployment:
+      type: tosca.nodes.Deployment
+      properties:
+          flavors: m1.large, m1.medium, m1.small
+
+    computeBBU1:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: Stanford_University
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: Stanford_Deployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    computeBBU2:
+      type: tosca.nodes.Node
+      requirements:
+        - site:
+            node: Stanford_University
+            relationship: tosca.relationships.MemberOfSite
+        - deployment:
+            node: Stanford_Deployment
+            relationship: tosca.relationships.MemberOfDeployment
+
+    McordOpenStack:
+      type: tosca.nodes.Controller
+      requirements:
+          - deployment:
+              node: Stanford_Deployment
+              relationship: tosca.relationships.ControllerDeployment
+      properties:
+          backend_type: OpenStack
+          version: Kilo
+          auth_url: { get_script_env: [ SELF, adminrc, OS_AUTH_URL, LOCAL_FILE] }
+          admin_user: { get_script_env: [ SELF, adminrc, OS_USERNAME, LOCAL_FILE] }
+          admin_password: { get_script_env: [ SELF, adminrc, OS_PASSWORD, LOCAL_FILE] }
+          admin_tenant: { get_script_env: [ SELF, adminrc, OS_TENANT_NAME, LOCAL_FILE] }
+          domain: Default
+      artifacts:
+          adminrc: /root/setup/admin-openrc.sh
+
+    Stanford_University:
+      type: tosca.nodes.Site
+      properties:
+          display_name: Stanford University
+          site_url: http://xosproject.org/
+      requirements:
+          - deployment:
+               node: Stanford_Deployment
+               relationship: tosca.relationships.SiteDeployment
+               requirements:
+                   - controller:
+                       node: McordOpenStack
+                       relationship: tosca.relationships.UsesController
+
+    # This user already exists in XOS with this password
+    # It's an example of how to create new users
+    padmin@vicci.org:
+      type: tosca.nodes.User
+      requirements:
+          - site:
+              node: Stanford_University
+              relationship: tosca.relationships.MemberOfSite
+      properties:
+          is_admin: true
+          is_active: true
+          firstname: XOS
+          lastname: admin
+          password: letmein
+
+    johndoe@stanford.us:
+      type: tosca.nodes.User
+      properties:
+          password: letmein
+          firstname: john
+          lastname: doe
+      requirements:
+          - site:
+              node: Stanford_University
+              relationship: tosca.relationships.MemberOfSite
+    
+    # A subscriber
+    Stanford:
+       type: tosca.nodes.CORDSubscriber
+       properties:
+           service_specific_id: 123
+           firewall_enable: false
+           cdn_enable: false
+           url_filter_enable: false
+           url_filter_level: R
+       requirements:
+          - house_admin:
+              node: johndoe@stanford.us
+              relationship: tosca.relationships.AdminPrivilege
+
+    Barbera Lapinski:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 01:02:03:04:05:06
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Norbert Shumway:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 90:E2:BA:82:F9:75
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Fay Muldoon:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 68:5B:35:9D:91:D5
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    Janene Earnest:
+       type: tosca.nodes.CORDUser
+       properties:
+           mac: 34:36:3B:C9:B6:A6
+           level: PG_13
+       requirements:
+           - household:
+               node: Stanford
+               relationship: tosca.relationships.SubscriberDevice
+
+    RRU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          icon_url: /static/mCordServices/service_rru.png
+          kind: RAN
+    vBBU:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vBBU
+          icon_url: /static/mCordServices/service_server.png
+          kind: RAN
+
+    eSON:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: http://www.google.com
+          kind: RAN
+
+    # EPC
+    vMME:
+      type: tosca.nodes.Service
+      properties:
+          icon_url: /static/mCordServices/service_server.png
+          view_url: /mcord/?service=vMME
+          kind: vEPC
+
+    vSGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vSGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    vPGW:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=vPGW
+          icon_url: /static/mCordServices/service_server.png
+          kind: vEPC
+
+    # EDGE
+    Cache:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Cache
+          icon_url: /static/mCordServices/service_cache.png
+          kind: EDGE
+
+    Firewall:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Firewall
+          icon_url: /static/mCordServices/service_firewall.png
+          kind: EDGE
+
+    Video Optimization:
+      type: tosca.nodes.Service
+      properties:
+          view_url: /mcord/?service=Video%20Optimization
+          icon_url: /static/mCordServices/service_video.png
+          kind: EDGE
+
+
diff --git a/xos/configurations/mcord/xos/Dockerfile b/xos/configurations/mcord/xos/Dockerfile
new file mode 100644
index 0000000..f65eb37
--- /dev/null
+++ b/xos/configurations/mcord/xos/Dockerfile
@@ -0,0 +1,94 @@
+FROM       ubuntu:14.04.3
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
+    curl \
+    gcc \
+    geoip-database \
+    git \
+    graphviz \
+    graphviz-dev \
+    libgeoip1 \
+    libxslt1.1 \
+    libxslt1-dev \
+    libyaml-dev \
+    m4 \
+    pkg-config \
+    python-dev \
+    python-httplib2 \
+    python-pip \
+    python-psycopg2 \
+    python-pycurl \
+    python-setuptools \
+    tar \
+    wget \
+##### observer dependencies
+    python-keystoneclient \
+    python-novaclient \
+    python-neutronclient \
+    python-glanceclient \
+    python-ceilometerclient
+
+RUN pip install \
+    django==1.7 \
+    django-bitfield \
+    django-crispy-forms \
+    django-encrypted-fields \
+    django-extensions \
+    django-filter==0.11.0 \
+    django-geoposition \
+    django-ipware \
+    django_rest_swagger \
+    django-suit==0.3a1 \
+    django-timezones \
+    djangorestframework==2.4.4 \
+    dnslib \
+    lxml \
+    markdown \
+    netaddr \
+    pyOpenSSL \
+    psycopg2 \
+    python-ceilometerclient \
+    python-dateutil \
+    python-keyczar \
+    pygraphviz \
+    pytz \
+    pyyaml \
+    requests
+
+RUN easy_install --upgrade httplib2
+
+RUN easy_install \
+    python_gflags \
+    google_api_python_client \
+    httplib2.ca_certs_locater
+
+ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
+
+# Install XOS
+RUN git clone git://github.com/open-cloud/xos.git /tmp/xos && \
+    mv /tmp/xos/xos /opt/ && \
+    chmod +x /opt/xos/tools/xos-manage && \
+    /opt/xos/tools/xos-manage genkeys
+
+# install Tosca engine
+RUN chmod +x /opt/xos/tosca/run.py
+RUN bash /opt/xos/tosca/install_tosca.sh
+
+EXPOSE 8000
+
+# Set environment variables.
+ENV HOME /root
+
+# Define working directory.
+WORKDIR /opt/xos
+
+# Define default command.
+CMD update-ca-certificates && python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
diff --git a/xos/configurations/mcord/xos/Dockerfile.devel b/xos/configurations/mcord/xos/Dockerfile.devel
new file mode 100644
index 0000000..e0d0c6d
--- /dev/null
+++ b/xos/configurations/mcord/xos/Dockerfile.devel
@@ -0,0 +1,94 @@
+FROM       ubuntu:14.04.3
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
+    curl \
+    gcc \
+    geoip-database \
+    git \
+    graphviz \
+    graphviz-dev \
+    libgeoip1 \
+    libxslt1.1 \
+    libxslt1-dev \
+    libyaml-dev \
+    m4 \
+    pkg-config \
+    python-dev \
+    python-httplib2 \
+    python-pip \
+    python-psycopg2 \
+    python-pycurl \
+    python-setuptools \
+    tar \
+    wget \
+##### observer dependencies
+    python-keystoneclient \
+    python-novaclient \
+    python-neutronclient \
+    python-glanceclient \
+    python-ceilometerclient
+
+RUN pip install \
+    django==1.7 \
+    django-bitfield \
+    django-crispy-forms \
+    django-encrypted-fields \
+    django-extensions \
+    django-filter==0.11.0 \
+    django-geoposition \
+    django-ipware \
+    django_rest_swagger \
+    django-suit==0.3a1 \
+    django-timezones \
+    djangorestframework==2.4.4 \
+    dnslib \
+    lxml \
+    markdown \
+    netaddr \
+    pyOpenSSL \
+    psycopg2 \
+    python-ceilometerclient \
+    python-dateutil \
+    python-keyczar \
+    pygraphviz \
+    pytz \
+    pyyaml \
+    requests
+
+RUN easy_install --upgrade httplib2
+
+RUN easy_install \
+    python_gflags \
+    google_api_python_client \
+    httplib2.ca_certs_locater
+
+ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
+
+# Install XOS
+ADD xos /opt/xos
+RUN chmod +x /opt/xos/tools/xos-manage
+RUN /opt/xos/tools/xos-manage genkeys
+
+# install Tosca engine
+RUN chmod +x /opt/xos/tosca/run.py
+RUN bash /opt/xos/tosca/install_tosca.sh
+
+EXPOSE 8000
+
+# Set environment variables.
+ENV HOME /root
+
+# Define working directory.
+WORKDIR /opt/xos
+
+# RUN python /opt/xos/manage.py makemigrations mcordservice 
+# Define default command.
+CMD update-ca-certificates && python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
diff --git a/xos/configurations/mcord/xos/Dockerfile.templ b/xos/configurations/mcord/xos/Dockerfile.templ
new file mode 100644
index 0000000..25270a6
--- /dev/null
+++ b/xos/configurations/mcord/xos/Dockerfile.templ
@@ -0,0 +1,88 @@
+FROM       ubuntu:14.04.3
+
+# XXX Workaround for docker bug:
+# https://github.com/docker/docker/issues/6345
+# Kernel 3.15 breaks docker, use the line below as a workaround
+# until there is a fix
+RUN ln -s -f /bin/true /usr/bin/chfn
+# XXX End workaround
+
+# Install.
+RUN DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -y \
+    curl \
+    gcc \
+    geoip-database \
+    git \
+    graphviz \
+    graphviz-dev \
+    libgeoip1 \
+    libxslt1.1 \
+    libxslt1-dev \
+    libyaml-dev \
+    m4 \
+    pkg-config \
+    python-dev \
+    python-httplib2 \
+    python-pip \
+    python-psycopg2 \
+    python-pycurl \
+    python-setuptools \
+    tar \
+    wget \
+##### observer dependencies
+    python-keystoneclient \
+    python-novaclient \
+    python-neutronclient \
+    python-glanceclient \
+    python-ceilometerclient
+
+RUN pip install -U \
+    django==1.7 \
+    django-bitfield \
+    django-crispy-forms \
+    django-encrypted-fields \
+    django_evolution \
+    django-extensions \
+    django-filter==0.11.0 \
+    django-geoposition \
+    django-ipware \
+    django_rest_swagger \
+    django-suit==0.3a1 \
+    django-timezones \
+    djangorestframework==2.4.4 \
+    dnslib \
+    google_api_python_client \
+    httplib2 \
+    httplib2.ca_certs_locater \
+    lxml \
+    markdown \
+    netaddr \
+    python-dateutil \
+    python_gflags \
+    python-keyczar \
+    pygraphviz \
+    pytz \
+    pyyaml \
+    requests
+
+ADD http://code.jquery.com/jquery-1.9.1.min.js /usr/local/lib/python2.7/dist-packages/suit/static/suit/js/
+
+# Install XOS
+RUN git clone XOS_GIT_REPO -b XOS_GIT_BRANCH /tmp/xos && \
+    mv /tmp/xos/xos /opt/ && \
+    chmod +x /opt/xos/tools/xos-manage && \
+    /opt/xos/tools/xos-manage genkeys
+
+# install Tosca engine
+RUN bash /opt/xos/tosca/install_tosca.sh
+
+EXPOSE 8000
+
+# Set environment variables.
+ENV HOME /root
+
+# Define working directory.
+WORKDIR /root
+
+# Define default command.
+CMD update-ca-certificates && python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure --makemigrations
diff --git a/xos/configurations/mcord/xos/Makefile b/xos/configurations/mcord/xos/Makefile
new file mode 100644
index 0000000..0ba043d
--- /dev/null
+++ b/xos/configurations/mcord/xos/Makefile
@@ -0,0 +1,27 @@
+CONTAINER_NAME:=xos-server
+IMAGE_NAME:=xosproject/xos
+TOSCA_CONFIG_PATH:=/opt/xos/configurations/opencloud/opencloud.yaml
+XOS_GIT_REPO?=git://github.com/open-cloud/xos.git
+XOS_GIT_BRANCH?=master
+NO_DOCKER_CACHE?=false
+
+.PHONY: build
+build: ; sudo docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
+
+.PHONY: custom
+custom: ; cat Dockerfile.templ | sed -e "s|XOS_GIT_REPO|${XOS_GIT_REPO}|g" -e "s|XOS_GIT_BRANCH|${XOS_GIT_BRANCH}|g" | docker build --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} -
+
+.PHONY: devel
+devel: ; cd ../..; ls; sudo docker build -f containers/xos/Dockerfile.devel --no-cache=${NO_DOCKER_CACHE} --rm -t ${IMAGE_NAME} .
+
+.PHONY: run
+run: ; sudo docker run -d --name ${CONTAINER_NAME} -p 80:8000 -v /usr/local/share/ca-certificates:/usr/local/share/ca-certificates:ro ${IMAGE_NAME}
+
+.PHONY: runtosca
+runtosca: ; sudo docker exec -it ${CONTAINER_NAME} /usr/bin/python /opt/xos/tosca/run.py padmin@vicci.org ${TOSCA_CONFIG_PATH}
+
+.PHONY: stop
+stop: ; sudo docker stop ${CONTAINER_NAME}
+
+.PHONY: rm
+rm: ; sudo docker rm ${CONTAINER_NAME}
diff --git a/xos/configurations/mcord/xos/initdb b/xos/configurations/mcord/xos/initdb
new file mode 100755
index 0000000..1f5b770
--- /dev/null
+++ b/xos/configurations/mcord/xos/initdb
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+IMAGE_NAME=xosproject/xos
+CONTAINER_NAME=xos_build_helper_$$
+DB_HOST=$(wget http://ipinfo.io/ip -qO -)
+
+# configure db host
+docker run -it --name=$CONTAINER_NAME $IMAGE_NAME sed -i '0,/host/{s/host=localhost/host='$DB_HOST'/}' /opt/xos/xos_configuration/xos_common_config
+docker commit $CONTAINER_NAME $IMAGE_NAME
+docker rm $CONTAINER_NAME
+
+# init db schema
+docker run -it --name=$CONTAINER_NAME $IMAGE_NAME /opt/xos/tools/xos-manage makemigrations
+# "docker run" overrides the CMD specified in the Dockerfile, so we re-set the CMD in the final commit
+docker commit --change="CMD update-ca-certificates && python /opt/xos/manage.py runserver 0.0.0.0:8000 --insecure" $CONTAINER_NAME $IMAGE_NAME
+docker rm $CONTAINER_NAME
diff --git a/xos/core/models/network.py b/xos/core/models/network.py
index 8373814..c70e5a9 100644
--- a/xos/core/models/network.py
+++ b/xos/core/models/network.py
@@ -138,6 +138,8 @@
     name = models.CharField(max_length=32)
     template = models.ForeignKey(NetworkTemplate)
     subnet = models.CharField(max_length=32, blank=True)
+    start_ip = models.CharField(max_length=32, blank=True)
+    end_ip = models.CharField(max_length=32, blank=True)
     ports = models.CharField(max_length=1024, blank=True, null=True, validators=[ValidateNatList])
     labels = models.CharField(max_length=1024, blank=True, null=True)
     owner = models.ForeignKey(Slice, related_name="ownedNetworks", help_text="Slice that owns control of this Network")
@@ -203,6 +205,8 @@
     router_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum router id")
     subnet_id = models.CharField(null=True, blank=True, max_length=256, help_text="Quantum subnet id")
     subnet = models.CharField(max_length=32, blank=True)
+    start_ip = models.CharField(max_length=32, blank=True)
+    stop_ip = models.CharField(max_length=32, blank=True)
 
     class Meta:
         unique_together = ('network', 'controller')
diff --git a/xos/core/models/node.py b/xos/core/models/node.py
index d464532..f3ea303 100644
--- a/xos/core/models/node.py
+++ b/xos/core/models/node.py
@@ -2,7 +2,7 @@
 from django.db import models
 from core.models import PlCoreBase
 from core.models.plcorebase import StrippedCharField
-from core.models import Site, SiteDeployment, SitePrivilege
+from core.models.site import Site, SiteDeployment, SitePrivilege
 from core.models import Tag
 from django.contrib.contenttypes import generic
 
diff --git a/xos/core/models/service.py b/xos/core/models/service.py
index 641c5ae..97b0ab1 100644
--- a/xos/core/models/service.py
+++ b/xos/core/models/service.py
@@ -510,8 +510,14 @@
 
     def pick(self):
         from core.models import Node
-        nodes = Node.objects.all()
-
+#        nodes = Node.objects.all()
+#MCORD        
+        if not self.slice.default_node:
+            nodes = list(Node.objects.all())
+            nodes = sorted(nodes, key=lambda node: node.instances.all().count())
+        else:
+            nodes = list(Node.objects.filter(name = self.slice.default_node))
+#MCORD
         if self.label:
             nodes = nodes.filter(nodelabels__name=self.label)
 
@@ -703,6 +709,10 @@
             images = Image.objects.filter(name=image_name)
             if images:
                 return images[0]
+            else:
+                images = Image.objects.filter(name = slice.default_image)
+                if images:
+                    return images[0]
 
         raise XOSProgrammingError(
             "No VPCE image (looked for %s)" % str(look_for_images))
@@ -754,6 +764,7 @@
 
             if not instance:
                 slice = self.provider_service.slices.all()[0]
+                flavors = Flavor.objects.filter(name=slice.default_flavor) #MCORD
 
                 flavor = slice.default_flavor
                 if not flavor:
@@ -761,6 +772,8 @@
                     if not flavors:
                         raise XOSConfigurationError("No m1.small flavor")
                     flavor = flavors[0]
+#                default_flavor = slice.default_flavor #MCORD
+
 
                 if slice.default_isolation == "container_vm":
                     (node, parent) = ContainerVmScheduler(slice).pick()
diff --git a/xos/core/models/slice.py b/xos/core/models/slice.py
index a449691..aefe08c 100644
--- a/xos/core/models/slice.py
+++ b/xos/core/models/slice.py
@@ -12,6 +12,7 @@
 from django.contrib.contenttypes import generic
 from core.models import Service
 from core.models import Controller
+from core.models.node import Node
 from core.models import Flavor, Image
 from core.models.plcorebase import StrippedCharField
 from django.core.exceptions import PermissionDenied, ValidationError
@@ -40,6 +41,7 @@
     # for tenant view
     default_flavor = models.ForeignKey(Flavor, related_name = "slices", null=True, blank=True)
     default_image = models.ForeignKey(Image, related_name = "slices", null=True, blank=True);
+    default_node = models.ForeignKey(Node, related_name = "slices", null=True, blank=True)
     mount_data_sets = StrippedCharField(default="GenBank",null=True, blank=True, max_length=256)
 
     default_isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
diff --git a/xos/core/xoslib/dashboards/xosMcordTopology.html b/xos/core/xoslib/dashboards/xosMcordTopology.html
new file mode 100644
index 0000000..163abba
--- /dev/null
+++ b/xos/core/xoslib/dashboards/xosMcordTopology.html
@@ -0,0 +1,15 @@
+<!-- browserSync -->
+
+<!-- inject:css -->
+<link rel="stylesheet" href="/static/css/xosMcordTopology.css">
+<!-- endinject -->
+
+<div id="xosMcordTopology">
+    <div ui-view></div>
+</div>
+
+
+<!-- inject:js -->
+<script src="/static/js/vendor/xosMcordTopologyVendor.js"></script>
+<script src="/static/js/xosMcordTopology.js"></script>
+<!-- endinject -->
diff --git a/xos/services/mcord/__init__.py b/xos/services/mcord/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/services/mcord/__init__.py
diff --git a/xos/services/mcord/admin.py b/xos/services/mcord/admin.py
new file mode 100644
index 0000000..b496ef7
--- /dev/null
+++ b/xos/services/mcord/admin.py
@@ -0,0 +1,141 @@
+
+from core.admin import ReadOnlyAwareAdmin, SliceInline
+from core.middleware import get_request
+from core.models import User
+from django import forms
+from django.contrib import admin
+from services.mcord.models import MCORDService, VBBUComponent, MCORD_KIND
+
+# The class that provides an admin interface on the web for the service.
+# We only do configuration here and don't change any logic, because the logic
+# is taken care of for us by ReadOnlyAwareAdmin.
+class MCORDServiceAdmin(ReadOnlyAwareAdmin):
+    # We must set the model so that the admin knows what fields to use
+    model = MCORDService
+    verbose_name = "MCORD Service"
+    verbose_name_plural = "MCORD Services"
+
+    # Setting list_display creates columns on the admin page; each value here
+    # is a column, and each column is populated for every instance of the model.
+    list_display = ("backend_status_icon", "name", "enabled")
+
+    # Used to indicate which values in the columns of the admin form are links.
+    list_display_links = ('backend_status_icon', 'name', )
+
+    # Denotes the sections of the form, the fields in each section, and the
+    # CSS classes used to style them. We represent this as a set of tuples; each
+    # tuple has a name (or None) and a set of fields and classes.
+    # Here the first section does not have a name, so we use None. That first
+    # section has several fields indicated in the 'fields' attribute, styled
+    # by the classes indicated in the 'classes' attribute. The classes given
+    # here are important for rendering the tabs on the form. To create the tabs
+    # we must assign the classes suit-tab and suit-tab-<name>, where <name>
+    # will be used later.
+    fieldsets = [(None, {'fields': ['backend_status_text', 'name', 'enabled',
+                                    'versionNumber', 'description', "view_url"],
+                         'classes':['suit-tab suit-tab-general']})]
+
+    # Denotes the fields that are readonly and cannot be changed.
+    readonly_fields = ('backend_status_text', )
+
+    # Inlines are used to denote other models that can be edited on the same
+    # form as this one. In this case the service form also allows changes
+    # to slices.
+    inlines = [SliceInline]
+
+    extracontext_registered_admins = True
+
+    # Denotes the fields that can be changed by an admin but not by all users
+    user_readonly_fields = ["name", "enabled", "versionNumber", "description"]
+
+    # Associates fieldsets from this form and from the inlines.
+    # The format here is a set of tuples of (<name>, tab title); <name> comes from
+    # the <name> in the fieldsets.
+    suit_form_tabs = (('general', 'MCORD Service Details'),
+                      ('administration', 'Components'),
+                      ('slices', 'Slices'),)
+
+    # Used to include a template for a tab. Here we include the
+    # mcordadmin template in the top position for the administration
+    # tab.
+    suit_form_includes = (('mcordadmin.html',
+                           'top',
+                           'administration'),)
+
+    # Used to get the objects for this model that are associated with the
+    # requesting user.
+    def queryset(self, request):
+        return MCORDService.get_service_objects_by_user(request.user)
+
+# Class to represent the form to add and edit tenants.
+# We need to define this instead of just using an admin like we did for the
+# service because tenants vary more than services and there isn't a common form.
+# This allows us to change the python behavior for the admin form to save extra
+# fields and control defaults.
+class VBBUComponentForm(forms.ModelForm):
+    # Defines a field for the creator of this service. It is a dropdown which
+    # is populated with all of the users.
+    creator = forms.ModelChoiceField(queryset=User.objects.all())
+    # Defines a text field for the display message, it is not required.
+    display_message = forms.CharField(required=False)
+
+    def __init__(self, *args, **kwargs):
+        super(VBBUComponentForm, self).__init__(*args, **kwargs)
+        # Set the kind field to readonly
+        self.fields['kind'].widget.attrs['readonly'] = True
+        # Define the logic for obtaining the objects for the provider_service
+        # dropdown of the tenant form.
+        self.fields[
+            'provider_service'].queryset = MCORDService.get_service_objects().all()
+        # Set the initial kind to MCORD_KIND for this tenant.
+        self.fields['kind'].initial = MCORD_KIND
+        # If there is an instance of this model then we can set the initial
+        # form values to the existing values.
+        if self.instance:
+            self.fields['creator'].initial = self.instance.creator
+            self.fields[
+                'display_message'].initial = self.instance.display_message
+
+        # If there is not an instance then we need to set initial values.
+        if (not self.instance) or (not self.instance.pk):
+            self.fields['creator'].initial = get_request().user
+            if MCORDService.get_service_objects().exists():
+                self.fields["provider_service"].initial = MCORDService.get_service_objects().all()[0]
+
+    # This function describes what happens when the save button is pressed on
+    # the tenant form. In this case we set the values for the instance that were
+    # entered.
+    def save(self, commit=True):
+        self.instance.creator = self.cleaned_data.get("creator")
+        self.instance.display_message = self.cleaned_data.get(
+            "display_message")
+        return super(VBBUComponentForm, self).save(commit=commit)
+
+    class Meta:
+        model = VBBUComponent
+
+# Define the admin form for the tenant. This uses a similar structure to the
+# service but uses VBBUComponentForm to change the Python behavior.
+
+
+class VBBUComponentAdmin(ReadOnlyAwareAdmin):
+    verbose_name = "vBBU Component"
+    verbose_name_plural = "vBBU Components"
+    list_display = ('id', 'backend_status_icon', 'instance', 'display_message')
+    list_display_links = ('backend_status_icon', 'instance', 'display_message',
+                          'id')
+    fieldsets = [(None, {'fields': ['backend_status_text', 'kind',
+                                    'provider_service', 'instance', 'creator',
+                                    'display_message'],
+                         'classes': ['suit-tab suit-tab-general']})]
+    readonly_fields = ('backend_status_text', 'instance',)
+    form = VBBUComponentForm
+
+    suit_form_tabs = (('general', 'Details'),)
+
+    def queryset(self, request):
+        return VBBUComponent.get_tenant_objects_by_user(request.user)
+
+# Associate the admin forms with the models.
+admin.site.register(MCORDService, MCORDServiceAdmin)
+admin.site.register(VBBUComponent, VBBUComponentAdmin)
diff --git a/xos/services/mcord/models.py b/xos/services/mcord/models.py
new file mode 100644
index 0000000..6e9fac5
--- /dev/null
+++ b/xos/services/mcord/models.py
@@ -0,0 +1,282 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, AddressPool, Port
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+from core.models import Tag
+from core.models.service import LeastLoadedNodeScheduler
+import traceback
+from xos.exceptions import *
+from core.models import SlicePrivilege, SitePrivilege
+from sets import Set
+from xos.config import Config
+
+MCORD_KIND = "MCORD"
+MCORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+VBBU_KIND = "vBBU"
+VSGW_KIND = "vSGW"
+VPGW_KIND = "vPGW"
+net_types = ("s1u", "s1mme", "rru")
+# The class that represents the service. Most of the service logic is given for us
+# in the Service class, but we have some configuration that is specific to
+# this service.
+class MCORDService(Service):
+    KIND = MCORD_KIND
+
+    class Meta:
+        # When the proxy field is set to True the model is represented as
+        # its superclass in the database, but we can still change the Python
+        # behavior. In this case MCORDService is a Service in the
+        # database.
+        proxy = True
+        # The name used to find this service, all directories are named this
+        app_label = "mcord"
+        verbose_name = "MCORD Service"
+
+# This is the class that represents the tenant. Most of the logic is given to us
+# in TenantWithContainer; however, there is some configuration and logic that
+# we need to define for this service.
+class VBBUComponent(TenantWithContainer):
+
+    class Meta:
+        # Same as above: VBBUComponent is represented as a
+        # TenantWithContainer, but we change the Python behavior.
+        proxy = True
+        verbose_name = "VBBU MCORD Service Component"
+
+    # The kind of the service is used on forms to differentiate this service
+    # from the other services.
+    KIND = VBBU_KIND
+
+    # The sync_attributes field lists the per-network IPs and MACs passed to
+    # Ansible; these will be used to determine where to SSH to.
+    # Getters must be defined for every attribute specified here.
+    sync_attributes = ("s1u_ip", "s1u_mac",
+                       "s1mme_ip", "s1mme_mac",
+                       "rru_ip", "rru_mac")
+    # default_attributes is used to cleanly indicate what the default values for
+    # the fields are.
+    default_attributes = {"display_message": "VBBU Component is ready!", "s1u_tag": "201", "s1mme_tag": "200", "rru_tag": "199"}
+    def __init__(self, *args, **kwargs):
+        mcord_services = MCORDService.get_service_objects().all()
+        # When the tenant is created, the default service in the form is set
+        # to be the first created MCORDService
+        if mcord_services:
+            self._meta.get_field(
+                "provider_service").default = mcord_services[0].id
+        super(VBBUComponent, self).__init__(*args, **kwargs)
+
+    def can_update(self, user):
+        #Allow creation of this model instances for non-admin users also
+        return True
+
+    def save(self, *args, **kwargs):
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a service component, since it creates a slice
+                raise XOSProgrammingError("ServiceComponent's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("ServiceComponent's self.creator was not set")
+
+        super(VBBUComponent, self).save(*args, **kwargs)
+        # This call needs to happen so that an instance is created for this
+        # tenant is created in the slice. One instance is created per tenant.
+        model_policy_mcord_servicecomponent(self.pk)
+
+    def save_instance(self, instance):
+        with transaction.atomic():
+            super(VBBUComponent, self).save_instance(instance)
+            if instance.isolation in ["vm"]:
+                for ntype in net_types:
+                    lan_network = self.get_lan_network(instance, ntype)
+                    port = self.find_or_make_port(instance,lan_network)
+                    if (ntype == "s1u"):
+                        port.set_parameter("s_tag", self.s1u_tag)
+                        port.set_parameter("neutron_port_name", "stag-%s" % self.s1u_tag)
+                        port.save()
+                    elif (ntype == "s1mme"):
+                        port.set_parameter("s_tag", self.s1mme_tag)
+                        port.set_parameter("neutron_port_name", "stag-%s" % self.s1mme_tag)
+                        port.save()
+                    elif (ntype == "rru"):
+                        port.set_parameter("s_tag", self.rru_tag)
+                        port.set_parameter("neutron_port_name", "stag-%s" % self.rru_tag)
+                        port.save()
+    
+    def delete(self, *args, **kwargs):
+        # Delete the instance that was created for this tenant
+        self.cleanup_container()
+        super(VBBUComponent, self).delete(*args, **kwargs)
+
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance=instance, network=network)
+        if port:
+            port = port[0]
+            print "port already exists", port
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            print "NETWORK", network, "MAKE_PORT", port 
+            port.save()
+        return port
+
+    def get_lan_network(self, instance, ntype):
+        slice = self.provider_service.slices.all()[0]
+        lan_networks = [x for x in slice.networks.all() if ntype in x.name]
+        if not lan_networks:
+            raise XOSProgrammingError("No lan_network")
+        return lan_networks[0]
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        # For container or container_vm isolation, use what TenantWithContainer
+        # provides us
+        slice = self.get_slice()
+        if slice.default_isolation in ["container_vm", "container"]:
+            super(VBBUComponent,self).manage_container()
+            return
+
+        if not self.s1u_tag:
+            raise XOSConfigurationError("S1U_TAG is missing")
+
+        if not self.s1mme_tag:
+            raise XOSConfigurationError("S1MME_TAG is missing")
+
+        if not self.rru_tag:
+            raise XOSConfigurationError("RRU_TAG is missing")
+
+        if self.instance:
+            # We're good.
+            return
+
+        instance = self.make_instance()
+        self.instance = instance
+        super(TenantWithContainer, self).save()
+
+    def get_slice(self):
+        if not self.provider_service.slices.count():
+            raise XOSConfigurationError("The service has no slices")
+        slice = self.provider_service.slices.all()[0]
+        return slice
+
+    def make_instance(self):
+        slice = self.provider_service.slices.all()[0]            
+        flavors = Flavor.objects.filter(name=slice.default_flavor)
+        if not flavors:
+            raise XOSConfigurationError("No default flavor")
+        default_flavor = slice.default_flavor
+        slice = self.provider_service.slices.all()[0]
+        if slice.default_isolation == "container_vm":
+            (node, parent) = ContainerVmScheduler(slice).pick()
+        else:
+            (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+        instance = Instance(slice = slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = slice.default_isolation,
+                        parent = parent)
+        self.save_instance(instance)
+        return instance
+
+    def ip_to_mac(self, ip):
+        (a, b, c, d) = ip.split('.')
+        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+    # Getter for the message that will appear on the webpage
+    # By default it is "VBBU Component is ready!"
+    @property
+    def display_message(self):
+        return self.get_attribute(
+            "display_message",
+            self.default_attributes['display_message'])
+
+    @display_message.setter
+    def display_message(self, value):
+        self.set_attribute("display_message", value)
+
+    @property
+    def s1u_tag(self):
+        return self.get_attribute(
+            "s1u_tag",
+            self.default_attributes['s1u_tag'])
+
+    @s1u_tag.setter
+    def s1u_tag(self, value):
+        self.set_attribute("s1u_tag", value)
+
+    @property
+    def s1mme_tag(self):
+        return self.get_attribute(
+            "s1mme_tag",
+            self.default_attributes['s1mme_tag'])
+
+    @s1mme_tag.setter
+    def s1mme_tag(self, value):
+        self.set_attribute("s1mme_tag", value)
+
+    @property
+    def rru_tag(self):
+        return self.get_attribute(
+            "rru_tag",
+            self.default_attributes['rru_tag'])
+
+    @rru_tag.setter
+    def rru_tag(self, value):
+        self.set_attribute("rru_tag", value)
+
+
+    @property
+    def addresses(self):
+        if (not self.id) or (not self.instance):
+            return {}
+
+        addresses = {}
+        for ns in self.instance.ports.all():
+            if "s1u" in ns.network.name.lower():
+                addresses["s1u"] = (ns.ip, ns.mac)
+            elif "s1mme" in ns.network.name.lower():
+                addresses["s1mme"] = (ns.ip, ns.mac)
+            elif "rru" in ns.network.name.lower():
+                addresses["rru"] = (ns.ip, ns.mac)
+        return addresses
+
+
+    @property
+    def s1u_ip(self):
+        return self.addresses.get("s1u", (None, None))[0]
+    @property
+    def s1u_mac(self):
+        return self.addresses.get("s1u", (None, None))[1]
+    @property
+    def s1mme_ip(self):
+        return self.addresses.get("s1mme", (None, None))[0]
+    @property
+    def s1mme_mac(self):
+        return self.addresses.get("s1mme", (None, None))[1]
+    @property
+    def rru_ip(self):
+        return self.addresses.get("rru", (None, None))[0]
+    @property
+    def rru_mac(self):
+        return self.addresses.get("rru", (None, None))[1]
+
+def model_policy_mcord_servicecomponent(pk):
+    # This section of code is atomic to prevent race conditions
+    with transaction.atomic():
+        # We find all of the tenants that are waiting to update
+        component = VBBUComponent.objects.select_for_update().filter(pk=pk)
+        if not component:
+            return
+        # Since this code is atomic it is safe to always use the first tenant
+        component = component[0]
+        component.manage_container()
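For reference, the chain wired up in models.py is: save() → model_policy_mcord_servicecomponent() → manage_container() → make_instance() → save_instance(), which creates the s1u/s1mme/rru ports. A minimal sketch of exercising it from a Django shell follows; it is illustrative only, assumes an MCORDService with a configured slice already exists, and the admin e-mail is a hypothetical example:

    from core.models import User
    from services.mcord.models import MCORDService, VBBUComponent

    user = User.objects.get(email="padmin@vicci.org")    # hypothetical admin account
    mcord = MCORDService.get_service_objects().all()[0]  # assumes the service was onboarded

    vbbu = VBBUComponent(provider_service=mcord)
    vbbu.caller = user    # save() copies this into creator when creator is unset
    vbbu.s1u_tag = "201"  # optional; defaults come from default_attributes
    vbbu.save()           # triggers model_policy_mcord_servicecomponent(self.pk)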
diff --git a/xos/services/mcord/models.py.old b/xos/services/mcord/models.py.old
new file mode 100644
index 0000000..fc69548
--- /dev/null
+++ b/xos/services/mcord/models.py.old
@@ -0,0 +1,288 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, AddressPool, Port
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+from core.models import Tag
+from core.models.service import LeastLoadedNodeScheduler
+import traceback
+from xos.exceptions import *
+from core.models import SlicePrivilege, SitePrivilege
+from sets import Set
+from xos.config import Config
+
+MCORD_KIND = "MCORD"
+MCORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+VBBU_KIND = "vBBU"
+VSGW_KIND = "vSGW"
+VPGW_KIND = "vPGW"
+# The class that represents the service. Most of the service logic is given for us
+# in the Service class, but we have some configuration that is specific to
+# this service.
+class MCORDService(Service):
+    KIND = MCORD_KIND
+
+    class Meta:
+        # When the proxy field is set to True the model is represented as
+        # its superclass in the database, but we can still change the Python
+        # behavior. In this case MCORDService is a Service in the
+        # database.
+        proxy = True
+        # The name used to find this service, all directories are named this
+        app_label = "mcord"
+        verbose_name = "MCORD Service"
+
+# This is the class that represents the tenant. Most of the logic is given to us
+# in TenantWithContainer; however, there is some configuration and logic that
+# we need to define for this service.
+class VBBUComponent(TenantWithContainer):
+
+    class Meta:
+        # Same as above: VBBUComponent is represented as a
+        # TenantWithContainer, but we change the Python behavior.
+        proxy = True
+        verbose_name = "VBBU MCORD Service Component"
+
+    # The kind of the service is used on forms to differentiate this service
+    # from the other services.
+    KIND = VBBU_KIND
+
+    # The sync_attributes field lists the per-network IPs and MACs passed to
+    # Ansible; these will be used to determine where to SSH to.
+    # Getters must be defined for every attribute specified here.
+    sync_attributes = ("s1u_ip", "s1u_mac",
+                       "s1mme_ip", "s1mme_mac",
+                       "rru_ip", "rru_mac")
+
+    # default_attributes is used to cleanly indicate what the default values for
+    # the fields are.
+    default_attributes = {"display_message": "VBBU Component is ready!", "s1u_tag": "201", "s1mme_tag": "200", "rru_tag": "199"}
+    def __init__(self, *args, **kwargs):
+        mcord_services = MCORDService.get_service_objects().all()
+        # When the tenant is created, the default service in the form is set
+        # to be the first created MCORDService
+        if mcord_services:
+            self._meta.get_field(
+                "provider_service").default = mcord_services[0].id
+        super(VBBUComponent, self).__init__(*args, **kwargs)
+
+    def can_update(self, user):
+        #Allow creation of this model instances for non-admin users also
+        return True
+
+    def save(self, *args, **kwargs):
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a service component, since it creates a slice
+                raise XOSProgrammingError("ServiceComponent's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("ServiceComponent's self.creator was not set")
+
+        super(VBBUComponent, self).save(*args, **kwargs)
+        # This call needs to happen so that an instance is created for this
+        # tenant is created in the slice. One instance is created per tenant.
+        model_policy_mcord_servicecomponent(self.pk)
+
+    def save_instance(self, instance):
+        with transaction.atomic():
+            super(VBBUComponent, self).save_instance(instance)
+            if instance.isolation in ["vm"]:
+                ntypes = ["s1u", "s1mme", "rru"]
+                for i in range(len(ntypes)): 
+                    ntype = ntypes[i]
+                    lan_network = self.get_lan_network(instance, ntype)
+#                    port = self.find_or_make_port(instance, lan_network)
+                    if ntype == "s1u":
+                        print "S1U_TAG", self.s1u_tag
+                        s1uport = self.find_or_make_port(instance, lan_network)
+                        s1uport.set_parameter("s_tag", self.s1u_tag)
+                        s1uport.set_parameter("neutron_port_name", "stag-%s" % self.s1u_tag)
+                        s1uport.save()
+                    elif ntype == "s1mme":
+                        print "MME_TAG", self.s1mme_tag
+                        mmeport = self.find_or_make_port(instance, lan_network)
+                        mmeport.set_parameter("s_tag", self.s1mme_tag)
+                        mmeport.set_parameter("neutron_port_name", "stag-%s" % self.s1mme_tag)
+                        mmeport.save()
+                    elif ntype == "rru":
+                        print "RRU_TAG", self.rru_tag
+                        rruport = self.find_or_make_port(instance, lan_network)
+                        rruport.set_parameter("s_tag", self.rru_tag)
+                        rruport.set_parameter("neutron_port_name", "stag-%s" % self.rru_tag)
+                        rruport.save()
+ 
+    def delete(self, *args, **kwargs):
+        # Delete the instance that was created for this tenant
+        self.cleanup_container()
+        super(VBBUComponent, self).delete(*args, **kwargs)
+
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance=instance, network=network)
+        if port:
+            port = port[0]
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            port.save()
+        return port
+
+    def get_lan_network(self, instance, ntype):
+        slice = self.provider_service.slices.all()[0]
+        lan_networks = [x for x in slice.networks.all() if ntype in x.name]
+        if not lan_networks:
+            raise XOSProgrammingError("No lan_network")
+        return lan_networks[0]
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        # For container or container_vm isolation, use what TenantWithContainer
+        # provides us
+        slice = self.get_slice()
+        if slice.default_isolation in ["container_vm", "container"]:
+            super(VBBUComponent,self).manage_container()
+            return
+
+        if not self.s1u_tag:
+            raise XOSConfigurationError("S1U_TAG is missing")
+
+        if not self.s1mme_tag:
+            raise XOSConfigurationError("S1MME_TAG is missing")
+
+        if not self.rru_tag:
+            raise XOSConfigurationError("RRU_TAG is missing")
+
+        if self.instance:
+            # We're good.
+            return
+
+        instance = self.make_instance_for_s_tag(self.s1u_tag)
+        self.instance = instance
+        super(TenantWithContainer, self).save()
+
+    def get_slice(self):
+        if not self.provider_service.slices.count():
+            raise XOSConfigurationError("The service has no slices")
+        slice = self.provider_service.slices.all()[0]
+        return slice
+
+    def make_instance_for_s_tag(self, s_tag):
+        slice = self.provider_service.slices.all()[0]            
+        flavors = Flavor.objects.filter(name=slice.default_flavor)
+        if not flavors:
+            raise XOSConfigurationError("No default flavor")
+        default_flavor = slice.default_flavor
+        slice = self.provider_service.slices.all()[0]
+        if slice.default_isolation == "container_vm":
+            (node, parent) = ContainerVmScheduler(slice).pick()
+        else:
+            (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+        instance = Instance(slice = slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = slice.default_isolation,
+                        parent = parent)
+        self.save_instance(instance)
+        return instance
+
+    def ip_to_mac(self, ip):
+        (a, b, c, d) = ip.split('.')
+        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+    # Getter for the message that will appear on the webpage
+    # By default it is "VBBU Component is ready!"
+    @property
+    def display_message(self):
+        return self.get_attribute(
+            "display_message",
+            self.default_attributes['display_message'])
+
+    @display_message.setter
+    def display_message(self, value):
+        self.set_attribute("display_message", value)
+
+    @property
+    def s1u_tag(self):
+        return self.get_attribute(
+            "s1u_tag",
+            self.default_attributes['s1u_tag'])
+
+    @s1u_tag.setter
+    def s1u_tag(self, value):
+        self.set_attribute("s1u_tag", value)
+
+    @property
+    def s1mme_tag(self):
+        return self.get_attribute(
+            "s1mme_tag",
+            self.default_attributes['s1mme_tag'])
+
+    @s1mme_tag.setter
+    def s1mme_tag(self, value):
+        self.set_attribute("s1mme_tag", value)
+
+    @property
+    def rru_tag(self):
+        return self.get_attribute(
+            "rru_tag",
+            self.default_attributes['rru_tag'])
+
+    @rru_tag.setter
+    def rru_tag(self, value):
+        self.set_attribute("rru_tag", value)
+
+
+    @property
+    def addresses(self):
+        if (not self.id) or (not self.instance):
+            return {}
+
+        addresses = {}
+        for ns in self.instance.ports.all():
+            if "s1u" in ns.network.name.lower():
+                addresses["s1u"] = (ns.ip, ns.mac)
+            elif "s1mme" in ns.network.name.lower():
+                addresses["s1mme"] = (ns.ip, ns.mac)
+            elif "rru" in ns.network.name.lower():
+                addresses["rru"] = (ns.ip, ns.mac)
+        return addresses
+
+
+    @property
+    def s1u_ip(self):
+        return self.addresses.get("s1u", (None, None))[0]
+    @property
+    def s1u_mac(self):
+        return self.addresses.get("s1u", (None, None))[1]
+    @property
+    def s1mme_ip(self):
+        return self.addresses.get("s1mme", (None, None))[0]
+    @property
+    def s1mme_mac(self):
+        return self.addresses.get("s1mme", (None, None))[1]
+    @property
+    def rru_ip(self):
+        return self.addresses.get("rru", (None, None))[0]
+    @property
+    def rru_mac(self):
+        return self.addresses.get("rru", (None, None))[1]
+
+def model_policy_mcord_servicecomponent(pk):
+    # This section of code is atomic to prevent race conditions
+    with transaction.atomic():
+        # We find all of the tenants that are waiting to update
+        component = VBBUComponent.objects.select_for_update().filter(pk=pk)
+        if not component:
+            return
+        # Since this code is atomic it is safe to always use the first tenant
+        component = component[0]
+        component.manage_container()
diff --git a/xos/services/mcord/models.py.old2 b/xos/services/mcord/models.py.old2
new file mode 100644
index 0000000..7e27a51
--- /dev/null
+++ b/xos/services/mcord/models.py.old2
@@ -0,0 +1,282 @@
+from django.db import models
+from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, AddressPool, Port
+from core.models.plcorebase import StrippedCharField
+import os
+from django.db import models, transaction
+from django.forms.models import model_to_dict
+from django.db.models import Q
+from operator import itemgetter, attrgetter, methodcaller
+from core.models import Tag
+from core.models.service import LeastLoadedNodeScheduler
+import traceback
+from xos.exceptions import *
+from core.models import SlicePrivilege, SitePrivilege
+from sets import Set
+from xos.config import Config
+
+MCORD_KIND = "MCORD"
+MCORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
+VBBU_KIND = "vBBU"
+VSGW_KIND = "vSGW"
+VPGW_KIND = "vPGW"
+net_types = ("s1u", "s1mme", "rru")
+# The class that represents the service. Most of the service logic is given for us
+# in the Service class, but we have some configuration that is specific to
+# this service.
+class MCORDService(Service):
+    KIND = MCORD_KIND
+
+    class Meta:
+        # When the proxy field is set to True the model is represented as
+        # its superclass in the database, but we can still change the Python
+        # behavior. In this case MCORDService is a Service in the
+        # database.
+        proxy = True
+        # The name used to find this service, all directories are named this
+        app_label = "mcord"
+        verbose_name = "MCORD Service"
+
+# This is the class that represents the tenant. Most of the logic is given to us
+# in TenantWithContainer; however, there is some configuration and logic that
+# we need to define for this service.
+class VBBUComponent(TenantWithContainer):
+
+    class Meta:
+        # Same as above: VBBUComponent is represented as a
+        # TenantWithContainer, but we change the Python behavior.
+        proxy = True
+        verbose_name = "VBBU MCORD Service Component"
+
+    # The kind of the service is used on forms to differentiate this service
+    # from the other services.
+    KIND = VBBU_KIND
+
+    # The sync_attributes field lists the per-network IPs and MACs passed to
+    # Ansible; these will be used to determine where to SSH to.
+    # Getters must be defined for every attribute specified here.
+    sync_attributes = ("s1u_ip", "s1u_mac",
+                       "s1mme_ip", "s1mme_mac",
+                       "rru_ip", "rru_mac")
+    # default_attributes is used to cleanly indicate what the default values for
+    # the fields are.
+    default_attributes = {"display_message": "VBBU Component is ready!", "s1u_tag": "201", "s1mme_tag": "200", "rru_tag": "199"}
+    def __init__(self, *args, **kwargs):
+        mcord_services = MCORDService.get_service_objects().all()
+        # When the tenant is created, the default service in the form is set
+        # to be the first created MCORDService
+        if mcord_services:
+            self._meta.get_field(
+                "provider_service").default = mcord_services[0].id
+        super(VBBUComponent, self).__init__(*args, **kwargs)
+
+    def can_update(self, user):
+        #Allow creation of this model instances for non-admin users also
+        return True
+
+    def save(self, *args, **kwargs):
+        if not self.creator:
+            if not getattr(self, "caller", None):
+                # caller must be set when creating a service component, since it creates a slice
+                raise XOSProgrammingError("ServiceComponent's self.caller was not set")
+            self.creator = self.caller
+            if not self.creator:
+                raise XOSProgrammingError("ServiceComponent's self.creator was not set")
+
+        super(VBBUComponent, self).save(*args, **kwargs)
+        # This call needs to happen so that an instance is created for this
+        # tenant in the slice. One instance is created per tenant.
+        model_policy_mcord_servicecomponent(self.pk)
+
+    def save_instance(self, instance):
+        with transaction.atomic():
+            super(VBBUComponent, self).save_instance(instance)
+            if instance.isolation in ["vm"]:
+                for ntype in net_types:
+                    lan_network = self.get_lan_network(instance, ntype)
+                    port = self.find_or_make_port(instance,lan_network)
+                    if (ntype == "s1u"):
+                        port.set_parameter("s_tag", self.s1u_tag)
+                        port.set_parameter("neutron_port_name", "stag-%s" % self.s1u_tag)
+                        port.save()
+#                    elif (ntype == "s1mme"):
+#                        port.set_parameter("s_tag", self.s1mme_tag)
+#                        port.set_parameter("neutron_port_name", "stag-%s" % self.s1mme_tag)
+#                        port.save()
+#                    elif (ntype == "rru"):
+#                        port.set_parameter("s_tag", self.rru_tag)
+#                        port.set_parameter("neutron_port_name", "stag-%s" % self.rru_tag)
+#                        port.save()
+    
+    def delete(self, *args, **kwargs):
+        # Delete the instance that was created for this tenant
+        self.cleanup_container()
+        super(VBBUComponent, self).delete(*args, **kwargs)
+
+    def find_or_make_port(self, instance, network, **kwargs):
+        port = Port.objects.filter(instance=instance, network=network)
+        if port:
+            port = port[0]
+            print "port already exists", port
+        else:
+            port = Port(instance=instance, network=network, **kwargs)
+            print "NETWORK", network, "MAKE_PORT", port 
+            port.save()
+        return port
+
+    def get_lan_network(self, instance, ntype):
+        slice = self.provider_service.slices.all()[0]
+        lan_networks = [x for x in slice.networks.all() if ntype in x.name]
+        if not lan_networks:
+            raise XOSProgrammingError("No lan_network")
+        return lan_networks[0]
+
+    def manage_container(self):
+        from core.models import Instance, Flavor
+
+        if self.deleted:
+            return
+
+        # For container or container_vm isolation, use what TenantWithContainer
+        # provides us
+        slice = self.get_slice()
+        if slice.default_isolation in ["container_vm", "container"]:
+            super(VBBUComponent,self).manage_container()
+            return
+
+        if not self.s1u_tag:
+            raise XOSConfigurationError("S1U_TAG is missing")
+
+        if not self.s1mme_tag:
+            raise XOSConfigurationError("S1MME_TAG is missing")
+
+        if not self.rru_tag:
+            raise XOSConfigurationError("RRU_TAG is missing")
+
+        if self.instance:
+            # We're good.
+            return
+
+        instance = self.make_instance()
+        self.instance = instance
+        super(TenantWithContainer, self).save()
+
+    def get_slice(self):
+        if not self.provider_service.slices.count():
+            raise XOSConfigurationError("The service has no slices")
+        slice = self.provider_service.slices.all()[0]
+        return slice
+
+    def make_instance(self):
+        slice = self.provider_service.slices.all()[0]            
+        flavors = Flavor.objects.filter(name=slice.default_flavor)
+        if not flavors:
+            raise XOSConfigurationError("No default flavor")
+        default_flavor = slice.default_flavor
+        slice = self.provider_service.slices.all()[0]
+        if slice.default_isolation == "container_vm":
+            (node, parent) = ContainerVmScheduler(slice).pick()
+        else:
+            (node, parent) = LeastLoadedNodeScheduler(slice).pick()
+        instance = Instance(slice = slice,
+                        node = node,
+                        image = self.image,
+                        creator = self.creator,
+                        deployment = node.site_deployment.deployment,
+                        flavor = flavors[0],
+                        isolation = slice.default_isolation,
+                        parent = parent)
+        self.save_instance(instance)
+        return instance
+
+    def ip_to_mac(self, ip):
+        (a, b, c, d) = ip.split('.')
+        return "02:42:%02x:%02x:%02x:%02x" % (int(a), int(b), int(c), int(d))
+
+    # Getter for the message that will appear on the webpage
+    # By default it is "VBBU Component is ready!"
+    @property
+    def display_message(self):
+        return self.get_attribute(
+            "display_message",
+            self.default_attributes['display_message'])
+
+    @display_message.setter
+    def display_message(self, value):
+        self.set_attribute("display_message", value)
+
+    @property
+    def s1u_tag(self):
+        return self.get_attribute(
+            "s1u_tag",
+            self.default_attributes['s1u_tag'])
+
+    @s1u_tag.setter
+    def s1u_tag(self, value):
+        self.set_attribute("s1u_tag", value)
+
+    @property
+    def s1mme_tag(self):
+        return self.get_attribute(
+            "s1mme_tag",
+            self.default_attributes['s1mme_tag'])
+
+    @s1mme_tag.setter
+    def s1mme_tag(self, value):
+        self.set_attribute("s1mme_tag", value)
+
+    @property
+    def rru_tag(self):
+        return self.get_attribute(
+            "rru_tag",
+            self.default_attributes['rru_tag'])
+
+    @rru_tag.setter
+    def rru_tag(self, value):
+        self.set_attribute("rru_tag", value)
+
+
+    @property
+    def addresses(self):
+        if (not self.id) or (not self.instance):
+            return {}
+
+        addresses = {}
+        for ns in self.instance.ports.all():
+            if "s1u" in ns.network.name.lower():
+                addresses["s1u"] = (ns.ip, ns.mac)
+            elif "s1mme" in ns.network.name.lower():
+                addresses["s1mme"] = (ns.ip, ns.mac)
+            elif "rru" in ns.network.name.lower():
+                addresses["rru"] = (ns.ip, ns.mac)
+        return addresses
+
+
+    @property
+    def s1u_ip(self):
+        return self.addresses.get("s1u", (None, None))[0]
+    @property
+    def s1u_mac(self):
+        return self.addresses.get("s1u", (None, None))[1]
+    @property
+    def s1mme_ip(self):
+        return self.addresses.get("s1mme", (None, None))[0]
+    @property
+    def s1mme_mac(self):
+        return self.addresses.get("s1mme", (None, None))[1]
+    @property
+    def rru_ip(self):
+        return self.addresses.get("rru", (None, None))[0]
+    @property
+    def rru_mac(self):
+        return self.addresses.get("rru", (None, None))[1]
+
+def model_policy_mcord_servicecomponent(pk):
+    # This section of code is atomic to prevent race conditions
+    with transaction.atomic():
+        # We find all of the tenants that are waiting to update
+        component = VBBUComponent.objects.select_for_update().filter(pk=pk)
+        if not component:
+            return
+        # Since this code is atomic it is safe to always use the first tenant
+        component = component[0]
+        component.manage_container()
diff --git a/xos/services/mcord/templates/mcordadmin.html b/xos/services/mcord/templates/mcordadmin.html
new file mode 100644
index 0000000..78f99f6
--- /dev/null
+++ b/xos/services/mcord/templates/mcordadmin.html
@@ -0,0 +1,10 @@
+<!-- Template used for the button leading to the MCORD Service Components form. -->
+<div class = "left-nav">
+  <ul>
+    <li>
+      <a href="/admin/mcordservice/mcordcomponent/">
+        MCORD Service Components
+      </a>
+    </li>
+  </ul>
+</div>
diff --git a/xos/services/mcord/view.py b/xos/services/mcord/view.py
new file mode 100644
index 0000000..2da4a24
--- /dev/null
+++ b/xos/services/mcord/view.py
@@ -0,0 +1,107 @@
+from django.http import HttpResponse
+from django.views.generic import TemplateView, View
+from django import template
+from core.models import *
+from services.mcord.models import *
+import json
+import os
+import time
+import tempfile
+
+
+class MCordView(TemplateView):
+    head_template = r"""{% extends "admin/dashboard/dashboard_base.html" %}
+       {% load admin_static %}
+       {% block content %}
+    """
+
+    tail_template = r"{% endblock %}"
+
+    def get(self, request, name="root", *args, **kwargs):
+        head_template = self.head_template
+        tail_template = self.tail_template
+
+        title = request.GET.get('service', '')
+        url = "/mcord/?service=%s" % (title)
+
+        form = """
+        <h2 class="content-title">Change %s Service</h2>
+        <div id="content-main">
+            <form class="form-horizontal">
+                <div class="tab-content tab-content-main">
+                    <div class="suit-include suit-tab suit-tab-administration hide">
+                        <div class="left-nav">
+                            <ul>
+                                <li><a href="/admin/ceilometer/monitoringchannel/">Monitoring Channels</a></li>
+                            </ul>
+                        </div>
+                    </div>
+                    <fieldset class="module aligned suit-tab suit-tab-general show">
+                        <div class="panel fieldset-body">
+                            <div class="form-group field-backend_status_text ">
+                                <label class="control-label col-xs-12 col-sm-2"><label>Backend status text:</label></label>
+                                <div class="form-column col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <p><img src="/static/admin/img/icon_clock.gif"> Pending sync, last_status = 0 - Provisioning in progress</p>
+                                </div>
+                            </div>
+                            <div class="form-group field-name ">
+                                <label class="control-label col-xs-12 col-sm-2"><label class="required" for="id_name">Name:</label></label>
+                                <div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <input class="vTextField form-control" id="id_name" maxlength="30" name="name" type="text" value="%s">
+                                    <div class="help-block">Service Name</div>
+                                </div>
+                            </div>
+                            <div class="form-group field-enabled ">
+                                <label class="control-label col-xs-12 col-sm-2"><label class="vCheckboxLabel" for="id_enabled">Enabled</label></label>
+                                <div class="form-column widget-CheckboxInput col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <input checked="checked" id="id_enabled" name="enabled" type="checkbox">
+                                </div>
+                            </div>
+                            <div class="form-group field-versionNumber ">
+                                <label class="control-label col-xs-12 col-sm-2"><label class="required" for="id_versionNumber">VersionNumber:</label></label>
+                                <div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <input class="vTextField form-control" id="id_versionNumber" maxlength="30" name="versionNumber" type="text">
+                                    <div class="help-block">Version of Service Definition</div>
+                                </div>
+                            </div>
+                            <div class="form-group field-description ">
+                                <label class="control-label col-xs-12 col-sm-2"><label for="id_description">Description:</label></label>
+                                <div class="form-column widget-AdminTextareaWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <textarea class="vLargeTextField form-control" cols="40" id="id_description" maxlength="254" name="description" rows="10"></textarea>
+                                    <div class="help-block">Description of Service</div>
+                                </div>
+                            </div>
+                            <div class="form-group field-view_url ">
+                                <label class="control-label col-xs-12 col-sm-2"><label for="id_view_url">View url:</label></label>
+                                <div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <input class="vTextField form-control" id="id_view_url" maxlength="1024" name="view_url" type="text" value="%s">
+                                </div>
+                            </div>
+                            <div class="form-group field-icon_url ">
+                                <label class="control-label col-xs-12 col-sm-2"><label for="id_icon_url">Icon url:</label></label>
+                                <div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
+                                    <input class="vTextField form-control" id="id_icon_url" maxlength="1024" name="icon_url" type="text">
+                                </div>
+                            </div>
+                        </div>
+                    </fieldset>
+                </div>
+            </form>
+            <div class="form-buttons clearfix">
+                <button type="submit" class="btn btn-high btn-success" name="_save">Save</button>
+                <button type="submit" name="_continue" class=" btn btn-high btn-info">Save and continue editing</button>
+                <button type="submit" name="_addanother" class="btn btn-info">Save and add another</button>
+                <a href="delete/" class="text-error deletelink">Delete</a>
+            </div>
+        </div>
+        """ % (title, title, url)
+
+        t = template.Template(head_template + form + tail_template)
+
+        response_kwargs = {}
+        response_kwargs.setdefault('content_type', self.content_type)
+        return self.response_class(
+            request=request,
+            template=t,
+            **response_kwargs
+        )
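
The view above builds its markup with plain "%" substitution before compiling it
as a Django template: the service title is injected twice (page heading and name
field) and the computed view URL once. A minimal sketch of that substitution,
with simplified placeholder markup rather than the real MCORD form:

    title = "vBBU"
    url = "/mcord/?service=%s" % title

    form = """
    <h2>Change %s Service</h2>
    <input name="name" value="%s">
    <input name="view_url" value="%s">
    """ % (title, title, url)

    print form
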
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.py b/xos/synchronizers/openstack/steps/sync_controller_networks.py
index 5375a8d..9286fa7 100644
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.py
+++ b/xos/synchronizers/openstack/steps/sync_controller_networks.py
@@ -48,6 +48,19 @@
         ip = struct.unpack("!L", socket.inet_aton(network))[0]
         ip = ip & netmask | 1
         return socket.inet_ntoa(struct.pack("!L", ip))
+#MCORD
+    def alloc_start_ip(self, subnet):
+        parts = subnet.split(".")
+        if len(parts)!=4:
+            raise Exception("Invalid subnet %s" % subnet)
+        return ".".join(parts[:3]) + ".3"
+
+    def alloc_end_ip(self, subnet):
+        parts = subnet.split(".")
+        if len(parts)!=4:
+            raise Exception("Invalid subnet %s" % subnet)
+        return ".".join(parts[:3]) + ".254"
+#MCORD
 
     def save_controller_network(self, controller_network):
         network_name = controller_network.network.name
@@ -58,6 +71,24 @@
             cidr = controller_network.subnet.strip()
         else:
             cidr = self.alloc_subnet(controller_network.pk)
+#MCORD
+
+        if controller_network.network.start_ip and controller_network.network.start_ip.strip():
+            start_ip = controller_network.network.start_ip.strip()
+            print "DEF_START_IP", start_ip
+        else:
+            start_ip = self.alloc_start_ip(cidr) 
+            print "DEF_START_AIP", start_ip
+
+        if controller_network.network.end_ip and controller_network.network.end_ip.strip():
+            end_ip = controller_network.network.end_ip.strip()
+            print "DEF_START_IP", end_ip
+        else:
+            end_ip = self.alloc_end_ip(cidr) 
+            print "DEF_END_AIP", end_ip
+#MCORD
+
+
         self.cidr=cidr
         slice = controller_network.network.owner
 
@@ -72,6 +103,8 @@
                     'ansible_tag':'%s-%s@%s'%(network_name,slice.slicename,controller_network.controller.name),
                     'cidr':cidr,
                     'gateway':self.alloc_gateway(cidr),
+                    'start_ip':start_ip,
+                    'end_ip':end_ip,
                     'use_vtn':getattr(Config(), "networking_use_vtn", False),
                     'delete':False
                     }
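
The two helpers added above derive the allocation pool purely textually from the
first three octets of the subnet, yielding .3 and .254 regardless of the actual
prefix length. A minimal sketch with an illustrative CIDR:

    def alloc_start_ip(subnet):
        parts = subnet.split(".")
        if len(parts) != 4:
            raise Exception("Invalid subnet %s" % subnet)
        return ".".join(parts[:3]) + ".3"

    def alloc_end_ip(subnet):
        parts = subnet.split(".")
        if len(parts) != 4:
            raise Exception("Invalid subnet %s" % subnet)
        return ".".join(parts[:3]) + ".254"

    cidr = "10.0.8.0/24"
    print alloc_start_ip(cidr), alloc_end_ip(cidr)   # 10.0.8.3 10.0.8.254

Note that this assumes a prefix no longer than /24; for prefixes longer than /24
(smaller subnets) the .254 endpoint could fall outside the range.
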
diff --git a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml b/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
index b885516..070a050 100644
--- a/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
+++ b/xos/synchronizers/openstack/steps/sync_controller_networks.yaml
@@ -35,5 +35,7 @@
         {% endif %}
         dns_nameservers=8.8.8.8
         cidr={{ cidr }}
+        allocation_pool_start={{ start_ip }}
+        allocation_pool_end={{ end_ip }}
         {% endif %}
   {% endif %}
diff --git a/xos/synchronizers/openstack/steps/sync_instances.py b/xos/synchronizers/openstack/steps/sync_instances.py
index 3a1bc52..faeb860 100644
--- a/xos/synchronizers/openstack/steps/sync_instances.py
+++ b/xos/synchronizers/openstack/steps/sync_instances.py
@@ -56,8 +56,9 @@
             if network:
                 tem = network.template
                 if (tem.visibility == "private") and (tem.translation=="none") and ("management" in tem.name):
-                    if len(result)!=1:
-                        raise Exception("Management network needs to be inserted in slot 1, but there are %d private nics" % len(result))
+#MCORD
+#                    if len(result)!=1:
+#                        raise Exception("Management network needs to be inserted in slot 1, but there are %d private nics" % len(result))
                     result.append(nic)
                     nics.remove(nic)
 
diff --git a/xos/synchronizers/vbbu/__init__.py b/xos/synchronizers/vbbu/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/xos/synchronizers/vbbu/__init__.py
diff --git a/xos/synchronizers/vbbu/model-deps b/xos/synchronizers/vbbu/model-deps
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/xos/synchronizers/vbbu/model-deps
@@ -0,0 +1 @@
+{}
diff --git a/xos/synchronizers/vbbu/run.sh b/xos/synchronizers/vbbu/run.sh
new file mode 100644
index 0000000..27d4e24
--- /dev/null
+++ b/xos/synchronizers/vbbu/run.sh
@@ -0,0 +1,3 @@
+# Runs the XOS observer using vbbu_config
+export XOS_DIR=/opt/xos
+python vbbu-synchronizer.py  -C $XOS_DIR/synchronizers/vbbu/vbbu_config
diff --git a/xos/synchronizers/vbbu/steps/sync_vbbu.py b/xos/synchronizers/vbbu/steps/sync_vbbu.py
new file mode 100644
index 0000000..59feb33
--- /dev/null
+++ b/xos/synchronizers/vbbu/steps/sync_vbbu.py
@@ -0,0 +1,37 @@
+import os
+import sys
+from django.db.models import Q, F
+from services.mcord.models import MCORDService, VBBUComponent
+from synchronizers.base.SyncInstanceUsingAnsible import SyncInstanceUsingAnsible
+
+parentdir = os.path.join(os.path.dirname(__file__), "..")
+sys.path.insert(0, parentdir)
+
+class SyncVBBUComponent(SyncInstanceUsingAnsible):
+
+    provides = [VBBUComponent]
+
+    observes = VBBUComponent
+
+    requested_interval = 0
+
+    template_name = "sync_vbbu.yaml"
+
+    service_key_name = "/opt/xos/configurations/mcord/mcord_private_key"
+
+    def __init__(self, *args, **kwargs):
+        super(SyncVBBUComponent, self).__init__(*args, **kwargs)
+
+    def fetch_pending(self, deleted):
+
+        if (not deleted):
+            objs = VBBUComponent.get_tenant_objects().filter(
+                Q(enacted__lt=F('updated')) | Q(enacted=None), Q(lazy_blocked=False))
+        else:
+
+            objs = VBBUComponent.get_deleted_tenant_objects()
+
+        return objs
+
+    def get_extra_attributes(self, o):
+        return {"display_message": o.display_message}
diff --git a/xos/synchronizers/vbbu/steps/sync_vbbu.yaml b/xos/synchronizers/vbbu/steps/sync_vbbu.yaml
new file mode 100644
index 0000000..2d9bf39
--- /dev/null
+++ b/xos/synchronizers/vbbu/steps/sync_vbbu.yaml
@@ -0,0 +1,11 @@
+---
+- hosts: {{ instance_name }}
+  gather_facts: False
+  connection: ssh
+  user: ubuntu
+  sudo: yes
+  tasks:
+
+  - name: write message
+    shell: echo "{{ display_message }}" > /var/tmp/index.html
+
diff --git a/xos/synchronizers/vbbu/stop.sh b/xos/synchronizers/vbbu/stop.sh
new file mode 100644
index 0000000..7ad7f4c
--- /dev/null
+++ b/xos/synchronizers/vbbu/stop.sh
@@ -0,0 +1,2 @@
+# Kill the observer
+pkill -9 -f vbbu-synchronizer.py
diff --git a/xos/synchronizers/vbbu/vbbu-synchronizer.py b/xos/synchronizers/vbbu/vbbu-synchronizer.py
new file mode 100644
index 0000000..95f4081
--- /dev/null
+++ b/xos/synchronizers/vbbu/vbbu-synchronizer.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+# This imports and runs ../base/xos-synchronizer.py
+# Runs the standard XOS observer
+
+import importlib
+import os
+import sys
+observer_path = os.path.join(os.path.dirname(
+    os.path.realpath(__file__)), "../../synchronizers/base")
+sys.path.append(observer_path)
+mod = importlib.import_module("xos-synchronizer")
+mod.main()
diff --git a/xos/synchronizers/vbbu/vbbu_config b/xos/synchronizers/vbbu/vbbu_config
new file mode 100644
index 0000000..3713b67
--- /dev/null
+++ b/xos/synchronizers/vbbu/vbbu_config
@@ -0,0 +1,40 @@
+# Required by XOS
+[db]
+name=xos
+user=postgres
+password=password
+host=localhost
+port=5432
+
+# Required by XOS
+[api]
+nova_enabled=True
+
+# Sets options for the observer
+[observer]
+# Optional name
+name=vbbu
+# This is the location of the dependency graph you generate
+dependency_graph=/opt/xos/synchronizers/vbbu/model-deps
+# The location of your SyncSteps
+steps_dir=/opt/xos/synchronizers/vbbu/steps
+# A temporary directory that will be used by ansible
+sys_dir=/opt/xos/synchronizers/vbbu/sys
+# Location of the file to save logging messages to; the backend log is often used
+logfile=/var/log/xos_backend.log
+# If this option is true, then nothing will change; we simply pretend to run
+pretend=False
+# If this is False then XOS will use an exponential backoff when the observer
+# fails; since we will be waiting for an instance, we don't want that.
+backoff_disabled=True
+# We want the output from ansible to be logged
+save_ansible_output=True
+# This determines how we SSH to a client: if this is set to True then we try
+# to ssh using the instance name as a proxy; if it is disabled we ssh using
+# the NAT IP of the instance. On CloudLab the first option will fail, so there
+# you must set this to False.
+proxy_ssh=True
+proxy_ssh_key=/root/setup/id_rsa
+proxy_ssh_user=root
+[networking]
+use_vtn=True
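
The synchronizer framework parses this file itself; the snippet below is only a
minimal sketch, using the Python 2 standard-library ConfigParser, of how the
sections and keys above can be read back (it assumes the file is installed at
the path used in run.sh):

    import ConfigParser

    cfg = ConfigParser.ConfigParser()
    cfg.read("/opt/xos/synchronizers/vbbu/vbbu_config")

    print cfg.get("observer", "name")              # vbbu
    print cfg.get("observer", "steps_dir")         # /opt/xos/synchronizers/vbbu/steps
    print cfg.getboolean("observer", "proxy_ssh")  # True
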
diff --git a/xos/tools/xos-manage b/xos/tools/xos-manage
index 23cda72..9219d36 100755
--- a/xos/tools/xos-manage
+++ b/xos/tools/xos-manage
@@ -143,6 +143,7 @@
     python ./manage.py makemigrations requestrouter
     python ./manage.py makemigrations syndicate_storage
     python ./manage.py makemigrations cord
+    python ./manage.py makemigrations mcord
     python ./manage.py makemigrations ceilometer
     python ./manage.py makemigrations helloworldservice_complete
     python ./manage.py makemigrations onos
diff --git a/xos/tosca/custom_types/xos.m4 b/xos/tosca/custom_types/xos.m4
index fd4f8ec..b8f14d7 100644
--- a/xos/tosca/custom_types/xos.m4
+++ b/xos/tosca/custom_types/xos.m4
@@ -733,6 +733,10 @@
                 type: string
                 required: false
                 description: default flavor to use for slice
+            default_node:
+                type: string
+                required: false
+                description: default node to use for this slice
             network:
                 type: string
                 required: false
diff --git a/xos/tosca/custom_types/xos.yaml b/xos/tosca/custom_types/xos.yaml
index 9bdcfc3..f8a36db 100644
--- a/xos/tosca/custom_types/xos.yaml
+++ b/xos/tosca/custom_types/xos.yaml
@@ -1100,6 +1100,10 @@
                 type: string
                 required: false
                 description: default flavor to use for slice
+            default_node:
+                type: string
+                required: false
+                description: default node to use for this slice
             network:
                 type: string
                 required: false
diff --git a/xos/tosca/resources/mcordservice.py b/xos/tosca/resources/mcordservice.py
new file mode 100644
index 0000000..d77e54d
--- /dev/null
+++ b/xos/tosca/resources/mcordservice.py
@@ -0,0 +1,19 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+import pdb
+
+from services.mcord.models import MCORDService
+
+from service import XOSService
+
+class XOSMCORDService(XOSService):
+    provides = "tosca.nodes.MCORDService"
+    xos_model = MCORDService
+    copyin_props = ["view_url", "icon_url", "enabled", "published", "public_key",
+                    "private_key_fn", "versionNumber",
+                    ]
+
diff --git a/xos/tosca/resources/network.py b/xos/tosca/resources/network.py
index 32e3fc1..ab60ce8 100644
--- a/xos/tosca/resources/network.py
+++ b/xos/tosca/resources/network.py
@@ -43,6 +43,19 @@
         if cidr:
             args["subnet"] = cidr
 
+        start_ip = self.get_property_default("start_ip", None)
+        if start_ip:
+            args["start_ip"] = start_ip 
+        print "DEF_RES_IP", start_ip 
+
+        end_ip = self.get_property_default("end_ip", None)
+        if end_ip:
+            args["end_ip"] = end_ip 
+
+#        gateway_ip = self.get_property_default("gateway_ip", None)
+#        if gateway_ip:
+#            args["gateway_ip"] = gateway_ip
+
         return args
 
     def postprocess(self, obj):
diff --git a/xos/tosca/resources/slice.py b/xos/tosca/resources/slice.py
index 48e5eb0..724957f 100644
--- a/xos/tosca/resources/slice.py
+++ b/xos/tosca/resources/slice.py
@@ -5,7 +5,7 @@
 sys.path.append("/opt/tosca")
 from translator.toscalib.tosca_template import ToscaTemplate
 
-from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service,Image,Flavor
+from core.models import Slice,User,Site,Network,NetworkSlice,SliceRole,SlicePrivilege,Service,Image,Flavor,Node
 
 from xosresource import XOSResource
 
@@ -36,6 +36,11 @@
             default_flavor = self.get_xos_object(Flavor, name=default_flavor_name, throw_exception=True)
             args["default_flavor"] = default_flavor
 
+        default_node_name = self.get_property_default("default_node", None)
+        if default_node_name:
+            default_node = self.get_xos_object(Node, name=default_node_name, throw_exception=True)
+            args["default_node"] = default_node
+
         return args
 
     def postprocess(self, obj):
diff --git a/xos/tosca/resources/vbbucomponent.py b/xos/tosca/resources/vbbucomponent.py
new file mode 100644
index 0000000..ee60d0c
--- /dev/null
+++ b/xos/tosca/resources/vbbucomponent.py
@@ -0,0 +1,40 @@
+import os
+import pdb
+import sys
+import tempfile
+sys.path.append("/opt/tosca")
+from translator.toscalib.tosca_template import ToscaTemplate
+import pdb
+
+from services.mcord.models import VBBUComponent, MCORDService
+
+from xosresource import XOSResource
+
+class XOSVBBUComponent(XOSResource):
+    provides = "tosca.nodes.VBBUComponent"
+    xos_model = VBBUComponent
+    copyin_props = ["s1u_tag", "s1mme_tag", "rru_tag", "display_message"]
+    name_field = None
+
+    def get_xos_args(self, throw_exception=True):
+        args = super(XOSVBBUComponent, self).get_xos_args()
+
+        provider_name = self.get_requirement("tosca.relationships.MemberOfService", throw_exception=throw_exception)
+        if provider_name:
+            args["provider_service"] = self.get_xos_object(MCORDService, throw_exception=throw_exception, name=provider_name)
+
+        return args
+
+    def get_existing_objs(self):
+        args = self.get_xos_args(throw_exception=False)
+        provider_service = args.get("provider", None)
+        if provider_service:
+            return [ self.get_xos_object(provider_service=provider_service) ]
+        return []
+
+    def postprocess(self, obj):
+        pass
+
+    def can_delete(self, obj):
+        return super(XOSVBBUComponent, self).can_delete(obj)
+
diff --git a/xos/xos/settings.py b/xos/xos/settings.py
index f457476..f58939c 100644
--- a/xos/xos/settings.py
+++ b/xos/xos/settings.py
@@ -175,6 +175,7 @@
     'core',
     'services.hpc',
     'services.cord',
+    'services.mcord',
     'services.helloworldservice_complete',
     'services.onos',
     'services.ceilometer',
diff --git a/xos/xos_configuration/xos_common_config b/xos/xos_configuration/xos_common_config
index 2855816..3a35798 100755
--- a/xos/xos_configuration/xos_common_config
+++ b/xos/xos_configuration/xos_common_config
@@ -37,9 +37,16 @@
 images_directory=/opt/xos/images
 dependency_graph=/opt/xos/model-deps
 logfile=/var/log/xos_backend.log
+proxy_ssh=True
+proxy_ssh_key=/root/setup/node_key
+proxy_ssh_user=root
 
 [gui]
 disable_minidashboard=True
 branding_name=Open Cloud
 branding_icon=/static/logo.png
 branding_favicon=/static/favicon.png
+
+[networking]
+use_vtn=True
+