CORD-871: Migrate monitoring service to new CORD build infrastructure - phase1

Usage of Monitoring Service build commands in new build system:
----------------------------------------------
Add the below alias to your bashrc file and source it.
CiaB platform-install brings up monitoring-service synchronizer by default.
Once the synchronizer is completely operational, run 'cord-monitoringservice' on your terminal.

alias deploy-monitoringservice="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml onboard-monitoringservice-playbook.yml"
alias instantiate-monitoringservice="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml instantiate-monitoringservice-playbook.yml"
alias generate-install-infra-agents-config="ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml monitoringagent-infra-install-config.yml"
alias install-infra-agents="ansible-playbook -i /opt/cord_profile/monitoringagent_install.hosts monitoringagent-infra-install.yml"
alias generate-enable-infra-monitoring-config="ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml monitoringservice-enable-inframonitoring-config.yml"
alias enable-infra-monitoring="ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-enable-inframonitoring.yml"
alias test-monitoringservice-initial="ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-test-initial.yaml"
alias test-monitoringservice-inframetrics="ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-test-inframetrics.yaml"
alias test-monitoringservice-vsgmetrics="ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-test-vsgmetrics.yaml"
alias test-monitoringservice-exampleservicemetrics="ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-test-exampleservicemetrics.yaml"

alias cord-monitoringservice="echo \"*******NOTE:Ensure monitoring service synchronizer is completely up and running********\"; instantiate-monitoringservice; test-monitoringservice-initial"
alias install-inframonitoring-agent="generate-install-infra-agents-config; install-infra-agents"
alias enable-inframonitoring="generate-enable-infra-monitoring-config; enable-infra-monitoring; test-monitoringservice-inframetrics"

Change-Id: I9895dd1107de50e9817325f750f0608d9eb72be5
diff --git a/instantiate-monitoringservice-playbook.yml b/instantiate-monitoringservice-playbook.yml
new file mode 100644
index 0000000..e9d3204
--- /dev/null
+++ b/instantiate-monitoringservice-playbook.yml
@@ -0,0 +1,36 @@
+---
+# instantiate-monitoringservice-playbook.yml
+# Adds the monitoringservice to the currently running pod
+# TODO: Due to some timing issues, the monitoring service synchronizer is not completely getting
+# onboarded within the retries of onboard-monitoringservice.yml, so the ansible script would
+# eventually fail. Until it is fixed, once monitoring service synchronizer is ready, run this
+# ansible playbook which has only tasks for instantiating the monitoring service models.
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Create monitoringservice config
+  hosts: head
+  roles:
+    - monitoringservice-config
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Check to see if XOS UI is ready
+  hosts: xos_ui
+  connection: docker
+  roles:
+    - xos-ready
+
+- name: Instantiate monitoringservice and channel
+  hosts: xos_ui
+  connection: docker
+  roles:
+    - monitoringservice-instantiate
+
diff --git a/monitoringagent-infra-install-config.yml b/monitoringagent-infra-install-config.yml
new file mode 100644
index 0000000..f23fcc1
--- /dev/null
+++ b/monitoringagent-infra-install-config.yml
@@ -0,0 +1,19 @@
+---
+# monitoringagent-infra-install-config.yml
+# Generates the inventory file to be used to run the monitoringagent-infra-install.yml playbook
+# should be run with /etc/maas/ansible/pod-inventory as inventory file
+# ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml monitoringagent-infra-install-config.yml
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Create monitoringagent config
+  hosts: head
+  roles:
+    - monitoringagent-infra-install-config
diff --git a/monitoringagent-infra-install.yml b/monitoringagent-infra-install.yml
new file mode 100644
index 0000000..f099762
--- /dev/null
+++ b/monitoringagent-infra-install.yml
@@ -0,0 +1,13 @@
+---
+# monitoringagent-infra-install.yml
+# Install the monitoringagents on head node ceilometer-1 and all compute nodes
+# should be run with config generated inventory file /opt/cord_profile/monitoringagent_install.hosts
+# ansible-playbook -i /opt/cord_profile/monitoringagent_install.hosts monitoringagent-infra-install.yml
+
+- include: "/opt/cord/orchestration/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml"
+  vars:
+     instance_name: head
+
+- include: "/opt/cord/orchestration/xos_services/monitoring/xos/synchronizer/ceilometer/monitoring_agent/ceilometer_config.yaml"
+  vars:
+     instance_name: compute
diff --git a/monitoringservice-enable-inframonitoring-config.yml b/monitoringservice-enable-inframonitoring-config.yml
new file mode 100644
index 0000000..d7b0f4b
--- /dev/null
+++ b/monitoringservice-enable-inframonitoring-config.yml
@@ -0,0 +1,19 @@
+---
+# monitoringservice-enable-inframonitoring-config.yml
+# Generates the TOSCA files to be used to run the monitoringservice-enable-inframonitoring.yml playbook
+# should be run with /etc/maas/ansible/pod-inventory as inventory file
+# ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml monitoringservice-enable-inframonitoring-config.yml
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Create config files for enabling infrastructure monitoring
+  hosts: head
+  roles:
+    - monitoringservice-enable-inframonitoring-config
diff --git a/monitoringservice-enable-inframonitoring.yml b/monitoringservice-enable-inframonitoring.yml
new file mode 100644
index 0000000..a0ea0be
--- /dev/null
+++ b/monitoringservice-enable-inframonitoring.yml
@@ -0,0 +1,22 @@
+---
+# monitoringservice-enable-inframonitoring.yml
+# Enables monitoring for infrastructure services (OpenStack and ONOS)
+# ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml monitoringservice-enable-inframonitoring.yml
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Run role to execute TOSCA file
+  hosts: xos_ui
+  connection: docker
+  roles:
+    - monitoringservice-enable-inframonitoring
+
diff --git a/monitoringservice-test-exampleservicemetrics.yaml b/monitoringservice-test-exampleservicemetrics.yaml
new file mode 100644
index 0000000..52fc0ef
--- /dev/null
+++ b/monitoringservice-test-exampleservicemetrics.yaml
@@ -0,0 +1,18 @@
+---
+# monitoringservice-test-exampleservicemetrics.yaml
+# Tests the monitoringservice after enabling monitoring for exampleservice
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Invoke common+test-exampleservicemonitoring roles
+  hosts: head
+  roles:
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/common", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/test-exampleservicemonitoring", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
diff --git a/monitoringservice-test-inframetrics.yaml b/monitoringservice-test-inframetrics.yaml
new file mode 100644
index 0000000..4c770ca
--- /dev/null
+++ b/monitoringservice-test-inframetrics.yaml
@@ -0,0 +1,18 @@
+---
+# monitoringservice-test-inframetrics.yaml
+# Tests the monitoringservice after infrastructure monitoring is enabled
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Invoke common+test-inframonitoring roles
+  hosts: head
+  roles:
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/common", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/test-inframonitoring", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
diff --git a/monitoringservice-test-initial.yaml b/monitoringservice-test-initial.yaml
new file mode 100644
index 0000000..e6f19b0
--- /dev/null
+++ b/monitoringservice-test-initial.yaml
@@ -0,0 +1,18 @@
+---
+# monitoringservice-test-initial.yaml
+# Tests the monitoringservice after it is onboarded
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Invoke common+test-initial roles
+  hosts: head
+  roles:
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/common", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/test-initial", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
diff --git a/monitoringservice-test-vsgmetrics.yaml b/monitoringservice-test-vsgmetrics.yaml
new file mode 100644
index 0000000..91cedeb
--- /dev/null
+++ b/monitoringservice-test-vsgmetrics.yaml
@@ -0,0 +1,18 @@
+---
+# monitoringservice-test-vsgmetrics.yaml
+# Tests the monitoringservice after enabling monitoring for vSG service
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Invoke common+test-vsgmonitoring roles
+  hosts: head
+  roles:
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/common", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
+    - {role: "/opt/cord/orchestration/xos_services/monitoring/xos/test/roles/test-vsgmonitoring", auth: {user: "{{ xos_admin_user }}", pass: "{{ xos_admin_pass }}"} }
diff --git a/onboard-monitoringservice-playbook.yml b/onboard-monitoringservice-playbook.yml
new file mode 100644
index 0000000..a0321b3
--- /dev/null
+++ b/onboard-monitoringservice-playbook.yml
@@ -0,0 +1,40 @@
+---
+# onboard-monitoringservice-playbook.yml
+# Adds the monitoringservice to the currently running pod
+
+- name: Include vars
+  hosts: all
+  tasks:
+    - name: Include variables
+      include_vars: "{{ item }}"
+      with_items:
+        - "profile_manifests/{{ cord_profile }}.yml"
+        - profile_manifests/local_vars.yml
+
+- name: Create monitoringservice config
+  hosts: head
+  roles:
+    - monitoringservice-config
+
+- include: add-bootstrap-containers-playbook.yml
+
+- name: Onboard monitoringservice
+  hosts: xos_bootstrap_ui
+  connection: docker
+  roles:
+    - monitoringservice-onboard
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Check to see if XOS UI is ready
+  hosts: xos_ui
+  connection: docker
+  roles:
+    - xos-ready
+
+- name: Instantiate monitoringservice and channel
+  hosts: xos_ui
+  connection: docker
+  roles:
+    - monitoringservice-instantiate
+
diff --git a/profile_manifests/rcord.yml b/profile_manifests/rcord.yml
index 07d584c..e0ef9d6 100644
--- a/profile_manifests/rcord.yml
+++ b/profile_manifests/rcord.yml
@@ -62,6 +62,8 @@
 # needed onboarding synchronizer doesn't require service code to be present when started
   - name: exampleservice
     path: orchestration/xos_services/exampleservice
+  - name: monitoring
+    path: orchestration/xos_services/monitoring
 
 xos_service_sshkeys:
   - name: onos_rsa
@@ -81,6 +83,10 @@
     source_path: "~/.ssh/id_rsa"
   - name: exampleservice_rsa.pub
     source_path: "~/.ssh/id_rsa.pub"
+  - name: monitoringservice_rsa
+    source_path: "~/.ssh/id_rsa"
+  - name: monitoringservice_rsa.pub
+    source_path: "~/.ssh/id_rsa.pub"
 
 # VM networks/bridges on head
 virt_nets:
diff --git a/roles/monitoringagent-infra-install-config/tasks/main.yml b/roles/monitoringagent-infra-install-config/tasks/main.yml
new file mode 100644
index 0000000..a040b86
--- /dev/null
+++ b/roles/monitoringagent-infra-install-config/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+# monitoringagent-infra-install-config/tasks/main.yml
+
+- name: Variable file holding all target nodes where infra monitoring agent to be installed
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ cord_profile_dir }}/{{ item }}"
+  with_items:
+    - "monitoringagent_install.hosts"
+
diff --git a/roles/monitoringagent-infra-install-config/templates/monitoringagent_install.hosts.j2 b/roles/monitoringagent-infra-install-config/templates/monitoringagent_install.hosts.j2
new file mode 100644
index 0000000..673d43c
--- /dev/null
+++ b/roles/monitoringagent-infra-install-config/templates/monitoringagent_install.hosts.j2
@@ -0,0 +1,7 @@
+[head]
+ceilometer-1.cord.lab
+
+[compute]
+{% for node in groups["compute"] %}
+{{ node }}
+{% endfor %}
diff --git a/roles/monitoringservice-config/defaults/main.yml b/roles/monitoringservice-config/defaults/main.yml
new file mode 100644
index 0000000..5016f47
--- /dev/null
+++ b/roles/monitoringservice-config/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# monitoringservice-config/defaults/main.yml
+
+cord_dir: "{{ ansible_user_dir + '/cord' }}"
+cord_profile_dir: "{{ ansible_user_dir + '/cord_profile' }}"
+
diff --git a/roles/monitoringservice-config/tasks/main.yml b/roles/monitoringservice-config/tasks/main.yml
new file mode 100644
index 0000000..c511f09
--- /dev/null
+++ b/roles/monitoringservice-config/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# monitoringservice-config/tasks/main.yml
+
+- name: Copy monitoringservice onboarding TOSCA files to cord_profile
+  copy:
+    src: "{{ cord_dir }}/orchestration/xos_services/monitoring/xos/monitoring-onboard.yaml"
+    dest: "{{ cord_profile_dir }}/monitoring-onboard.yaml"
+
+- name: TOSCA to mount monitoringservice volume in XOS container
+  template:
+    src: "xos-monitoringservice.yaml.j2"
+    dest: "{{ cord_profile_dir }}/xos-monitoringservice.yaml"
+
+- name: TOSCA files to instantiate monitoringservice
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ cord_profile_dir }}/{{ item }}"
+  with_items:
+    - "monitoringservice.yaml"
+    - "monitoringtenant.yaml"
+
diff --git a/roles/monitoringservice-config/templates/monitoringservice.yaml.j2 b/roles/monitoringservice-config/templates/monitoringservice.yaml.j2
new file mode 100644
index 0000000..6142905
--- /dev/null
+++ b/roles/monitoringservice-config/templates/monitoringservice.yaml.j2
@@ -0,0 +1,181 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Setup the CORD monitoring service (Ceilometer proxy).
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/monitoring_tosca_types.yaml
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      requirements:
+      properties:
+          view_url: /admin/monitoring/ceilometerservice/$id$/
+          kind: ceilometer
+          ceilometer_enable_pub_sub: true
+          public_key: { get_artifact: [ SELF, pubkey, LOCAL_FILE] }
+          private_key_fn: /opt/xos/services/monitoring/keys/monitoringservice_rsa
+      artifacts:
+          pubkey: /opt/xos/services/monitoring/keys/monitoringservice_rsa.pub
+
+#    service_sflow:
+#      type: tosca.nodes.SFlowService
+#      requirements:
+#      properties:
+#          view_url: /admin/monitoring/sflowservice/$id$/
+#          kind: sflow
+#          sflow_port: 6343
+#          sflow_api_port: 33333
+
+    Private:
+      type: tosca.nodes.NetworkTemplate
+
+    management:
+      type: tosca.nodes.network.Network.XOS
+      properties:
+          no-create: true
+          no-delete: true
+          no-update: true
+
+#    ceilometer_network:
+#      type: tosca.nodes.network.Network.XOS
+#      properties:
+#          ip_version: 4
+#          labels: ceilometer_client_access
+#      requirements:
+#          - network_template:
+#              node: Private
+#              relationship: tosca.relationships.UsesNetworkTemplate
+#          - owner:
+#              node: {{ site_name }}_ceilometer
+#              relationship: tosca.relationships.MemberOfSlice
+#          - connection:
+#              node: {{ site_name }}_ceilometer
+#              relationship: tosca.relationships.ConnectsToSlice
+
+    {{ site_name }}:
+      type: tosca.nodes.Site
+
+    trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    ceilometer-service-trusty-server-multi-nic:
+      type: tosca.nodes.Image
+
+    m1.small:
+      type: tosca.nodes.Flavor
+
+    {{ site_name }}_ceilometer:
+      description: Ceilometer Proxy Slice
+      type: tosca.nodes.Slice
+      requirements:
+          - ceilometer_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+          - site:
+              node: {{ site_name }}
+              relationship: tosca.relationships.MemberOfSite
+          - default_image:
+                node: ceilometer-trusty-server-multi-nic
+                relationship: tosca.relationships.DefaultImage
+          - management:
+              node: management
+              relationship: tosca.relationships.ConnectsToNetwork
+          - m1.small:
+              node: m1.small
+              relationship: tosca.relationships.DefaultFlavor
+
+    # Virtual machines
+    ceilometer_service_instance:
+      type: tosca.nodes.Compute
+      capabilities:
+        # Host container properties
+        host:
+         properties:
+           num_cpus: 2
+           disk_size: 40 GB
+           mem_size: 4 GB
+      requirements:
+          - slice:
+                node: {{ site_name }}_ceilometer
+                relationship: tosca.relationships.MemberOfSlice
+          - image:
+                node: ceilometer-service-trusty-server-multi-nic
+                relationship: tosca.relationships.UsesImage
+
+#    {{ site_name }}_sflow:
+#      description: Slice for sFlow service
+#      type: tosca.nodes.Slice
+#      requirements:
+#          - sflow_service:
+#              node: service_sflow
+#              relationship: tosca.relationships.MemberOfService
+#          - site:
+#              node: {{ site_name }}
+#              relationship: tosca.relationships.MemberOfSite
+
+# MOVING this section to monitoringtenant.yaml
+#    my_ceilometer_tenant:
+#      description: Ceilometer Service default Tenant
+#      type: tosca.nodes.CeilometerTenant
+#      requirements:
+#          - provider_service:
+#              node: service_ceilometer
+#              relationship: tosca.relationships.MemberOfService
+#          - dependency:
+#              node: ceilometer_service_instance
+#              relationship: tosca.relationships.DependsOn
+       
+    # Virtual machines
+#    sflow_service_instance:
+#      type: tosca.nodes.Compute
+#      capabilities:
+#        # Host container properties
+#        host:
+#         properties:
+#           num_cpus: 1
+#           disk_size: 10 GB
+#           mem_size: 4 MB
+#        # Guest Operating System properties
+#        os:
+#          properties:
+#            # host Operating System image properties
+#            architecture: x86_64
+#            type: linux
+#            distribution: Ubuntu
+#            version: 14.10
+#      requirements:
+#          - slice:
+#                node: {{ site_name }}_sflow
+#                relationship: tosca.relationships.MemberOfSlice
+
+# MOVING the below sections to monitoringtenant.yaml
+#    Ceilometer:
+#      type: tosca.nodes.DashboardView
+#      properties:
+#          url: template:xosCeilometerDashboard
+#    Tenant:
+#      type: tosca.nodes.DashboardView
+#      properties:
+#          no-create: true
+#          no-update: true
+#          no-delete: true
+#
+#    padmin@vicci.org:
+#      type: tosca.nodes.User
+#      properties:
+#          firstname: XOS
+#          lastname: admin
+#          is_admin: true
+#      requirements:
+#          - tenant_dashboard:
+#              node: Tenant
+#              relationship: tosca.relationships.UsesDashboard
+#          - ceilometer_dashboard:
+#              node: Ceilometer
+#              relationship: tosca.relationships.UsesDashboard
diff --git a/roles/monitoringservice-config/templates/monitoringtenant.yaml.j2 b/roles/monitoringservice-config/templates/monitoringtenant.yaml.j2
new file mode 100644
index 0000000..cc5e881
--- /dev/null
+++ b/roles/monitoringservice-config/templates/monitoringtenant.yaml.j2
@@ -0,0 +1,52 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Instantiate the CORD monitoring service default tenant and dashboards.
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/monitoring_tosca_types.yaml
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    my_ceilometer_tenant:
+      description: Ceilometer Service default Tenant
+      type: tosca.nodes.CeilometerTenant
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.MemberOfService
+       
+    Ceilometer:
+      type: tosca.nodes.DashboardView
+      properties:
+          url: template:xosCeilometerDashboard
+    Tenant:
+      type: tosca.nodes.DashboardView
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+# XOS Users
+# Default admin user account
+    {{ xos_admin_user }}:
+      type: tosca.nodes.User
+      properties:
+          password: {{ xos_admin_pass }}
+          firstname: {{ xos_admin_first }}
+          lastname: {{ xos_admin_last }}
+          is_admin: true
+      requirements:
+          - tenant_dashboard:
+              node: Tenant
+              relationship: tosca.relationships.UsesDashboard
+          - ceilometer_dashboard:
+              node: Ceilometer
+              relationship: tosca.relationships.UsesDashboard
diff --git a/roles/monitoringservice-config/templates/xos-monitoringservice.yaml.j2 b/roles/monitoringservice-config/templates/xos-monitoringservice.yaml.j2
new file mode 100644
index 0000000..b97ffc7
--- /dev/null
+++ b/roles/monitoringservice-config/templates/xos-monitoringservice.yaml.j2
@@ -0,0 +1,24 @@
+---
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Have the XOS container mount the monitoringservice volume
+
+imports:
+   - custom_types/xos.yaml
+
+topology_template:
+  node_templates:
+
+    xos:
+      type: tosca.nodes.XOS
+
+    /opt/xos_services/monitoring:
+      type: tosca.nodes.XOSVolume
+      properties:
+          host_path: "{{ cord_dir }}/orchestration/xos_services/monitoring"
+          read_only: True
+      requirements:
+          - xos:
+             node: xos
+             relationship: tosca.relationships.UsedByXOS
+
diff --git a/roles/monitoringservice-enable-inframonitoring-config/tasks/main.yml b/roles/monitoringservice-enable-inframonitoring-config/tasks/main.yml
new file mode 100644
index 0000000..203e85a
--- /dev/null
+++ b/roles/monitoringservice-enable-inframonitoring-config/tasks/main.yml
@@ -0,0 +1,12 @@
+
+---
+# monitoringservice-enable-inframonitoring-config/tasks/main.yml
+
+- name: TOSCA files to enable infrastructure monitoring
+  template:
+    src: "{{ item }}.j2"
+    dest: "{{ cord_profile_dir }}/{{ item }}"
+  with_items:
+    - "inframonitoring.yaml"
+    - "onos_monitoring_service_endpoints.json"
+
diff --git a/roles/monitoringservice-enable-inframonitoring-config/templates/inframonitoring.yaml.j2 b/roles/monitoringservice-enable-inframonitoring-config/templates/inframonitoring.yaml.j2
new file mode 100644
index 0000000..a2b55af
--- /dev/null
+++ b/roles/monitoringservice-enable-inframonitoring-config/templates/inframonitoring.yaml.j2
@@ -0,0 +1,74 @@
+tosca_definitions_version: tosca_simple_yaml_1_0
+
+description: Auto generated file to enable monitoring of infrastructure services (OpenStack, ONOS...etc).
+
+imports:
+   - custom_types/xos.yaml
+   - custom_types/monitoring_tosca_types.yaml
+
+topology_template:
+  node_templates:
+    service_ceilometer:
+      type: tosca.nodes.CeilometerService
+      properties:
+          no-create: true
+          no-update: true
+          no-delete: true
+
+    onos_monitoring_agent_head_node:
+      description: Monitoring agent info
+      type: tosca.nodes.InfraMonitoringAgentInfo
+      properties:
+          start_url: http://ceilometer-1.cord.lab:5004/monitoring/agent/onos/start
+          start_url_json_data: { get_artifact: [ SELF, onos_monitoring_service_endpoints, LOCAL_FILE] }
+          stop_url: http://ceilometer-1.cord.lab:5004/monitoring/agent/onos/stop
+      artifacts:
+          onos_monitoring_service_endpoints: /opt/cord_profile/onos_monitoring_service_endpoints.json
+
+    os_monitoring_agent_head_node:
+      description: Openstack Monitoring agent info
+      type: tosca.nodes.InfraMonitoringAgentInfo
+      properties:
+          start_url: http://ceilometer-1.cord.lab:5004/monitoring/agent/openstack/start
+          stop_url: http://ceilometer-1.cord.lab:5004/monitoring/agent/openstack/stop
+
+# OpenStack compute nodes
+{% for node in groups["compute"] %}
+    os_monitoring_agent_cp_{{ loop.index }}:
+      description: Openstack Monitoring agent info for {{ node }}
+      type: tosca.nodes.InfraMonitoringAgentInfo
+      properties:
+          start_url: http://{{ node }}:5004/monitoring/agent/openstack/start
+          stop_url: http://{{ node }}:5004/monitoring/agent/openstack/stop
+{% endfor %}
+
+    os_monitoring_collector_plugin:
+      description: Infra Monitoring collector plugin info
+      type: tosca.nodes.MonitoringCollectorPluginInfo
+      properties:
+          plugin_folder_path: /opt/xos/synchronizers/monitoring/ceilometer/ceilometer-plugins/network/ext_services/openstack_infra/
+          plugin_rabbit_exchange: openstack_infra
+ 
+    openstack_monitoring_publisher_tenant:
+      description: OpenStack Monitoring Publisher Tenant
+      type: tosca.nodes.OpenStackMonitoringPublisher
+      requirements:
+          - provider_service:
+              node: service_ceilometer
+              relationship: tosca.relationships.TenantOfService
+          - monitoring_collector_plugin:
+              node: os_monitoring_collector_plugin
+              relationship: tosca.relationships.ProvidesMonitoringCollectorPluginInfo
+          - monitoring_agent_1:
+              node: os_monitoring_agent_head_node
+              relationship: tosca.relationships.ProvidesInfraMonitoringAgentInfo
+          - monitoring_agent_2:
+              node: onos_monitoring_agent_head_node
+              relationship: tosca.relationships.ProvidesInfraMonitoringAgentInfo
+{% set count = 2 %}
+{% for node in groups["compute"] %}
+          - monitoring_agent_{{ count + loop.index }}:
+              node: os_monitoring_agent_cp_{{ loop.index }}
+              relationship: tosca.relationships.ProvidesInfraMonitoringAgentInfo
+{% endfor %}
+
diff --git a/roles/monitoringservice-enable-inframonitoring-config/templates/onos_monitoring_service_endpoints.json.j2 b/roles/monitoringservice-enable-inframonitoring-config/templates/onos_monitoring_service_endpoints.json.j2
new file mode 100644
index 0000000..af9ad84
--- /dev/null
+++ b/roles/monitoringservice-enable-inframonitoring-config/templates/onos_monitoring_service_endpoints.json.j2
@@ -0,0 +1,6 @@
+{
+    "resources" : [
+        "onos://onos-cord.cord.lab:8182?auth=basic&user=onos&password=rocks&scheme=http",
+        "onos://onos-fabric.cord.lab:8181?auth=basic&user=onos&password=rocks&scheme=http"
+     ]
+}
diff --git a/roles/monitoringservice-enable-inframonitoring/tasks/main.yml b/roles/monitoringservice-enable-inframonitoring/tasks/main.yml
new file mode 100644
index 0000000..defdb5c
--- /dev/null
+++ b/roles/monitoringservice-enable-inframonitoring/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+# monitoringservice-enable-inframonitoring/tasks/main.yml
+
+- name: Run TOSCA to enable infra monitoring
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/inframonitoring.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
diff --git a/roles/monitoringservice-instantiate/tasks/main.yml b/roles/monitoringservice-instantiate/tasks/main.yml
new file mode 100644
index 0000000..818cba9
--- /dev/null
+++ b/roles/monitoringservice-instantiate/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# monitoringservice-instantiate/tasks/main.yml
+
+- name: Run TOSCA to instantiate monitoringservice
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/monitoringservice.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
+- name: Run TOSCA to instantiate monitoring channel
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/monitoringtenant.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
diff --git a/roles/monitoringservice-onboard/defaults/main.yml b/roles/monitoringservice-onboard/defaults/main.yml
new file mode 100644
index 0000000..ff338b7
--- /dev/null
+++ b/roles/monitoringservice-onboard/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# monitoringservice-onboard/defaults/main.yml
+
+cord_dir: "{{ ansible_user_dir + '/cord' }}"
+cord_profile_dir: "{{ ansible_user_dir + '/cord_profile' }}"
+
+xos_bootstrap_ui_port: 9001
+
diff --git a/roles/monitoringservice-onboard/tasks/main.yml b/roles/monitoringservice-onboard/tasks/main.yml
new file mode 100644
index 0000000..9538d7f
--- /dev/null
+++ b/roles/monitoringservice-onboard/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+# monitoringservice-onboard/tasks/main.yml
+
+- name: Disable onboarding
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/disable-onboarding.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
+- name: Have XOS container mount monitoringservice volume
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/xos-monitoringservice.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
+- name: Onboard monitoringservice
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/monitoring-onboard.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
+- name: Enable onboarding
+  command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} /opt/cord_profile/enable-onboarding.yaml"
+  tags:
+    - skip_ansible_lint # TOSCA loading should be idempotent
+
+- name: Wait for monitoring to be onboarded
+  uri:
+    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/services/monitoring/ready/"
+    method: GET
+    return_content: yes
+  register: xos_onboard_status
+  until: '"true" in xos_onboard_status.content'
+  retries: 60
+  delay: 2
+
+- name: Wait for XOS to be onboarded after monitoringservice onboarding
+  uri:
+    url: "http://localhost:{{ xos_bootstrap_ui_port }}/api/utility/onboarding/xos/ready/"
+    method: GET
+    return_content: yes
+  register: xos_onboard_status
+  until: '"true" in xos_onboard_status.content'
+  retries: 60
+  delay: 2
+