Merge branch 'master' of github.com:open-cloud/xos
diff --git a/xos/configurations/cord/openstack_ceilometer_patch.tar.gz b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
index 6a6ffa7..2c4f02c 100644
--- a/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
+++ b/xos/configurations/cord/openstack_ceilometer_patch.tar.gz
Binary files differ
diff --git a/xos/core/static/xos.css b/xos/core/static/xos.css
index 38a9763..592bb7c 100644
--- a/xos/core/static/xos.css
+++ b/xos/core/static/xos.css
@@ -1329,6 +1329,12 @@
/* SUIT CHANGES */
+.form-buttons {
+ margin-top: 20px;
+ padding: 10px;
+ border-top: 1px solid #cccccc;
+}
+
.form-buttons a.deletelink {
color: #fff;
background-color: #d9534f;
diff --git a/xos/core/xoslib/methods/ceilometerview.py b/xos/core/xoslib/methods/ceilometerview.py
index a633819..b56065a 100644
--- a/xos/core/xoslib/methods/ceilometerview.py
+++ b/xos/core/xoslib/methods/ceilometerview.py
@@ -1094,9 +1094,19 @@
tenant_ceilometer_url = getTenantCeilometerProxyURL(request.user)
if (not tenant_ceilometer_url):
raise XOSMissingField("Tenant ceilometer URL is missing")
+
+ tenant_id = request.QUERY_PARAMS.get('tenant', None)
+ resource_id = request.QUERY_PARAMS.get('resource', None)
+
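+        # Optional request filters: restrict the resource map and meter list built below
+        # to a specific tenant (project) and/or resource when those query parameters are supplied.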
+ query = []
+ if tenant_id:
+ query.extend(make_query(tenant_id=tenant_id))
+ if resource_id:
+ query.extend(make_query(resource_id=resource_id))
+
tenant_map = getTenantControllerTenantMap(request.user)
- resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url)
- meters = Meters(request, ceilometer_url=tenant_ceilometer_url, tenant_map=tenant_map, resource_map=resource_map)
+ resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
+ meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
services = {
_('Nova'): meters.list_nova(),
_('Neutron'): meters.list_neutron(),
@@ -1165,7 +1175,7 @@
return Response(row)
#Statistics query for all meter
- resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url)
+ resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
services = {
_('Nova'): meters.list_nova(),
@@ -1239,6 +1249,25 @@
sample["resource_name"] = sample["resource_id"]
return Response(samples)
+class XOSSliceServiceList(APIView):
+ method_kind = "list"
+ method_name = "xos-slice-service-mapping"
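+    # Maps each XOS service to the slices (and their Keystone project ids) found in the
+    # tenant map for the requesting user; response shape (illustrative values):
+    #   [{"service": "vsg", "slices": [{"slice": "mysite_vsg", "project_id": "<project uuid>"}]}]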
+
+ def get(self, request, format=None):
+ if (not request.user.is_authenticated()):
+ raise PermissionDenied("You must be authenticated in order to use this API")
+ tenant_map = getTenantControllerTenantMap(request.user)
+        service_map = {}
+        for k, v in tenant_map.iteritems():
+            if v['service'] not in service_map:
+                service_map[v['service']] = {}
+                service_map[v['service']]['service'] = v['service']
+                service_map[v['service']]['slices'] = []
+            # per-slice entry appended to this service's slice list
+            slice_details = {'slice': v['slice'], 'project_id': k}
+            service_map[v['service']]['slices'].append(slice_details)
+        return Response(service_map.values())
+
class XOSInstanceStatisticsList(APIView):
method_kind = "list"
method_name = "xos-instance-statistics"
@@ -1295,9 +1324,15 @@
#Statistics query for all meter
resource_map = get_resource_map(request, ceilometer_url=tenant_ceilometer_url, query=query)
meters = Meters(request, ceilometer_url=tenant_ceilometer_url, query=query, tenant_map=tenant_map, resource_map=resource_map)
+ exclude_nova_meters_info = [ "instance", "instance:<type>", "disk.read.requests", "disk.write.requests",
+ "disk.read.bytes", "disk.write.bytes", "disk.read.requests.rate", "disk.write.requests.rate", "disk.read.bytes.rate",
+ "disk.write.bytes.rate", "disk.root.size", "disk.ephemeral.size"]
+ exclude_neutron_meters_info = [ 'network.create', 'network.update', 'subnet.create',
+ 'subnet.update', 'port.create', 'port.update', 'router.create', 'router.update',
+ 'ip.floating.create', 'ip.floating.update']
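+            # Nova/Neutron meters excluded from the per-instance statistics query below.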
services = {
- _('Nova'): meters.list_nova(),
- _('Neutron'): meters.list_neutron(),
+ _('Nova'): meters.list_nova(except_meters=exclude_nova_meters_info),
+ _('Neutron'): meters.list_neutron(except_meters=exclude_neutron_meters_info),
_('VCPE'): meters.list_vcpe(),
_('SDN'): meters.list_sdn(),
}
diff --git a/xos/synchronizers/monitoring_channel/steps/sync_sflowservice.yaml b/xos/synchronizers/monitoring_channel/steps/sync_sflowservice.yaml
index b0b3f50..8d853a2 100644
--- a/xos/synchronizers/monitoring_channel/steps/sync_sflowservice.yaml
+++ b/xos/synchronizers/monitoring_channel/steps/sync_sflowservice.yaml
@@ -53,7 +53,7 @@
image: srikanthvavila/sflowpubsub
expose:
- {{ sflow_api_port }}
- - {{ sflow_port }}
+ - {{ sflow_port }}/udp
ports:
- "{{ sflow_port }}:{{ sflow_port }}/udp"
- "{{ sflow_api_port }}:{{ sflow_api_port }}"
diff --git a/xos/synchronizers/vcpe/steps/sync_vcpetenant.yaml b/xos/synchronizers/vcpe/steps/sync_vcpetenant.yaml
index b9b4b9e..d887547 100644
--- a/xos/synchronizers/vcpe/steps/sync_vcpetenant.yaml
+++ b/xos/synchronizers/vcpe/steps/sync_vcpetenant.yaml
@@ -100,13 +100,13 @@
# - name: DEBUG
# debug: var=cron_job_pids_count.stdout
- - name: make sure ~/bin exists
- file: path=~/bin state=directory owner=root group=root
- when: cron_job_pids_count.stdout == "0"
+# - name: make sure ~/bin exists
+# file: path=~/bin state=directory owner=root group=root
+# when: cron_job_pids_count.stdout == "0"
- name: Copy cron job to destination
copy: src=/opt/xos/synchronizers/vcpe/vcpe_stats_notifier.py
- dest=~/bin/vcpe_stats_notifier.py
+ dest=/usr/local/sbin/vcpe_stats_notifier.py
when: cron_job_pids_count.stdout == "0"
- name: install python-kombu
@@ -114,7 +114,7 @@
when: cron_job_pids_count.stdout == "0"
- name: Initiate vcpe_stats_notifier cron job
- command: python ~/bin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
+ command: sudo python /usr/local/sbin/vcpe_stats_notifier.py --keystone_tenant_id={{ keystone_tenant_id }} --keystone_user_id={{ keystone_user_id }} --rabbit_user={{ rabbit_user }} --rabbit_password={{ rabbit_password }} --rabbit_host={{ rabbit_host }} --vcpeservice_rabbit_exchange='vcpeservice'
async: 9999999999999999
poll: 0
when: cron_job_pids_count.stdout == "0"
diff --git a/xos/synchronizers/vcpe/vcpe_stats_notifier.py b/xos/synchronizers/vcpe/vcpe_stats_notifier.py
index d726e3c..4d2cc76 100644
--- a/xos/synchronizers/vcpe/vcpe_stats_notifier.py
+++ b/xos/synchronizers/vcpe/vcpe_stats_notifier.py
@@ -8,6 +8,7 @@
import time, threading
import sys, getopt
import logging
+import os
logfile = "vcpe_stats_notifier.log"
@@ -21,8 +22,8 @@
handler.setFormatter(formatter)
logger.addHandler(handler)
-def extract_dns_stats_from_all_vcpes():
- p = subprocess.Popen('docker ps', shell=True, stdout=subprocess.PIPE)
+def get_all_docker_containers():
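+    # Returns {container_name: {'id': <untruncated container id>}} parsed from 'docker ps --no-trunc'.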
+ p = subprocess.Popen('docker ps --no-trunc', shell=True, stdout=subprocess.PIPE)
firstline = True
dockercontainers = {}
while True:
@@ -37,6 +38,56 @@
container_fields = {}
container_fields['id'] = fields[0]
dockercontainers[fields[-1]] = container_fields
+ return dockercontainers
+
+def extract_compute_stats_from_all_vcpes(dockercontainers):
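+    # Annotates each container entry in place with CPU, memory and per-interface network
+    # counters collected via 'docker stats --no-stream' and 'ifconfig' inside the container.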
+ for k,v in dockercontainers.iteritems():
+ cmd = 'sudo docker stats --no-stream=true ' + v['id']
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+ firstline = True
+ while True:
+ out = p.stdout.readline()
+ if out == '' and p.poll() != None:
+ break
+ if out != '':
+ if firstline is True:
+ firstline = False
+ else:
+ fields = out.split()
+ #['CONTAINER_ID', 'CPU%', 'MEMUSE', 'UNITS', '/', 'MEMLIMIT', 'UNITS', 'MEM%', 'NET I/O', 'UNITS', '/', 'NET I/O LIMIT', 'UNITS', 'BLOCK I/O', 'UNITS', '/', 'BLOCK I/O LIMIT', 'UNITS']
+ v['cpu_util'] = fields[1][:-1]
+ if fields[6] == 'GB':
+ v['memory'] = str(float(fields[5]) * 1000)
+ else:
+ v['memory'] = fields[5]
+ if fields[3] == 'GB':
+ v['memory_usage'] = str(float(fields[2]) * 1000)
+ else:
+ v['memory_usage'] = fields[2]
+ v['network_stats'] = []
+ for intf in ['eth0', 'eth1']:
+ cmd = 'sudo docker exec ' + v['id'] + ' ifconfig ' + intf
+ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
+ out,err = p.communicate()
+ if out:
+ intf_stats = {}
+ m = re.search("RX bytes:(\d+)", str(out))
+ if m:
+ intf_stats['rx_bytes'] = m.group(1)
+ m = re.search("TX bytes:(\d+)", str(out))
+ if m:
+ intf_stats['tx_bytes'] = m.group(1)
+ m = re.search("RX packets:(\d+)", str(out))
+ if m:
+ intf_stats['rx_packets'] = m.group(1)
+ m = re.search("TX packets:(\d+)", str(out))
+ if m:
+ intf_stats['tx_packets'] = m.group(1)
+ if intf_stats:
+ intf_stats['intf'] = intf
+ v['network_stats'].append(intf_stats)
+
+def extract_dns_stats_from_all_vcpes(dockercontainers):
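+    # Sends SIGUSR1 (killall -10) to dnsmasq in each container so it dumps its cache
+    # statistics, which are then gathered into the per-container dicts.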
for k,v in dockercontainers.iteritems():
cmd = 'docker exec ' + v['id'] + ' killall -10 dnsmasq'
p = subprocess.Popen (cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
@@ -112,9 +163,11 @@
logger.debug('publish_cpe_stats invoked')
- cpe_container_stats = extract_dns_stats_from_all_vcpes()
+ dockercontainers = get_all_docker_containers()
+    extract_compute_stats_from_all_vcpes(dockercontainers)  # annotates the container dicts in place
+ cpe_container_dns_stats = extract_dns_stats_from_all_vcpes(dockercontainers)
- for k,v in cpe_container_stats.iteritems():
+ for k,v in cpe_container_dns_stats.iteritems():
msg = {'event_type': 'vcpe',
'message_id':six.text_type(uuid.uuid4()),
'publisher_id': cpe_publisher_id,
@@ -128,6 +181,29 @@
producer.publish(msg)
logger.debug('Publishing vcpe event: %s', msg)
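+        # Publish a separate 'vcpe.compute.stats' event carrying whatever compute metrics
+        # were collected for this container.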
+ compute_payload = {}
+ if 'cpu_util' in v:
+ compute_payload['cpu_util']= v['cpu_util']
+ if 'memory' in v:
+ compute_payload['memory']= v['memory']
+ if 'memory_usage' in v:
+ compute_payload['memory_usage']= v['memory_usage']
+ if ('network_stats' in v) and (v['network_stats']):
+ compute_payload['network_stats']= v['network_stats']
+ if compute_payload:
+ compute_payload['vcpe_id'] = k
+ compute_payload['user_id'] = keystone_user_id
+ compute_payload['tenant_id'] = keystone_tenant_id
+ msg = {'event_type': 'vcpe.compute.stats',
+ 'message_id':six.text_type(uuid.uuid4()),
+ 'publisher_id': cpe_publisher_id,
+ 'timestamp':datetime.datetime.now().isoformat(),
+ 'priority':'INFO',
+ 'payload': compute_payload
+ }
+ producer.publish(msg)
+            logger.debug('Publishing vcpe.compute.stats event: %s', msg)
+
if 'cache_size' in v:
msg = {'event_type': 'vcpe.dns.cache.size',
'message_id':six.text_type(uuid.uuid4()),