from bigquery_analytics import BigQueryAnalytics, BIGQUERY_AVAILABLE
import datetime
import re
import os
import sys
import time
import json
import traceback
import urllib2

# XXX hardcoded path
sys.path.append("/opt/planetstack")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
from django.conf import settings
from django import db
from django.db import connection
from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service
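# BLUE_LOAD and RED_LOAD are presumably the low/high endpoints of the load scale
# used to derive the per-site "hotness" value in merge_datamodel_sites() below.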
BLUE_LOAD=5000000
RED_LOAD=15000000

glo_cached_queries = {}
class PlanetStackAnalytics(BigQueryAnalytics):
    def __init__(self, tableName=None):
        if not tableName:
            tableName = settings.BIGQUERY_TABLE

        BigQueryAnalytics.__init__(self, tableName)
    def service_to_sliceNames(self, serviceName):
        service = Service.objects.get(name=serviceName)
        try:
            slices = service.slices.all()
        except:
            # BUG in data model -- Slice.service has related name 'service' and
            # it should be 'slices'
            slices = service.service.all()

        return [slice.name for slice in slices]
    def compose_query(self, filter={}, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName=None, latest=False, maxAge=60*60):
        if tableName is None:
            tableName = self.tableName

        maxAge = maxAge * 1000
        tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, maxAge)

        fields = []
        fieldNames = []
        srcFieldNames = ["time"]

        fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket),str(timeBucket)))
        #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))

        for fieldName in avg:
            fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("avg_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in sum:
            fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("sum_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in count:
            fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("count_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in val:
            fields.append(fieldName)
            fieldNames.append(fieldName)
            srcFieldNames.append(fieldName)
        for fieldName in computed:
            # A computed field is either "a/b" or "a*b"; emit SUM(a) <op> SUM(b).
            parts = fieldName.split("/")
            if len(parts) == 2:
                operator = "/"
                computedFieldName = "computed_" + parts[0].replace("%","") + "_div_" + parts[1].replace("%","")
            else:
                operator = "*"
                parts = fieldName.split("*")
                computedFieldName = "computed_" + parts[0].replace("%","") + "_mult_" + parts[1].replace("%","")
            fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
            fieldNames.append(computedFieldName)
            srcFieldNames.append(parts[0])
            srcFieldNames.append(parts[1])
        for fieldName in groupBy:
            if (fieldName not in ["Time"]):
                fields.append(fieldName)
                fieldNames.append(fieldName)
                srcFieldNames.append(fieldName)

        fields = ", ".join(fields)

        where = []
        if filter.get("slice",None):
            where.append("%%slice='%s'" % filter["slice"])
        if filter.get("site",None):
            where.append("%%site='%s'" % filter["site"])
        if filter.get("node",None):
            where.append("%%hostname='%s'" % filter["node"])
        if filter.get("event",None):
            where.append("event='%s'" % filter["event"])
        if filter.get("service",None):
            sliceNames = self.service_to_sliceNames(filter["service"])
            if sliceNames:
                where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) + ")")

        if where:
            where = " WHERE " + " AND ".join(where)
        else:
            where = ""
        if groupBy:
            groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
            groupBy = " GROUP BY " + ",".join(groupBy)
        else:
            groupBySub = " GROUP BY %hostname"
            groupBy = ""

        if orderBy:
            orderBy = " ORDER BY " + ",".join(orderBy)
        else:
            orderBy = ""
        if latest:
            latestFields = ["table1.%s as %s" % (x,x) for x in srcFieldNames]
            latestFields = ", ".join(latestFields)
            tablePart = """(SELECT %s FROM %s AS table1
                            JOIN
                                (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
                            ON
                                table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)
        if computed:
            subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
            if where:
                subQuery = subQuery + where
            subQuery = subQuery + groupBySub

            sumFields = []
            for fieldName in fieldNames:
                if fieldName.startswith("avg"):
                    sumFields.append("AVG(%s) as avg_%s"%(fieldName,fieldName))
                    sumFields.append("MAX(%s) as max_%s"%(fieldName,fieldName))
                elif (fieldName.startswith("count")) or (fieldName.startswith("sum")) or (fieldName.startswith("computed")):
                    sumFields.append("SUM(%s) as sum_%s"%(fieldName,fieldName))
                else:
                    sumFields.append(fieldName)

            sumFields = ",".join(sumFields)

            query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy
        else:
            query = "SELECT %s FROM %s" % (fields, tablePart)
            if where:
                query = query + " " + where
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy

        return query
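    # Illustrative example (hypothetical field names; the '%'-prefixed names are
    # presumably resolved to real column names by BigQueryAnalytics):
    #   compose_query(filter={"slice": "HyperCache"}, avg=["%cpu"], count=["%hostname"])
    # yields roughly:
    #   SELECT SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/60)*60) as Time,
    #          AVG(%cpu) as avg_cpu, COUNT(distinct %hostname) as count_hostname
    #   FROM [vicci.<tableName>@-3600000--1]
    #   WHERE %slice='HyperCache' GROUP BY Time ORDER BY Time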
    def get_list_from_req(self, req, name, default=[]):
        value = req.GET.get(name, None)
        if not value:
            return default
        value = value.replace("@","%")
        return value.split(",")
    def format_result(self, format, result, query, dataSourceUrl):
        if not BIGQUERY_AVAILABLE:
            msg = "BigQuery Statistics Unavailable"
        else:
            msg = None

        if (format == "json_dicts"):
            result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl, "msg": msg}
            return ("application/javascript", json.dumps(result))

        elif (format == "json_arrays"):
            new_result = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append(row[key])
                new_result.append(new_row)
            new_result = {"query": query, "rows": new_result, "msg": msg}
            return ("application/javascript", json.dumps(new_result))

        elif (format == "html_table"):
            new_rows = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append("<TD>%s</TD>" % str(row[key]))
                new_rows.append("<TR>%s</TR>" % "".join(new_row))

            new_result = "<TABLE>%s</TABLE>" % "\n".join(new_rows)

            return ("text/html", new_result)
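    # For format="json_dicts", the returned body is JSON of the form (illustrative):
    #   {"query": "<SQL>", "rows": [{"Time": ..., "avg_cpu": ...}, ...],
    #    "dataSourceUrl": "<url>", "msg": null}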
    def merge_datamodel_sites(self, rows, slice=None):
        """ For a query that included "site" in its groupBy, merge in the
            opencloud site information.
        """

        if slice:
            try:
                slice = Slice.objects.get(name=slice)
            except:
                slice = None

        for row in rows:
            sitename = row["site"]
            try:
                model_site = Site.objects.get(name=sitename)
            except:
                # we didn't find it in the data model
                continue

            allocated_slivers = 0
            if model_site and slice:
                for sliver in slice.slivers.all():
                    if sliver.node.site == model_site:
                        allocated_slivers = allocated_slivers + 1

            row["lat"] = float(model_site.location.latitude)
            row["long"] = float(model_site.location.longitude)
            row["url"] = model_site.site_url
            row["numNodes"] = model_site.nodes.count()
            row["allocated_slivers"] = allocated_slivers

            max_cpu = row.get("max_avg_cpu", row.get("max_cpu",0))
            cpu = float(max_cpu)/100.0
            row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
    def compose_cached_query(self, querySpec='default'):
        """ Compose a query that returns the 'most recent' row for each (hostname, event)
            pair.

            Note that groupByFields cannot contain any values that are 'Null' or those
            rows will be excluded. For example, if groupByFields includes cp, then
            there will be no libvirt_event rows, since libvirt_event does not have
            cp.

            This means we can't really have 'one query to rule them all'. Settle on
            having a couple of different queries, and have the caller specify
            which one he wants.
        """

        fieldNames = ["%hostname", "%bytes_sent", "%bytes_hit", "%healthy", "time", "event", "%site", "%elapsed", "%cpu"]

        if querySpec=="default":
            groupByFields = ["%hostname", "event"]
        elif (querySpec=="hpc"):
            fieldNames.append("%cp")
            groupByFields = ["%hostname", "event", "%cp"]
        else:
            raise ValueError("Unknown queryspec %s" % querySpec)

        fields = ["table1.%s AS %s" % (x,x) for x in fieldNames]
        fields = ", ".join(fields)

        tableDesc = "%s.%s" % (self.projectName, self.tableName)

        groupByOn = ["table1.time = latest.maxtime"]
        for field in groupByFields:
            groupByOn.append("table1.%s = latest.%s" % (field, field))

        groupByOn = " AND ".join(groupByOn)
        groupByFields = ", ".join(groupByFields)

        base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
            (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)

        return base_query
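    # With the default querySpec, the generated query is a self-join of the form
    # (abbreviated):
    #   SELECT table1.%hostname AS %hostname, ..., table1.%cpu AS %cpu
    #   FROM [<project>.<table>@-3600000--1] AS table1
    #   JOIN (SELECT %hostname, event, max(time) as maxtime
    #         FROM [<project>.<table>@-3600000--1] GROUP BY %hostname, event) AS latest
    #   ON table1.time = latest.maxtime AND table1.%hostname = latest.%hostname
    #      AND table1.event = latest.event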
    def get_cached_query_results(self, q, wait=True):
        global glo_cached_queries

        if q in glo_cached_queries:
            if (time.time() - glo_cached_queries[q]["time"]) <= 60:
                print "using cached query"
                return glo_cached_queries[q]["rows"]

        if not wait:
            return None

        print "refreshing cached query"
        result = self.run_query(q)
        glo_cached_queries[q] = {"time": time.time(), "rows": result}

        return result
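    # process_request() drives the HTTP endpoint.  A request such as (hypothetical
    # host and URL path):
    #   http://portal.example.org/analytics/bigquery/?avg=@cpu&count=@hostname&slice=HyperCache&format=json_dicts
    # is translated into a compose_query() call ('@' in list parameters is shorthand
    # for '%'; see get_list_from_req) and the rows are rendered by format_result().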
    def process_request(self, req):
        print req.GET

        tqx = req.GET.get("tqx", None)

        slice = req.GET.get("slice", None)
        site = req.GET.get("site", None)
        node = req.GET.get("node", None)
        service = req.GET.get("service", None)
        event = req.GET.get("event", "libvirt_heartbeat")
        cp = req.GET.get("cp", None)

        format = req.GET.get("format", "json_dicts")

        timeBucket = int(req.GET.get("timeBucket", 60))
        avg = self.get_list_from_req(req, "avg")
        sum = self.get_list_from_req(req, "sum")
        count = self.get_list_from_req(req, "count")
        computed = self.get_list_from_req(req, "computed")
        groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
        orderBy = self.get_list_from_req(req, "orderBy", ["Time"])

        maxRows = req.GET.get("maxRows", None)
        mergeDataModelSites = req.GET.get("mergeDataModelSites", None)

        maxAge = int(req.GET.get("maxAge", 60*60))

        cached = req.GET.get("cached", None)
        cachedGroupBy = self.get_list_from_req(req, "cachedGroupBy", ["doesnotexist"])
        filter = {}
        if slice:
            filter["slice"] = slice
        if site:
            filter["site"] = site
        if node:
            filter["hostname"] = node
        if event:
            filter["event"] = event
        if cp:
            filter["cp"] = cp

        q = self.compose_query(filter, timeBucket, avg, sum, count, computed, [], groupBy, orderBy, maxAge=maxAge)

        print q

        dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts"
        if (format=="dataSourceUrl"):
            result = {"dataSourceUrl": dataSourceUrl}
            return ("application/javascript", result)

        elif (format=="raw"):
            result = self.run_query_raw(q)
            result["dataSourceUrl"] = dataSourceUrl

            result = json.dumps(result)

            return ("application/javascript", result)
        elif (format=="nodata"):
            result = {"dataSourceUrl": dataSourceUrl, "query": q}
            result = json.dumps(result)
            return ("application/javascript", result)
        elif (format=="charts"):
            bq_result = self.run_query_raw(q)

            # cloudscrutiny code is probably better!
            table = {}
            table["cols"] = self.schema_to_cols(bq_result["schema"])
            rows = []
            if "rows" in bq_result:
                for row in bq_result["rows"]:
                    rowcols = []
                    for (colnum,col) in enumerate(row["f"]):
                        if (colnum==0):
                            dt = datetime.datetime.fromtimestamp(float(col["v"]))
                            rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
                        else:
                            try:
                                rowcols.append({"v": float(col["v"])})
                            except:
                                rowcols.append({"v": col["v"]})
                    rows.append({"c": rowcols})
            table["rows"] = rows

            if tqx:
                reqId = tqx.strip("reqId:")
            else:
                reqId = "0"

            result = {"status": "okColumnChart", "reqId": reqId, "table": table, "version": "0.6"}

            result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"

            # The date values were serialized as quoted strings; strip the surrounding
            # JSON quotes so the client evaluates them as JavaScript Date objects.
            def unquote_it(x): return x.group()[1:-1].replace('\\"', '"')

            p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
            result = p.sub(unquote_it, result)

            return ("application/javascript", result)
        else:
            if cached:
                results = self.get_cached_query_results(self.compose_cached_query(cached))

                result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=cachedGroupBy)
            else:
                result = self.run_query(q)

            if maxRows:
                result = result[-int(maxRows):]

            if mergeDataModelSites:
                self.merge_datamodel_sites(result)

            return self.format_result(format, result, q, dataSourceUrl)
def DoPlanetStackAnalytics(request):
    bq = PlanetStackAnalytics()
    result = bq.process_request(request)

    return result
def main():
    bq = PlanetStackAnalytics(tableName="demoevents")

    q = bq.compose_cached_query()
    results = bq.run_query(q)

    #results = bq.postprocess_results(results,
    #    filter={"slice": "HyperCache"},
    #    groupBy=["site"],
    #    computed=["bytes_sent/elapsed"],
    #    sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
    #    maxDeltaTime=60)

    #results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)

    results = bq.postprocess_results(results, filter={"event": "libvirt_heartbeat"}, avg=["cpu"], count=["hostname"], groupBy=["doesnotexist"])

    bq.dump_table(results)

    sys.exit(0)

    q = bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
    print q
    bq.dump_table(bq.run_query(q))
    q = bq.compose_query(filter={"slice": "HyperCache"}, avg=["%cpu","%bandwidth"], count=["%hostname"])
    print q
    bq.dump_table(bq.run_query(q))
    q = bq.compose_query(computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(timeBucket=60*60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

if __name__ == "__main__":
    main()