from bigquery_analytics import BigQueryAnalytics
import datetime
import re
import os
import sys
import time
import json
import traceback
import urllib2

if os.path.exists("/home/smbaker/projects/vicci/plstackapi/planetstack"):
    sys.path.append("/home/smbaker/projects/vicci/plstackapi/planetstack")
else:
    sys.path.append("/opt/planetstack")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
from django.conf import settings
from django import db
from django.db import connection
from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service

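# Load thresholds used by merge_datamodel_sites() to map a site's CPU load onto
# a 0.0-1.0 "hotness" value.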
BLUE_LOAD=5000000
RED_LOAD=15000000

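# In-memory cache of BigQuery results, keyed by query string. Entries are reused
# by get_cached_query_results() for up to 60 seconds before the query is re-run.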
glo_cached_queries = {}

class PlanetStackAnalytics(BigQueryAnalytics):
    def __init__(self, tableName=None):
        if not tableName:
            tableName = settings.BIGQUERY_TABLE

        BigQueryAnalytics.__init__(self, tableName)

    def service_to_sliceNames(self, serviceName):
        service = Service.objects.get(name=serviceName)
        try:
            slices = service.slices.all()
        except:
            # BUG in data model -- Slice.service has related name 'service' and
            # it should be 'slices'
            slices = service.service.all()

        return [slice.name for slice in slices]

    def compose_query(self, filter={}, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName=None, latest=False, maxAge=60*60):
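        """ Build a legacy-SQL BigQuery query string over the analytics table.

            filter     -- dict of equality filters (slice, site, node, event, service)
            timeBucket -- width of the time-aggregation bucket, in seconds
            avg, sum, count -- lists of fields to aggregate with AVG / SUM / COUNT(distinct)
            computed   -- list of "a/b" (or "a*b") expressions, emitted as SUM(a)/SUM(b)
            val        -- fields to select unaggregated
            groupBy, orderBy -- column lists for GROUP BY / ORDER BY
            tableName  -- defaults to self.tableName
            latest     -- if True, only consider the newest row per (hostname, event)
            maxAge     -- only consider rows added within the last maxAge seconds
        """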
        if tableName is None:
            tableName = self.tableName

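        # maxAge is given in seconds; the table decorator below ([dataset.table@-N--1])
        # takes a relative window in milliseconds and restricts the query to rows
        # added within that window.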
        maxAge = maxAge * 1000
        tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, maxAge)

        fields = []
        fieldNames = []
        srcFieldNames = ["time"]

        fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket), str(timeBucket)))
        #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))

        for fieldName in avg:
            fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("avg_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in sum:
            fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("sum_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in count:
            fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("count_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in val:
            fields.append(fieldName)
            fieldNames.append(fieldName)
            srcFieldNames.append(fieldName)

        for fieldName in computed:
            parts = fieldName.split("/")
            if len(parts) == 2:
                operator = "/"
                computedFieldName = "computed_" + parts[0].replace("%","") + "_div_" + parts[1].replace("%","")
            else:
                operator = "*"
                parts = fieldName.split("*")
                computedFieldName = "computed_" + parts[0].replace("%","") + "_mult_" + parts[1].replace("%","")
            fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
            fieldNames.append(computedFieldName)
            srcFieldNames.append(parts[0])
            srcFieldNames.append(parts[1])

        for fieldName in groupBy:
            if (fieldName not in ["Time"]):
                fields.append(fieldName)
                fieldNames.append(fieldName)
                srcFieldNames.append(fieldName)

        fields = ", ".join(fields)

        where = []

        if filter.get("slice", None):
            where.append("%%slice='%s'" % filter["slice"])
        if filter.get("site", None):
            where.append("%%site='%s'" % filter["site"])
        if filter.get("node", None):
            where.append("%%hostname='%s'" % filter["node"])
        if filter.get("event", None):
            where.append("event='%s'" % filter["event"])
        if filter.get("service", None):
            sliceNames = self.service_to_sliceNames(filter["service"])
            if sliceNames:
                where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) + ")")

        if where:
            where = " WHERE " + " AND ".join(where)
        else:
            where = ""

        if groupBy:
            groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
            groupBy = " GROUP BY " + ",".join(groupBy)
        else:
            groupBySub = " GROUP BY %hostname"
            groupBy = ""

        if orderBy:
            orderBy = " ORDER BY " + ",".join(orderBy)
        else:
            orderBy = ""

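        # "latest" wraps the table in a self-join that keeps only the newest row
        # per (hostname, event) pair before any aggregation is applied.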
        if latest:
            latestFields = ["table1.%s as %s" % (x, x) for x in srcFieldNames]
            latestFields = ", ".join(latestFields)
            tablePart = """(SELECT %s FROM %s AS table1
                            JOIN
                                (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
                            ON
                                table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)

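        # Computed (ratio) fields are first aggregated per host in a subquery,
        # then summed/averaged across hosts and re-grouped in the outer query.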
        if computed:
            subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
            if where:
                subQuery = subQuery + where
            subQuery = subQuery + groupBySub

            sumFields = []
            for fieldName in fieldNames:
                if fieldName.startswith("avg"):
                    sumFields.append("AVG(%s) as avg_%s" % (fieldName, fieldName))
                    sumFields.append("MAX(%s) as max_%s" % (fieldName, fieldName))
                elif (fieldName.startswith("count")) or (fieldName.startswith("sum")) or (fieldName.startswith("computed")):
                    sumFields.append("SUM(%s) as sum_%s" % (fieldName, fieldName))
                else:
                    sumFields.append(fieldName)

            sumFields = ",".join(sumFields)

            query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy
        else:
            query = "SELECT %s FROM %s" % (fields, tablePart)
            if where:
                query = query + " " + where
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy

        return query

    def get_list_from_req(self, req, name, default=[]):
        value = req.GET.get(name, None)
        if not value:
            return default
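        # "@" is accepted in request parameters as a stand-in for "%" (the prefix
        # used on mapped field names such as %cpu), since a literal "%" would need
        # URL-escaping.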
        value = value.replace("@", "%")
        return value.split(",")

    def format_result(self, format, result, query, dataSourceUrl):
        if (format == "json_dicts"):
            result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl}
            return ("application/javascript", json.dumps(result))

        elif (format == "json_arrays"):
            new_result = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append(row[key])
                new_result.append(new_row)
            new_result = {"query": query, "rows": new_result}
            return ("application/javascript", json.dumps(new_result))

        elif (format == "html_table"):
            new_rows = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append("<TD>%s</TD>" % str(row[key]))
                new_rows.append("<TR>%s</TR>" % "".join(new_row))

            new_result = "<TABLE>%s</TABLE>" % "\n".join(new_rows)

            return ("text/html", new_result)

    def merge_datamodel_sites(self, rows, slice=None):
        """ For a query that included "site" in its groupBy, merge in the
            opencloud site information.
        """

        if slice:
            try:
                slice = Slice.objects.get(name=slice)
            except:
                slice = None

        for row in rows:
            sitename = row["site"]
            try:
                model_site = Site.objects.get(name=sitename)
            except:
                # we didn't find it in the data model
                continue

            allocated_slivers = 0
            if model_site and slice:
                for sliver in slice.slivers.all():
                    if sliver.node.site == model_site:
                        allocated_slivers = allocated_slivers + 1

            row["lat"] = float(model_site.location.latitude)
            row["long"] = float(model_site.location.longitude)
            row["url"] = model_site.site_url
            row["numNodes"] = model_site.nodes.count()
            row["allocated_slivers"] = allocated_slivers

            max_cpu = row.get("max_avg_cpu", row.get("max_cpu", 0))
            cpu = float(max_cpu) / 100.0
            row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD) / (RED_LOAD - BLUE_LOAD))

    def compose_cached_query(self, querySpec='default'):
        """ Compose a query that returns the most recent row for each (hostname, event)
            pair.

            Note that groupByFields cannot contain any fields that are NULL in some
            rows, or those rows will be excluded. For example, if groupByFields
            includes cp, then no libvirt_event rows will be returned, since
            libvirt_event rows do not have a cp value.

            This means we can't really have one query to rule them all. Instead we
            settle on a couple of different queries and let the caller specify,
            via querySpec, which one to use.
        """

        fieldNames = ["%hostname", "%bytes_sent", "%bytes_hit", "%healthy", "time", "event", "%site", "%elapsed", "%cpu"]

        if querySpec == "default":
            groupByFields = ["%hostname", "event"]
        elif (querySpec == "hpc"):
            fieldNames.append("%cp")
            groupByFields = ["%hostname", "event", "%cp"]
        else:
            raise ValueError("Unknown queryspec %s" % querySpec)

        fields = ["table1.%s AS %s" % (x, x) for x in fieldNames]
        fields = ", ".join(fields)

        tableDesc = "%s.%s" % (self.projectName, self.tableName)

        groupByOn = ["table1.time = latest.maxtime"]
        for field in groupByFields:
            groupByOn.append("table1.%s = latest.%s" % (field, field))

        groupByOn = " AND ".join(groupByOn)
        groupByFields = ", ".join(groupByFields)

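        # Self-join the last hour of rows ([table@-3600000--1] is a relative range
        # decorator, in milliseconds) and keep, for each combination of
        # groupByFields, the row whose time equals the newest time observed.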
        base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
            (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)

        return base_query

    def get_cached_query_results(self, q, wait=True):
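        """ Return the cached rows for query q if they are less than 60 seconds old;
            otherwise run the query and refresh the cache. If wait is False and no
            sufficiently fresh entry exists, return None instead of running the query.
        """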
        global glo_cached_queries

        if q in glo_cached_queries:
            if (time.time() - glo_cached_queries[q]["time"]) <= 60:
                print "using cached query"
                return glo_cached_queries[q]["rows"]

        if not wait:
            return None

        print "refreshing cached query"
        result = self.run_query(q)
        glo_cached_queries[q] = {"time": time.time(), "rows": result}

        return result

    def process_request(self, req):
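        """ Handle an analytics HTTP request.

            The GET parameters select the aggregations (avg, sum, count, computed),
            the filters (slice, site, node, event, cp), the time bucketing, and the
            output format; the return value is a (content_type, body) tuple.

            A hypothetical example request (the actual URL prefix depends on how
            this view is routed):

                .../analytics/?avg=@cpu&count=@hostname&format=json_dicts

            where "@" in field names is translated to "%" by get_list_from_req().
        """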
        print req.GET

        tqx = req.GET.get("tqx", None)

        slice = req.GET.get("slice", None)
        site = req.GET.get("site", None)
        node = req.GET.get("node", None)
        service = req.GET.get("service", None)
        event = req.GET.get("event", "libvirt_heartbeat")
        cp = req.GET.get("cp", None)

        format = req.GET.get("format", "json_dicts")

        timeBucket = int(req.GET.get("timeBucket", 60))
        avg = self.get_list_from_req(req, "avg")
        sum = self.get_list_from_req(req, "sum")
        count = self.get_list_from_req(req, "count")
        computed = self.get_list_from_req(req, "computed")
        groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
        orderBy = self.get_list_from_req(req, "orderBy", ["Time"])

        maxRows = req.GET.get("maxRows", None)
        mergeDataModelSites = req.GET.get("mergeDataModelSites", None)

        maxAge = int(req.GET.get("maxAge", 60*60))

        cached = req.GET.get("cached", None)
        cachedGroupBy = self.get_list_from_req(req, "cachedGroupBy", ["doesnotexist"])

        filter = {}
        if slice:
            filter["slice"] = slice
        if site:
            filter["site"] = site
        if node:
            filter["hostname"] = node
        if event:
            filter["event"] = event
        if cp:
            filter["cp"] = cp

        q = self.compose_query(filter, timeBucket, avg, sum, count, computed, [], groupBy, orderBy, maxAge=maxAge)

        print q

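        # URL that replays this request with format=charts (the requested format is
        # preserved as origFormat), suitable for use as a Google Charts data source.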
        dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts"

        if (format == "dataSourceUrl"):
            result = {"dataSourceUrl": dataSourceUrl}
            return ("application/javascript", result)

        elif (format == "raw"):
            result = self.run_query_raw(q)
            result["dataSourceUrl"] = dataSourceUrl

            result = json.dumps(result)

            return ("application/javascript", result)

        elif (format == "nodata"):
            result = {"dataSourceUrl": dataSourceUrl, "query": q}
            result = json.dumps(result)
            return ("application/javascript", result)

        elif (format == "charts"):
            bq_result = self.run_query_raw(q)

            # cloudscrutiny code is probably better!
            table = {}
            table["cols"] = self.schema_to_cols(bq_result["schema"])
            rows = []
            if "rows" in bq_result:
                for row in bq_result["rows"]:
                    rowcols = []
                    for (colnum, col) in enumerate(row["f"]):
                        if (colnum == 0):
                            dt = datetime.datetime.fromtimestamp(float(col["v"]))
                            rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
                        else:
                            try:
                                rowcols.append({"v": float(col["v"])})
                            except:
                                rowcols.append({"v": col["v"]})
                    rows.append({"c": rowcols})
            table["rows"] = rows

            if tqx:
                reqId = tqx.strip("reqId:")
            else:
                reqId = "0"

            result = {"status": "okColumnChart", "reqId": reqId, "table": table, "version": "0.6"}

            result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"

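            # The first column of each row was emitted as the string 'new Date("...")';
            # unquote those occurrences so the response contains real JavaScript Date
            # constructors rather than string literals.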
            def unquote_it(x): return x.group()[1:-1].replace('\\"', '"')

            p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
            result = p.sub(unquote_it, result)

            return ("application/javascript", result)

        else:
            if cached:
                results = self.get_cached_query_results(self.compose_cached_query(cached))

                result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=cachedGroupBy)
            else:
                result = self.run_query(q)

            if maxRows:
                result = result[-int(maxRows):]

            if mergeDataModelSites:
                self.merge_datamodel_sites(result)

            return self.format_result(format, result, q, dataSourceUrl)

def DoPlanetStackAnalytics(request):
    bq = PlanetStackAnalytics()
    result = bq.process_request(request)

    return result

def main():
    bq = PlanetStackAnalytics(tableName="demoevents")

    q = bq.compose_cached_query()
    results = bq.run_query(q)

    #results = bq.postprocess_results(results,
    #    filter={"slice": "HyperCache"},
    #    groupBy=["site"],
    #    computed=["bytes_sent/elapsed"],
    #    sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
    #    maxDeltaTime=60)

    #results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)

    results = bq.postprocess_results(results, filter={"event": "libvirt_heartbeat"}, avg=["cpu"], count=["hostname"], groupBy=["doesnotexist"])

    bq.dump_table(results)

    sys.exit(0)

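    # The examples below are unreachable (left in place for reference):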
    q = bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(avg=["%cpu", "%bandwidth"], count=["%hostname"], filter={"slice": "HyperCache"})
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

    q = bq.compose_query(timeBucket=60*60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

if __name__ == "__main__":
    main()