from bigquery_analytics import BigQueryAnalytics, BIGQUERY_AVAILABLE
import datetime
import re
import os
import sys
import time
import json
import traceback
import urllib2

# XXX hardcoded path
sys.path.append("/opt/planetstack")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")
from django.conf import settings
from django import db
from django.db import connection
from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service

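# Thresholds used to map CPU load onto the 0..1 "hotness" value computed in
# merge_datamodel_sites().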
BLUE_LOAD=5000000
RED_LOAD=15000000

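# Module-level cache of query results, keyed by query string
# (see get_cached_query_results).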
glo_cached_queries = {}

class PlanetStackAnalytics(BigQueryAnalytics):
    def __init__(self, tableName=None):
        if not tableName:
            tableName = settings.BIGQUERY_TABLE

        BigQueryAnalytics.__init__(self, tableName)

    def service_to_sliceNames(self, serviceName):
        service=Service.objects.get(name=serviceName)
        try:
            slices = service.slices.all()
        except:
            # BUG in data model -- Slice.service has related name 'service' and
            # it should be 'slices'
            slices = service.service.all()

        return [slice.name for slice in slices]

    def compose_query(self, filter={}, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName=None, latest=False, maxAge=60*60):
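        """ Build a legacy-SQL BigQuery query string over the monitoring table.

            avg/sum/count/computed name source columns to aggregate per Time bucket
            (and any extra groupBy columns); filter restricts by slice, site, node,
            event or service; latest keeps only the newest row per (hostname, event)
            pair; maxAge (seconds) bounds the table decorator's time window.
        """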
        if tableName is None:
            tableName = self.tableName

        maxAge = maxAge * 1000
        tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, maxAge)

        fields = []
        fieldNames = []
        srcFieldNames = ["time"]

        fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket),str(timeBucket)))
        #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))

        for fieldName in avg:
            fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("avg_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in sum:
            fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("sum_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in count:
            fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("count_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        for fieldName in val:
            fields.append(fieldName)
            fieldNames.append(fieldName)
            srcFieldNames.append(fieldName)

        for fieldName in computed:
            # Each computed field is either "a/b" or "a*b"; emit SUM(a) <op> SUM(b).
            parts = fieldName.split("/")
            if len(parts) == 2:
                operator = "/"
                computedFieldName = "computed_" + parts[0].replace("%","") + "_div_" + parts[1].replace("%","")
            else:
                operator = "*"
                parts = fieldName.split("*")
                computedFieldName = "computed_" + parts[0].replace("%","") + "_mult_" + parts[1].replace("%","")
            fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
            fieldNames.append(computedFieldName)
            srcFieldNames.append(parts[0])
            srcFieldNames.append(parts[1])

        for fieldName in groupBy:
            if (fieldName not in ["Time"]):
                fields.append(fieldName)
                fieldNames.append(fieldName)
                srcFieldNames.append(fieldName)

        fields = ", ".join(fields)

        where = []

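        # Build the WHERE clause from the filter dict.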
        if filter.get("slice",None):
            where.append("%%slice='%s'" % filter["slice"])
        if filter.get("site",None):
            where.append("%%site='%s'" % filter["site"])
        if filter.get("node",None):
            where.append("%%hostname='%s'" % filter["node"])
        if filter.get("event",None):
            where.append("event='%s'" % filter["event"])
        if filter.get("service",None):
            sliceNames = self.service_to_sliceNames(filter["service"])
            if sliceNames:
                where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) + ")")

        if where:
            where = " WHERE " + " AND ".join(where)
        else:
            where = ""

        if groupBy:
            groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
            groupBy = " GROUP BY " + ",".join(groupBy)
        else:
            groupBySub = " GROUP BY %hostname"
            groupBy = ""

        if orderBy:
            orderBy = " ORDER BY " + ",".join(orderBy)
        else:
            orderBy = ""

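        # When 'latest' is set, replace the table reference with a self-join that
        # keeps only the newest row for each (hostname, event) pair.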
        if latest:
            latestFields = ["table1.%s as %s" % (x,x) for x in srcFieldNames]
            latestFields = ", ".join(latestFields)
            tablePart = """(SELECT %s FROM %s AS table1
                            JOIN
                                (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
                            ON
                                table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)

        if computed:
            subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
            if where:
                subQuery = subQuery + where
            subQuery = subQuery + groupBySub

            sumFields = []
            for fieldName in fieldNames:
                if fieldName.startswith("avg"):
                    sumFields.append("AVG(%s) as avg_%s" % (fieldName, fieldName))
                    sumFields.append("MAX(%s) as max_%s" % (fieldName, fieldName))
                elif (fieldName.startswith("count")) or (fieldName.startswith("sum")) or (fieldName.startswith("computed")):
                    sumFields.append("SUM(%s) as sum_%s" % (fieldName, fieldName))
                else:
                    sumFields.append(fieldName)

            sumFields = ",".join(sumFields)

            query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy
        else:
            query = "SELECT %s FROM %s" % (fields, tablePart)
            if where:
                query = query + " " + where
            if groupBy:
                query = query + groupBy
            if orderBy:
                query = query + orderBy

        return query

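    # Illustrative example: compose_query(filter={"slice": "HyperCache"}, avg=["%cpu"])
    # produces roughly:
    #   SELECT SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/60)*60) as Time,
    #          AVG(%cpu) as avg_cpu
    #   FROM [vicci.<table>@-3600000--1]
    #   WHERE %slice='HyperCache' GROUP BY Time ORDER BY Time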
    def get_list_from_req(self, req, name, default=[]):
        value = req.GET.get(name, None)
        if not value:
            return default
        value = value.replace("@","%")
        return value.split(",")

    def format_result(self, format, result, query, dataSourceUrl):
        if not BIGQUERY_AVAILABLE:
            msg = "BigQuery Statistics Unavailable"
        else:
            msg = None

        if (format == "json_dicts"):
            result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl, "msg": msg}
            return ("application/javascript", json.dumps(result))

        elif (format == "json_arrays"):
            new_result = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append(row[key])
                new_result.append(new_row)
            new_result = {"query": query, "rows": new_result, "msg": msg}
            return ("application/javascript", json.dumps(new_result))

        elif (format == "html_table"):
            new_rows = []
            for row in result:
                new_row = []
                for key in sorted(row.keys()):
                    new_row.append("<TD>%s</TD>" % str(row[key]))
                new_rows.append("<TR>%s</TR>" % "".join(new_row))

            new_result = "<TABLE>%s</TABLE>" % "\n".join(new_rows)

            return ("text/html", new_result)

    def merge_datamodel_sites(self, rows, slice=None):
        """ For a query that included "site" in its groupby, merge in the
            opencloud site information.
        """

        if slice:
            try:
                slice = Slice.objects.get(name=slice)
            except:
                slice = None

        for row in rows:
            sitename = row["site"]
            try:
                model_site = Site.objects.get(name=sitename)
            except:
                # we didn't find it in the data model
                continue

            allocated_slivers = 0
            if model_site and slice:
                for sliver in slice.slivers.all():
                    if sliver.node.site == model_site:
                        allocated_slivers = allocated_slivers + 1

            row["lat"] = float(model_site.location.latitude)
            row["long"] = float(model_site.location.longitude)
            row["url"] = model_site.site_url
            row["numNodes"] = model_site.nodes.count()
            row["allocated_slivers"] = allocated_slivers

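            # Map the peak CPU reading onto a 0..1 "hotness" scale defined by
            # BLUE_LOAD and RED_LOAD (clamped below at zero).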
            max_cpu = row.get("max_avg_cpu", row.get("max_cpu", 0))
            cpu = float(max_cpu)/100.0
            row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))

    def compose_cached_query(self, querySpec='default'):
        """ Compose a query that returns the 'most recent' row for each (hostname, event)
            pair.

            Note that groupByFields cannot contain any values that are 'Null' or those
            rows will be excluded. For example, if groupByFields includes cp, then
            there will be no libvirt_event rows, since libvirt_event does not have
            cp.

            This means we can't really have 'one query to rule them'. Settle on
            having a couple of different queries, and have the caller specify
            which one he wants.
        """

        fieldNames = ["%hostname", "%bytes_sent", "%bytes_hit", "%healthy", "time", "event", "%site", "%elapsed", "%cpu"]

        if querySpec=="default":
            groupByFields = ["%hostname", "event"]
        elif (querySpec=="hpc"):
            fieldNames.append("%cp")
            groupByFields = ["%hostname", "event", "%cp"]
        else:
            raise ValueError("Unknown queryspec %s" % querySpec)

        fields = ["table1.%s AS %s" % (x,x) for x in fieldNames]
        fields = ", ".join(fields)

        tableDesc = "%s.%s" % (self.projectName, self.tableName)

        groupByOn = ["table1.time = latest.maxtime"]
        for field in groupByFields:
            groupByOn.append("table1.%s = latest.%s" % (field, field))

        groupByOn = " AND ".join(groupByOn)
        groupByFields = ", ".join(groupByFields)

        base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
            (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)

        return base_query

    def get_cached_query_results(self, q, wait=True):
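        # Results are memoized in glo_cached_queries for 60 seconds. If wait is
        # False and no fresh cached copy exists, return None rather than querying.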
        global glo_cached_queries

        if q in glo_cached_queries:
            if (time.time() - glo_cached_queries[q]["time"]) <= 60:
                print "using cached query"
                return glo_cached_queries[q]["rows"]

        if not wait:
            return None

        print "refreshing cached query"
        result = self.run_query(q)
        glo_cached_queries[q] = {"time": time.time(), "rows": result}

        return result

    def process_request(self, req):
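        """ Handle an analytics HTTP request: build a BigQuery query from the GET
            parameters, run it (or reuse a cached result), and render the result in
            the requested format (json_dicts, json_arrays, html_table, charts, raw,
            nodata or dataSourceUrl).
        """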
        print req.GET

        tqx = req.GET.get("tqx", None)

        slice = req.GET.get("slice", None)
        site = req.GET.get("site", None)
        node = req.GET.get("node", None)
        service = req.GET.get("service", None)
        event = req.GET.get("event", "libvirt_heartbeat")
        cp = req.GET.get("cp", None)

        format = req.GET.get("format", "json_dicts")

        timeBucket = int(req.GET.get("timeBucket", 60))
        avg = self.get_list_from_req(req, "avg")
        sum = self.get_list_from_req(req, "sum")
        count = self.get_list_from_req(req, "count")
        computed = self.get_list_from_req(req, "computed")
        groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
        orderBy = self.get_list_from_req(req, "orderBy", ["Time"])

        maxRows = req.GET.get("maxRows", None)
        mergeDataModelSites = req.GET.get("mergeDataModelSites", None)

        maxAge = int(req.GET.get("maxAge", 60*60))

        cached = req.GET.get("cached", None)
        cachedGroupBy = self.get_list_from_req(req, "cachedGroupBy", ["doesnotexist"])

        filter = {}
        if slice:
            filter["slice"] = slice
        if site:
            filter["site"] = site
        if node:
            filter["hostname"] = node
        if event:
            filter["event"] = event
        if cp:
            filter["cp"] = cp

        q = self.compose_query(filter, timeBucket, avg, sum, count, computed, [], groupBy, orderBy, maxAge=maxAge)

        print q

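        # Build a URL that points back at this endpoint with format=charts, for use
        # as a Google Visualization dataSource.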
        dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts"

        if (format=="dataSourceUrl"):
            result = {"dataSourceUrl": dataSourceUrl}
            return ("application/javascript", result)

        elif (format=="raw"):
            result = self.run_query_raw(q)
            result["dataSourceUrl"] = dataSourceUrl

            result = json.dumps(result)

            return ("application/javascript", result)

        elif (format=="nodata"):
            result = {"dataSourceUrl": dataSourceUrl, "query": q}
            result = json.dumps(result)
            return ("application/javascript", result)

        elif (format=="charts"):
            bq_result = self.run_query_raw(q)

            # cloudscrutiny code is probably better!
            table = {}
            table["cols"] = self.schema_to_cols(bq_result["schema"])
            rows = []
            if "rows" in bq_result:
                for row in bq_result["rows"]:
                    rowcols = []
                    for (colnum, col) in enumerate(row["f"]):
                        if (colnum==0):
                            dt = datetime.datetime.fromtimestamp(float(col["v"]))
                            rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
                        else:
                            try:
                                rowcols.append({"v": float(col["v"])})
                            except:
                                rowcols.append({"v": col["v"]})
                    rows.append({"c": rowcols})
            table["rows"] = rows

            if tqx:
                reqId = tqx.strip("reqId:")
            else:
                reqId = "0"

            result = {"status": "okColumnChart", "reqId": reqId, "table": table, "version": "0.6"}

            result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"

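            # json.dumps wrapped the new Date(...) expressions in quotes; strip them
            # so the client evaluates them as JavaScript Date objects.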
            def unquote_it(x): return x.group()[1:-1].replace('\\"', '"')

            p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
            result = p.sub(unquote_it, result)

            return ("application/javascript", result)

        else:
            if cached:
                results = self.get_cached_query_results(self.compose_cached_query(cached))

                result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=cachedGroupBy)
            else:
                result = self.run_query(q)

            if maxRows:
                result = result[-int(maxRows):]

            if mergeDataModelSites:
                self.merge_datamodel_sites(result)

            return self.format_result(format, result, q, dataSourceUrl)

def DoPlanetStackAnalytics(request):
    bq = PlanetStackAnalytics()
    result = bq.process_request(request)

    return result

def main():
    bq = PlanetStackAnalytics(tableName="demoevents")

    q = bq.compose_cached_query()
    results = bq.run_query(q)

    #results = bq.postprocess_results(results,
    #    filter={"slice": "HyperCache"},
    #    groupBy=["site"],
    #    computed=["bytes_sent/elapsed"],
    #    sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
    #    maxDeltaTime=60)

    #results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)

    results = bq.postprocess_results(results, filter={"event": "libvirt_heartbeat"}, avg=["cpu"], count=["hostname"], groupBy=["doesnotexist"])

    bq.dump_table(results)

    sys.exit(0)

    q=bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
    print q
    bq.dump_table(bq.run_query(q))

    q=bq.compose_query(filter={"slice": "HyperCache"}, avg=["%cpu","%bandwidth"], count=["%hostname"])
    print q
    bq.dump_table(bq.run_query(q))

    q=bq.compose_query(computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

    q=bq.compose_query(timeBucket=60*60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
    print
    print q
    bq.dump_table(bq.run_query(q))

if __name__ == "__main__":
    main()