Merge branch 'master' of ssh://git.planet-lab.org/git/plstackapi
diff --git a/planetstack/core/plus/views.py b/planetstack/core/plus/views.py
index e37faef..6af7e7d 100644
--- a/planetstack/core/plus/views.py
+++ b/planetstack/core/plus/views.py
@@ -24,7 +24,7 @@
 else:
     sys.path.append("/opt/planetstack/hpc_wizard")
 import hpc_wizard
-from planetstack_analytics import DoPlanetStackAnalytics
+from planetstack_analytics import DoPlanetStackAnalytics, PlanetStackAnalytics
 
 class DashboardWelcomeView(TemplateView):
     template_name = 'admin/dashboard/welcome.html'
@@ -192,13 +192,32 @@
     return userSliceInfo
 
 def getCDNOperatorData(randomizeData = False):
-    return hpc_wizard.get_hpc_wizard().get_sites_for_view()
+    bq = PlanetStackAnalytics()
 
-def getPageSummary(request):
-    slice = request.GET.get('slice', None)
-    site = request.GET.get('site', None)
-    node = request.GET.get('node', None)
+    #hpc_sliceNames = bq.service_to_sliceNames("HPC Service")
 
+    rows = bq.get_cached_query_results(bq.compose_latest_query())
+
+    #rows = [x for x in rows if x.get("slice","") in hpc_sliceNames]
+
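+    # Aggregate per (Time, site): max cpu, a count of hostname rows, and the
+    # computed bytes_sent/elapsed throughput, restricted to the HyperCache slice.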
+    rows = bq.postprocess_results(rows, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time","site"])
+
+    bq.merge_datamodel_sites(rows)
+
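+    # Reshape into the dict-keyed-by-site format that the removed
+    # json_hpcdash formatter used to produce.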
+    new_rows = {}
+    for row in rows:
+        new_row = {"lat": float(row.get("lat", 0)),
+                   "long": float(row.get("long", 0)),
+                   "health": 0,
+                   "numNodes": int(row.get("numNodes", 0)),
+                   "numHPCSlivers": int(row.get("count_hostname", 0)),
+                   "siteUrl": str(row.get("url", "")),
+                   "hot": float(row.get("hotness", 0.0)),
+                   "bandwidth": row.get("sum_computed_bytes_sent_div_elapsed", 0),
+                   "load": int(float(row.get("max_avg_cpu", row.get("max_cpu", 0))))}
+        new_rows[str(row["site"])] = new_row
+
+    return new_rows
 
 class SimulatorView(View):
     def get(self, request, **kwargs):
@@ -276,7 +295,7 @@
             node.sliverCount = 0
             for sliver in node.slivers.all():
                  if sliver.slice.id == slice.id:
-                     node.sliverCount = node.sliverCount +1
+                     node.sliverCount = node.sliverCount + 1
 
     # Allocate slivers to nodes
     # for now, assume we want to allocate all slivers from the same site
@@ -395,7 +414,22 @@
 
 class DashboardSummaryAjaxView(View):
     def get(self, request, **kwargs):
-        return HttpResponse(json.dumps(hpc_wizard.get_hpc_wizard().get_summary_for_view()), mimetype='application/javascript')
+        def avg(x):
+            # Guard against an empty list so the summary doesn't divide by zero.
+            return float(sum(x))/len(x) if x else 0.0
+
+        sites = getCDNOperatorData().values()
+
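+        # Only sites that actually host HPC slivers count toward the summary.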
+        sites = [site for site in sites if site["numHPCSlivers"]>0]
+
+        total_slivers = sum([site["numHPCSlivers"] for site in sites])
+        total_bandwidth = sum([site["bandwidth"] for site in sites])
+        average_cpu = int(avg([site["load"] for site in sites]))
+
+        result = {"total_slivers": total_slivers,
+                  "total_bandwidth": total_bandwidth,
+                  "average_cpu": average_cpu}
+
+        return HttpResponse(json.dumps(result), mimetype='application/javascript')
 
 class DashboardAddOrRemoveSliverView(View):
     # TODO: deprecate this view in favor of using TenantAddOrRemoveSliverView
diff --git a/planetstack/core/static/page_analytics.js b/planetstack/core/static/page_analytics.js
index 75221b7..4e961b4 100644
--- a/planetstack/core/static/page_analytics.js
+++ b/planetstack/core/static/page_analytics.js
@@ -49,7 +49,7 @@
 
 function updatePageAnalytics() {
     $.ajax({
-    url : '/analytics/bigquery/?avg=%cpu&count=%hostname' + getObjectQuery(),
+    url : '/analytics/bigquery/?avg=%cpu&count=%hostname&cached=1' + getObjectQuery(),
     dataType : 'json',
     type : 'GET',
     success: function(newData)
@@ -74,7 +74,7 @@
 
 function updatePageBandwidth() {
     $.ajax({
-    url : '/analytics/bigquery/?computed=%bytes_sent/%elapsed' + getObjectQuery(),
+    url : '/analytics/bigquery/?computed=%bytes_sent/%elapsed&cached=1' + getObjectQuery(),
     dataType : 'json',
     type : 'GET',
     success: function(newData)
diff --git a/planetstack/hpc_wizard/bigquery_analytics.py b/planetstack/hpc_wizard/bigquery_analytics.py
index f50a9ac..2e65707 100644
--- a/planetstack/hpc_wizard/bigquery_analytics.py
+++ b/planetstack/hpc_wizard/bigquery_analytics.py
@@ -40,6 +40,15 @@
 mappings = {}
 reverse_mappings = {}
 
+def to_number(s):
+    """ Coerce s to an int or float, returning 0 if it is not numeric. """
+    try:
+        if "." in str(s):
+            return float(s)
+        else:
+            return int(s)
+    except (ValueError, TypeError):
+        return 0
+
 class MappingException(Exception):
     pass
 
@@ -108,6 +117,88 @@
 
         return result
 
+    """ Filter_results, groupby_results, do_computed_fields, and postprocess_results
+        are all used for postprocessing queries. The idea is to do one query that
+        includes the ungrouped and unfiltered data, and cache it for multiple
+        consumers who will filter and group it as necessary.
+
+        TODO: Find a more generalized source for these sorts operations. Perhaps
+        put the results in SQLite and then run SQL queries against it.
+    """
+
+    def filter_results(self, rows, name, value):
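+        """ Keep only the rows whose value for 'name' equals 'value'. """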
+        result = [row for row in rows if row.get(name)==value]
+        return result
+
+    def groupby_results(self, rows, groupBy=[], sum=[], count=[], avg=[], maxi=[]):
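+        """ Group rows on the groupBy fields, producing sum_<f>, avg_<f>,
+            max_<f>, and count_<f> aggregates for the named fields. Returns
+            one row per distinct groupBy key. """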
+        new_rows = {}
+        for row in rows:
+            groupby_key = [row.get(k, None) for k in groupBy]
+
+            if str(groupby_key) not in new_rows:
+                new_row = {}
+                for k in groupBy:
+                    new_row[k] = row.get(k, None)
+
+                new_rows[str(groupby_key)] = new_row
+            else:
+                new_row = new_rows[str(groupby_key)]
+
+            for k in sum:
+                new_row["sum_" + k] = new_row.get("sum_" + k, 0) + to_number(row.get(k,0))
+
+            for k in avg:
+                new_row["avg_" + k] = new_row.get("avg_" + k, 0) + to_number(row.get(k,0))
+                new_row["avg_base_" + k] = new_row.get("avg_base_"+k,0) + 1
+
+            for k in maxi:
+                new_row["max_" + k] = max(new_row.get("max_" + k, 0), to_number(row.get(k,0)))
+
+            for k in count:
+                new_row["count_" + k] = new_row.get("count_" + k, 0) + 1
+
+        for row in new_rows.values():
+            for k in avg:
+                row["avg_" + k] = float(row["avg_" + k]) / row["avg_base_" + k]
+                del row["avg_base_" + k]
+
+        return new_rows.values()
+
+    def do_computed_fields(self, rows, computed=[]):
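+        """ For each "a/b" entry in computed, add a per-row field named
+            computed_a_div_b holding a/b. Returns (newFieldNames, rows). """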
+        computedFieldNames=[]
+        for row in rows:
+            for k in computed:
+                if "/" in k:
+                    parts = k.split("/")
+                    computedFieldName = "computed_" + parts[0].replace("%","")+"_div_"+parts[1].replace("%","")
+                    try:
+                        row[computedFieldName] = to_number(row[parts[0]]) / to_number(row[parts[1]])
+                    except (KeyError, ZeroDivisionError):
+                        pass
+
+                    if computedFieldName not in computedFieldNames:
+                        computedFieldNames.append(computedFieldName)
+        return (computedFieldNames, rows)
+
+    def postprocess_results(self, rows, filter={}, groupBy=[], sum=[], count=[], avg=[], computed=[], maxi=[], maxDeltaTime=None):
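+        """ Apply filtering, an optional recency window (maxDeltaTime, in
+            seconds relative to the newest row), computed fields, and
+            grouping/aggregation to a cached result set. """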
+        sum = [x.replace("%","") for x in sum]
+        count = [x.replace("%","") for x in count]
+        avg = [x.replace("%","") for x in avg]
+        computed = [x.replace("%","") for x in computed]
+        maxi = [x.replace("%","") for x in maxi]
+
+        for (k,v) in filter.items():
+            rows = self.filter_results(rows, k, v)
+
+        if (maxDeltaTime is not None) and rows:
+            maxTime = max([float(row["time"]) for row in rows])
+            rows = [row for row in rows if float(row["time"])>=maxTime-maxDeltaTime]
+
+        (computedFieldNames, rows) = self.do_computed_fields(rows, computed)
+        sum = sum + computedFieldNames
+        rows = self.groupby_results(rows, groupBy, sum, count, avg, maxi)
+        return rows
+
     def remap(self, match):
         if not self.tableName in mappings:
             raise MappingException("no mapping for table %s" % self.tableName)
diff --git a/planetstack/hpc_wizard/planetstack_analytics.py b/planetstack/hpc_wizard/planetstack_analytics.py
index e75e253..8fee0dc 100644
--- a/planetstack/hpc_wizard/planetstack_analytics.py
+++ b/planetstack/hpc_wizard/planetstack_analytics.py
@@ -3,6 +3,7 @@
 import re
 import os
 import sys
+import time
 import json
 import traceback
 import urllib2
@@ -20,6 +21,8 @@
 BLUE_LOAD=5000000
 RED_LOAD=15000000
 
+glo_cached_queries = {}
+
 class PlanetStackAnalytics(BigQueryAnalytics):
     def __init__(self, tableName="demoevents"):
         BigQueryAnalytics.__init__(self, tableName)
@@ -35,11 +38,12 @@
 
         return [slice.name for slice in slices]
 
-    def compose_query(self, slice=None, site=None, node=None, service=None, timeBucket="60", avg=[], sum=[], count=[], computed=[], groupBy=["Time"], orderBy=["Time"], tableName="demoevents"):
-        tablePart = "%s.%s@-3600000--1" % ("vicci", tableName)
+    def compose_query(self, slice=None, site=None, node=None, service=None, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName="demoevents", latest=False):
+        tablePart = "[%s.%s@-3600000--1]" % ("vicci", tableName)
 
         fields = []
         fieldNames = []
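+        # Raw source columns referenced below; collected so the latest=True
+        # self-join can re-select them from the inner table.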
+        srcFieldNames = ["time"]
 
         fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket),str(timeBucket)))
         #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))
@@ -47,14 +51,22 @@
         for fieldName in avg:
             fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
             fieldNames.append("avg_%s" % fieldName.replace("%",""))
+            srcFieldNames.append(fieldName)
 
         for fieldName in sum:
             fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
             fieldNames.append("sum_%s" % fieldName.replace("%",""))
+            srcFieldNames.append(fieldName)
 
         for fieldName in count:
             fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
             fieldNames.append("count_%s" % fieldName.replace("%",""))
+            srcFieldNames.append(fieldName)
+
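+        # "val" fields are selected as-is, with no aggregate function applied.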
+        for fieldName in val:
+            fields.append(fieldName)
+            fieldNames.append(fieldName)
+            srcFieldNames.append(fieldName)
 
         for fieldName in computed:
             operator = "/"
@@ -66,11 +78,14 @@
                 computedFieldName = "computed_" + parts[0].replace("%","")+"_mult_"+parts[1].replace("%","")
             fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
             fieldNames.append(computedFieldName)
+            srcFieldNames.append(parts[0])
+            srcFieldNames.append(parts[1])
 
         for fieldName in groupBy:
             if (fieldName not in ["Time"]):
                 fields.append(fieldName)
                 fieldNames.append(fieldName)
+                srcFieldNames.append(fieldName)
 
         fields = ", ".join(fields)
 
@@ -104,12 +119,20 @@
         else:
             orderBy = ""
 
+        if latest:
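+            # Replace the bare table with a self-join that keeps only the most
+            # recent row for each (hostname, event) pair.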
+            latestFields = ["table1.%s as %s" % (x,x) for x in srcFieldNames]
+            latestFields = ", ".join(latestFields)
+            tablePart = """(SELECT %s FROM %s AS table1
+                            JOIN
+                                (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
+                            ON
+                                table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)
+
         if computed:
-            subQuery = "SELECT %%hostname, %s FROM [%s]" % (fields, tablePart)
+            subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
             if where:
                 subQuery = subQuery + where
             subQuery = subQuery + groupBySub
-            #subQuery = subQuery + " GROUP BY %s,%%hostname" % timeField
 
             sumFields = []
             for fieldName in fieldNames:
@@ -129,7 +152,7 @@
             if orderBy:
                 query = query + orderBy
         else:
-            query = "SELECT %s FROM [%s]" % (fields, tablePart)
+            query = "SELECT %s FROM %s" % (fields, tablePart)
             if where:
                 query = query + " " + where
             if groupBy:
@@ -173,28 +196,6 @@
 
             return ("text/html", new_result)
 
-        elif (format == "json_hpcdash"):
-            new_rows = {}
-            for row in result:
-                new_row = {"lat": float(row.get("lat", 0)),
-                           "long": float(row.get("long", 0)),
-                           "health": 0,
-                           "numNodes": int(row.get("numNodes",0)),
-                           "numHPCSlivers": int(row.get("sum_count_hostname", 0)),
-                           "siteUrl": row.get("url", ""),
-                           "hot": float(row.get("hotness", 0.0)),
-                           "load": int(float(row.get("max_avg_cpu", 0)))}
-                new_rows[row["site"]] = new_row
-            return ("application/javascript", json.dumps(new_rows))
-
-    def only_largest(self, rows, fieldName):
-        """ Given a fieldName, only return the set of rows that had the
-            maximum value of that fieldName.
-        """
-        maxVal = max( [int(row[fieldName]) for row in rows] )
-        new_rows = [row for row in rows if int(row[fieldName])==maxVal]
-        return new_rows
-
     def merge_datamodel_sites(self, rows):
         """ For a query that included "site" in its groupby, merge in the
             opencloud site information.
@@ -212,9 +213,48 @@
             row["url"] = model_site.site_url
             row["numNodes"] = model_site.nodes.count()
 
-            if "max_avg_cpu" in row:
-                cpu=float(row["max_avg_cpu"])/100.0
-                row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
+            max_cpu = row.get("max_avg_cpu", row.get("max_cpu",0))
+            cpu=float(max_cpu)/100.0
+            row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
+
+    def compose_latest_query(self, fieldNames=None, groupByFields=["%hostname", "event"]):
+        """ Compose a query that returns the 'most recent' row for each (hostname, event)
+            pair.
+        """
+
+        if not fieldNames:
+            fieldNames = ["%hostname", "%bytes_sent", "time", "event", "%site", "%elapsed", "%slice", "%cpu"]
+
+        fields = ["table1.%s AS %s" % (x,x) for x in fieldNames]
+        fields = ", ".join(fields)
+
+        tableDesc = "%s.%s" % (self.projectName, self.tableName)
+
+        groupByOn = ["table1.time = latest.maxtime"]
+        for field in groupByFields:
+            groupByOn.append("table1.%s = latest.%s" % (field, field))
+
+        groupByOn = " AND ".join(groupByOn)
+        groupByFields = ", ".join(groupByFields)
+
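+        # Schematically, the generated SQL is:
+        #   SELECT table1.f AS f, ... FROM [project.table@-3600000--1] AS table1
+        #   JOIN (SELECT %hostname, event, MAX(time) AS maxtime
+        #         FROM [project.table@-3600000--1] GROUP BY %hostname, event) AS latest
+        #   ON table1.time = latest.maxtime AND table1.%hostname = latest.%hostname
+        #      AND table1.event = latest.event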
+        base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
+                      (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)
+
+        return base_query
+
+    def get_cached_query_results(self, q):
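+        """ Run query q, caching its rows for 60 seconds keyed on the query
+            text, so multiple dashboard consumers share one BigQuery call. """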
+        global glo_cached_queries
+
+        if q in glo_cached_queries:
+            if (time.time() - glo_cached_queries[q]["time"]) <= 60:
+                print "using cached query"
+                return glo_cached_queries[q]["rows"]
+
+        print "refreshing cached query"
+        result = self.run_query(q)
+        glo_cached_queries[q] = {"time": time.time(), "rows": result}
+
+        return result
 
     def process_request(self, req):
         print req.GET
@@ -237,12 +277,14 @@
         orderBy = self.get_list_from_req(req, "orderBy", ["Time"])
 
         maxRows = req.GET.get("maxRows", None)
-        onlyLargest = req.GET.get("onlyLargest", None)
         mergeDataModelSites = req.GET.get("mergeDataModelSites", None)
 
-        q = self.compose_query(slice, site, node, service, timeField, avg, sum, count, computed, groupBy, orderBy)
+        cached = req.GET.get("cached", None)
+
+        q = self.compose_query(slice, site, node, service, timeField, avg, sum, count, computed, [], groupBy, orderBy)
 
         print q
+
         dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts";
 
         if (format=="dataSourceUrl"):
@@ -257,6 +299,11 @@
 
             return ("application/javascript", result)
 
+        elif (format=="nodata"):
+            result = {"dataSourceUrl": dataSourceUrl, "query": q}
+            result = json.dumps(result);
+            return {"application/javascript", result}
+
         elif (format=="charts"):
             bq_result = self.run_query_raw(q)
 
@@ -295,19 +342,28 @@
             return ("application/javascript", result)
 
         else:
-            result = self.run_query(q)
+            if cached:
+                results = self.get_cached_query_results(self.compose_latest_query())
 
-            if onlyLargest:
-                result = self.only_largest(result, onlyLargest)
+                filter={}
+                if slice:
+                    filter["slice"] = slice
+                if site:
+                    filter["site"] = site
+                if node:
+                    filter["hostname"] = node
 
-            if mergeDataModelSites:
-                self.merge_datamodel_sites(result)
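+                # Grouping by a field no row has ("doesnotexist") collapses
+                # everything into a single aggregate row.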
+                result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=["doesnotexist"])
+            else:
+                result = self.run_query(q)
 
             if maxRows:
                 result = result[-int(maxRows):]
 
-            return self.format_result(format, result, q, dataSourceUrl)
+            if mergeDataModelSites:
+                self.merge_datamodel_sites(result)
 
+            return self.format_result(format, result, q, dataSourceUrl)
 
 def DoPlanetStackAnalytics(request):
     bq = PlanetStackAnalytics()
@@ -318,6 +374,24 @@
 def main():
     bq = PlanetStackAnalytics()
 
+    q = bq.compose_latest_query()
+    results = bq.run_query(q)
+
+    results = bq.postprocess_results(results,
+                                     #filter={"site": "Princeton"},
+                                     groupBy=["site"],
+                                     computed=["bytes_sent/elapsed"],
+                                     sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
+                                     maxDeltaTime=60)
+
+    bq.dump_table(results)
+
+    q=bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
+    print q
+    bq.dump_table(bq.run_query(q))
+
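+    # Stop here for now; the older example queries below are kept for
+    # reference but are unreachable.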
+    sys.exit(0)
+
     q=bq.compose_query(avg=["%cpu","%bandwidth"], count=["%hostname"], slice="HyperCache")
     print q
     bq.dump_table(bq.run_query(q))
@@ -332,15 +406,6 @@
     print q
     bq.dump_table(bq.run_query(q))
 
-    q=bq.compose_query(avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"], service="HPC Service", groupBy=["Time","%site"])
-    print
-    print q
-    result=bq.run_query(q)
-    result = bq.only_largest(result, "Time")
-    bq.merge_datamodel_sites(result)
-    #bq.dump_table(result)
-    print bq.format_result("json_hpcdash", result, q)
-
 if __name__ == "__main__":
     main()