cleanup, WIP: move getCDNContentProviderData/getCDNOperatorData from view_common.py into cdn.py; their getDashboardContext calls stay commented out for now
diff --git a/planetstack/core/dashboard/views/analytics.py b/planetstack/core/dashboard/views/analytics.py
index e1794c1..3904023 100644
--- a/planetstack/core/dashboard/views/analytics.py
+++ b/planetstack/core/dashboard/views/analytics.py
@@ -1,5 +1,6 @@
 from view_common import *
 import random
+from planetstack_analytics import DoPlanetStackAnalytics
 
 class DashboardAnalyticsAjaxView(View):
     def get(self, request, name="hello_world", **kwargs):
diff --git a/planetstack/core/dashboard/views/cdn.py b/planetstack/core/dashboard/views/cdn.py
index c276cca..a4713e5 100644
--- a/planetstack/core/dashboard/views/cdn.py
+++ b/planetstack/core/dashboard/views/cdn.py
@@ -1,4 +1,89 @@
 from view_common import *
+from planetstack_analytics import DoPlanetStackAnalytics, PlanetStackAnalytics, RED_LOAD, BLUE_LOAD
+
+def getCDNContentProviderData():
+    cps = []
+    for dm_cp in ContentProvider.objects.all():
+        cp = {"name": dm_cp.name,
+              "account": dm_cp.account}
+        cps.append(cp)
+
+    return cps
+
+def getCDNOperatorData(randomizeData=False, wait=True):
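+    # returns a dict keyed by site name, holding the per-site fields the CDN
+    # Operations View plots: map coordinates, measured vs. allocated sliver
+    # counts, bandwidth, cpu load, and a derived "hotness" value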
+    HPC_SLICE_NAME = "HyperCache"
+
+    bq = PlanetStackAnalytics()
+
+    rows = bq.get_cached_query_results(bq.compose_cached_query(), wait)
+
+    # wait=False is passed the first time the Dashboard is opened, so we might
+    # not have any rows yet. The dashboard code polls every 30 seconds and
+    # will eventually pick them up.
+
+    if rows:
+        rows = bq.postprocess_results(
+            rows,
+            filter={"event": "hpc_heartbeat"},
+            maxi=["cpu"],
+            count=["hostname"],
+            computed=["bytes_sent/elapsed"],
+            groupBy=["Time", "site"],
+            maxDeltaTime=80)
+
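+        # each post-processed row is expected to carry the keys read below:
+        # "site", "max_avg_cpu" (or "max_cpu"), "count_hostname", and
+        # "sum_computed_bytes_sent_div_elapsed"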
+        # index the statistics rows by site name
+        stats_rows = {}
+        for row in rows:
+            stats_rows[row["site"]] = row
+    else:
+        stats_rows = {}
+
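+    # filter() returns an empty queryset rather than raising when no slice
+    # matches, so a plain truthiness check is enough here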
+    hpc_slices = Slice.objects.filter(name=HPC_SLICE_NAME)
+    if hpc_slices:
+        slice_slivers = list(hpc_slices[0].slivers.all())
+    else:
+        slice_slivers = []
+
+    new_rows = {}
+    for site in Site.objects.all():
+        # compute number of slivers allocated in the data model
+        allocated_slivers = 0
+        for sliver in slice_slivers:
+            if sliver.node.site == site:
+                allocated_slivers += 1
+
+        stats_row = stats_rows.get(site.name, {})
+
+        max_cpu = stats_row.get("max_avg_cpu", stats_row.get("max_cpu", 0))
+        cpu = float(max_cpu) / 100.0
+        hotness = max(0.0, ((cpu * RED_LOAD) - BLUE_LOAD) / (RED_LOAD - BLUE_LOAD))
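+        # hotness maps cpu linearly onto [0, 1]: it is 0 when
+        # cpu*RED_LOAD == BLUE_LOAD (clamped below that) and 1 at 100% cpu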
+
+        try:
+            lat = float(site.location.latitude)
+            lon = float(site.location.longitude)
+        except Exception:
+            # no usable location on record; park the marker at (0, 0)
+            lat = 0
+            lon = 0
+
+        # format it the way the CDN Operations View expects
+        new_row = {"lat": lat,
+                   "long": lon,
+                   "health": 0,
+                   #"numNodes": int(site.nodes.count()),
+                   "activeHPCSlivers": int(stats_row.get("count_hostname", 0)),   # measured number of slivers, from bigquery statistics
+                   "numHPCSlivers": allocated_slivers,                            # allocated number of slivers, from the data model
+                   "siteUrl": str(site.site_url),
+                   "bandwidth": stats_row.get("sum_computed_bytes_sent_div_elapsed", 0),
+                   "load": max_cpu,
+                   "hot": float(hotness)}
+        new_rows[str(site.name)] = new_row
+
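+    # the distance test below assumes haversine() (from view_common) returns
+    # great-circle distance in kilometers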
+    # get rid of sites with 0 slivers that overlap other sites with >0 slivers
+    for (k, v) in list(new_rows.items()):
+        bad = False
+        if v["numHPCSlivers"] == 0:
+            for v2 in new_rows.values():
+                if (v != v2) and (v2["numHPCSlivers"] > 0):
+                    d = haversine(v["lat"], v["long"], v2["lat"], v2["long"])
+                    if d < 100:
+                        bad = True
+                        break
+            if bad:
+                del new_rows[k]
+
+    return new_rows
 
 class DashboardSummaryAjaxView(View):
     def get(self, request, **kwargs):
diff --git a/planetstack/core/dashboard/views/view_common.py b/planetstack/core/dashboard/views/view_common.py
index e2b28a4..060775d 100644
--- a/planetstack/core/dashboard/views/view_common.py
+++ b/planetstack/core/dashboard/views/view_common.py
@@ -19,12 +19,6 @@
 import traceback
 import math
 
-if os.path.exists("/home/smbaker/projects/vicci/cdn/bigquery"):
-    sys.path.append("/home/smbaker/projects/vicci/cdn/bigquery")
-else:
-    sys.path.append("/opt/planetstack/hpc_wizard")
-from planetstack_analytics import DoPlanetStackAnalytics, PlanetStackAnalytics, RED_LOAD, BLUE_LOAD
-
 def getDashboardContext(user, context={}, tableFormat = False):
         context = {}
 
@@ -33,8 +27,8 @@
             context['userSliceInfo'] = userSliceTableFormatter(userSliceData)
         else:
             context['userSliceInfo'] = userSliceData
-        context['cdnData'] = getCDNOperatorData(wait=False)
-        context['cdnContentProviders'] = getCDNContentProviderData()
+#        context['cdnData'] = getCDNOperatorData(wait=False)
+#        context['cdnContentProviders'] = getCDNContentProviderData()
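+#        NOTE: disabled while getCDNOperatorData/getCDNContentProviderData
+#        move from view_common into the cdn dashboard view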
 
         (dashboards, unusedDashboards)= getDashboards(user)
         unusedDashboards=[x for x in unusedDashboards if x!="Customize"]
@@ -99,90 +93,6 @@
 
     return userSliceInfo
 
-def getCDNContentProviderData():
-    cps = []
-    for dm_cp in ContentProvider.objects.all():
-        cp = {"name": dm_cp.name,
-              "account": dm_cp.account}
-        cps.append(cp)
-
-    return cps
-
-def getCDNOperatorData(randomizeData = False, wait=True):
-    HPC_SLICE_NAME = "HyperCache"
-
-    bq = PlanetStackAnalytics()
-
-    rows = bq.get_cached_query_results(bq.compose_cached_query(), wait)
-
-    # wait=False on the first time the Dashboard is opened. This means we might
-    # not have any rows yet. The dashboard code polls every 30 seconds, so it
-    # will eventually pick them up.
-
-    if rows:
-        rows = bq.postprocess_results(rows, filter={"event": "hpc_heartbeat"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time","site"], maxDeltaTime=80)
-
-        # dictionaryize the statistics rows by site name
-        stats_rows = {}
-        for row in rows:
-            stats_rows[row["site"]] = row
-    else:
-        stats_rows = {}
-
-    slice = Slice.objects.filter(name=HPC_SLICE_NAME)
-    if slice:
-        slice_slivers = list(slice[0].slivers.all())
-    else:
-        slice_slivers = []
-
-    new_rows = {}
-    for site in Site.objects.all():
-        # compute number of slivers allocated in the data model
-        allocated_slivers = 0
-        for sliver in slice_slivers:
-            if sliver.node.site == site:
-                allocated_slivers = allocated_slivers + 1
-
-        stats_row = stats_rows.get(site.name,{})
-
-        max_cpu = stats_row.get("max_avg_cpu", stats_row.get("max_cpu",0))
-        cpu=float(max_cpu)/100.0
-        hotness = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
-
-        try:
-           lat=float(site.location.latitude)
-           long=float(site.location.longitude)
-        except:
-           lat=0
-           long=0
-
-        # format it to what that CDN Operations View is expecting
-        new_row = {"lat": lat,
-               "long": long,
-               "health": 0,
-               #"numNodes": int(site.nodes.count()),
-               "activeHPCSlivers": int(stats_row.get("count_hostname", 0)),     # measured number of slivers, from bigquery statistics
-               "numHPCSlivers": allocated_slivers,                              # allocated number of slivers, from data model
-               "siteUrl": str(site.site_url),
-               "bandwidth": stats_row.get("sum_computed_bytes_sent_div_elapsed",0),
-               "load": max_cpu,
-               "hot": float(hotness)}
-        new_rows[str(site.name)] = new_row
-
-    # get rid of sites with 0 slivers that overlap other sites with >0 slivers
-    for (k,v) in new_rows.items():
-        bad=False
-        if v["numHPCSlivers"]==0:
-            for v2 in new_rows.values():
-                if (v!=v2) and (v2["numHPCSlivers"]>=0):
-                    d = haversine(v["lat"],v["long"],v2["lat"],v2["long"])
-                    if d<100:
-                         bad=True
-            if bad:
-                del new_rows[k]
-
-    return new_rows
-
 def slice_increase_slivers(user, user_ip, siteList, slice, image, count, noAct=False):
     sitesChanged = {}