def display(self):
    """Render page with column chart and data table"""
    # MongoDB handler
    md_handler = MongoDB()

    if hasattr(c, "message"):
        return render("/error.html")

    # Checkbox options
    c.chart_type = request.GET.get("chart", None)
    c.table = request.GET.get("table", "false")
    init = request.GET.get("metric", "true")

    c.chart = "true" if c.chart_type else "false"

    # Aggregation option
    c.agg_type = request.GET.get("metric", "Average")

    # Number of records
    if c.chart == "true" and c.table == "true" and init != "true":
        c.rowcount = len(request.GET) / 3 - 1
    else:
        c.rowcount = len(request.GET) / 3

    # Data table
    c.headers = ["Label",
                 "Full Load Time (ms)",
                 "Total Requests",
                 "Total Size (kB)",
                 "Page Speed Score",
                 "onLoad Event (ms)",
                 "Start Render Time (ms)",
                 "Time to First Byte (ms)",
                 "Total DNS Time (ms)",
                 "Total Transfer Time (ms)",
                 "Total Server Time (ms)",
                 "Avg. Connecting Time (ms)",
                 "Avg. Blocking Time (ms)",
                 "Text Size (kB)",
                 "Media Size (kB)",
                 "Cache Size (kB)",
                 "Redirects",
                 "Bad Requests",
                 "Domains"]

    c.metrics_table = list()
    c.metrics_table.append(list())

    # Chart points
    c.points = str()

    # Aggregator
    aggregator = Aggregator()

    # Test results from database
    for row_index in range(c.rowcount):
        # Parameters from GET request
        label = request.GET["step_" + str(row_index + 1) + "_label"]
        start_ts = request.GET["step_" + str(row_index + 1) + "_start_ts"]
        end_ts = request.GET["step_" + str(row_index + 1) + "_end_ts"]

        # Add label
        c.metrics_table[0].append(label)
        c.points += label + "#"

        # Fetch test results
        condition = {
            "label": label,
            "timestamp": {"$gte": start_ts, "$lte": end_ts}
        }
        documents = md_handler.collection.find(condition,
                                               fields=aggregator.METRICS)

        # Add data row to aggregator
        aggregator.add_row(label, row_index, documents)

    # Aggregated data per column
    column = 1
    for metric in aggregator.METRICS:
        c.metrics_table.append(list())
        c.points = c.points[:-1] + ";"

        for row_index in range(c.rowcount):
            data_list = aggregator.data[metric][row_index]
            value = aggregator.get_aggregated_value(data_list,
                                                    c.agg_type,
                                                    metric)

            c.points += str(value) + "#"
            c.metrics_table[column].append(value)

        column += 1

    # Names of series
    titles = str()
    for title in aggregator.TITLES:
        titles += title + "#"

    # Final chart points
    c.points = titles[:-1] + ";" + c.points[:-1]
    c.points = aggregator.exclude_missing(c.points)

    return render("/display/core.html")
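# Illustrative sketch (not part of the controller): display() serializes the
# chart data into a custom string -- series titles, then row labels, then one
# segment of aggregated values per metric -- with ";" between segments and "#"
# between items. A hypothetical test or client-side helper could decode it
# roughly as below; the helper name and return shape are assumptions for
# illustration only.
def _parse_display_points(points):
    """Split 'Title#...;label#...;value;...' into titles, labels and per-metric rows."""
    segments = [segment.split("#") for segment in points.split(";")]
    titles, labels = segments[0], segments[1]
    values_per_metric = segments[2:]
    return titles, labels, values_per_metric

# Example with assumed data (two metrics, one label):
#   _parse_display_points("Full Load Time#Total Requests;Home;1234;56")
#   -> (["Full Load Time", "Total Requests"], ["Home"], [["1234"], ["56"]])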
def dashboardChart(self):
    """Generate data for timeline chart"""
    # Expected GET parameters: labels, aggMethod, timeFrameInDays, metric

    # Parameters from GET request
    label = h.decode_uri(request.GET["labels"])

    # Aggregation option
    c.agg_type = request.GET.get("aggMethod", "Average")
    timeFrameInDays = int(request.GET.get("timeFrameInDays", "7"))
    metric = h.decode_uri(request.GET["metric"])

    yLabels = str()
    yLabels += metric

    # Metrics
    FIELDS = ["label", "timestamp"]
    FIELDS.append(metric)

    # Read data for timeline from database in custom format (hash separated)
    labels = label.split(",")
    startTs = strftime("%Y-%m-%d 00:00:00",
                       gmtime(time.time() - (timeFrameInDays * 24 * 60 * 60)))
    condition = {
        "label": {'$in': labels},
        "timestamp": {"$gte": startTs}
    }
    results = MongoDB().collection.find(
        condition,
        fields=FIELDS,
        sort=[("label", 1), ("timestamp", 1)])

    # Create a list of documents grouped by their labels
    resultsByLabel = list()
    documents = list()
    prevLabel = results[0]["label"]
    for result in results:
        curLabel = result["label"]
        if curLabel == prevLabel:
            resultsByLabel.append(result)
        else:
            prevLabel = curLabel
            documents.append(resultsByLabel)
            resultsByLabel = list()
            resultsByLabel.append(result)

    # TODO: consider reading the last timestamp when the last result is reached
    # and using it to derive the "days back" window and the timestamp ranges.

    # Add the last group to avoid an off-by-one
    documents.append(resultsByLabel)

    # Aggregator
    aggregator = Aggregator()

    # Now create the aggregate rows by label / timestamp, aggregated daily
    docs = list()
    index = 0
    seriesNames = str()
    points = str()
    categories = str()
    timestamps = list()

    for t in range(0, timeFrameInDays):
        newTime = time.strftime("%Y-%m-%d",
                                gmtime(time.time() - (timeFrameInDays - t) * 24 * 60 * 60))
        categories += newTime + "#"
        timestamps.append(newTime)
    categories = categories[:-1]

    # Initialize a 2D array to capture the aggregate of all tests (labels) for each date
    aggregated_docs = [list() for x in range(len(timestamps))]

    # Loop through the documents list, which is grouped by label
    for doc in documents:
        index += 1
        seriesNames += doc[0]["label"] + "#"
        counter = 0

        for row in doc:
            ts = timestamps[counter]
            timestamp = row["timestamp"][:-9]

            # Date has changed, so add the row and reset for the next loop.
            # Data is getting reversed in the points array somehow; need to check this.
            if timestamp == ts:
                docs.append(row[metric])
                aggregated_docs[counter].append(row[metric])
            else:
                if len(docs) > 0:
                    points += str(aggregator.get_aggregated_value(docs, c.agg_type, c.agg_type) / 1000) + str("#")
                else:
                    points += "n/a#"

                # Set vars for the next loop
                docs = list()
                counter += 1
                if counter >= len(timestamps):
                    break

        points = points[:-1]
        points += ";"

    agg_points = str()
    for x in range(0, len(aggregated_docs)):
        if len(aggregated_docs[x]) > 0:
            agg_points += str(aggregator.get_aggregated_value(aggregated_docs[x], c.agg_type, c.agg_type) / 1000) + str("#")
        else:
            agg_points += "n/a#"

    seriesNames = seriesNames + "Aggregate"
    points = points[:-1]
    agg_points = agg_points[:-1]

    # Final chart points
    c.points = yLabels + ';' + seriesNames + ';' + categories + ';' + points + ';' + agg_points
    return c.points
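# Illustrative sketch (assumed helper, not used by the controller): the payload
# returned by dashboardChart() is "metric;seriesNames;categories;series1;...;aggregate",
# with "#" separating the items inside each ";"-delimited segment and the final
# segment holding the daily aggregate across all labels. The helper name and
# sample values below are made up for illustration.
def _parse_dashboard_chart(payload):
    """Decode the ';'/'#'-separated timeline payload into named parts."""
    segments = [segment.split("#") for segment in payload.split(";")]
    y_label = segments[0][0]        # name of the charted metric
    series_names = segments[1]      # one entry per label, plus "Aggregate"
    categories = segments[2]        # one "YYYY-MM-DD" entry per day
    series_points = segments[3:]    # one value list per series name
    return y_label, series_names, categories, series_points

# Example with assumed data (two days, one label plus the aggregate):
#   _parse_dashboard_chart(
#       "full_load_time;Home#Aggregate;2024-01-01#2024-01-02;1.2#1.4;1.2#1.4")
#   -> ("full_load_time", ["Home", "Aggregate"],
#       ["2024-01-01", "2024-01-02"], [["1.2", "1.4"], ["1.2", "1.4"]])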
def dashboardAggregateTrendingChart(self):
    """Generate data for average trending timeline chart"""
    # Load the config file and its data
    filename = os.path.join(config["app_conf"]["dashboard_config_dir"],
                            config["app_conf"]["dashboard_config_filename"])
    with open(filename) as json_file:
        configData = json.load(json_file)

    aggTrendCharts = str()
    for jsonObj in configData:
        aggTrendCharts = jsonObj["aggTrendCharts"]
    defaultAggMethod = aggTrendCharts["defaultAggMethod"]

    # Parameters from GET request
    tabName = h.decode_uri(request.GET["tabName"])
    metric = h.decode_uri(request.GET["metric"])
    agg_type = request.GET.get("aggMethod", defaultAggMethod)
    timeFrameInDays = int(request.GET.get("timeFrameInDays", "7"))
    startTs = strftime("%Y-%m-%d 00:00:00",
                       gmtime(time.time() - (timeFrameInDays * 24 * 60 * 60)))

    try:
        tabData = aggTrendCharts[tabName]
        charts = tabData["charts"]
    except KeyError:
        # No configured chart for this tab, return empty points
        return ""

    # Set up the chart data
    seriesNames = str()
    points = str()
    categories = str()
    timestamps = list()

    # Aggregator
    aggregator = Aggregator()

    for t in range(0, timeFrameInDays):
        newTime = time.strftime("%Y-%m-%d",
                                gmtime(time.time() - (timeFrameInDays - t) * 24 * 60 * 60))
        categories += newTime + "#"
        timestamps.append(newTime)
    categories = categories[:-1]

    # Loop over the configured charts for this tab:
    # get the title and tests for aggregating,
    # then query the dataset and aggregate.
    for chart in charts:
        labels = chart["labels"]
        seriesNames += chart["title"] + "#"
        counter = 0

        # Fields returned from the datastore
        fields = ["label", "timestamp"]
        fields.append(metric)

        condition = {
            "label": {'$in': labels},
            "timestamp": {"$gte": startTs}
        }
        results = MongoDB().collection.find(
            condition,
            fields=fields,
            sort=[("timestamp", 1)])

        # Initialize a list for capturing the resulting metric data to analyze
        aggregated_docs = list()

        for result in results:
            ts = timestamps[counter]
            timestamp = result["timestamp"][:-9]

            # Date has changed, so add the row and reset for the next loop.
            # Data is getting reversed in the points array somehow; need to check this.
            if timestamp == ts:
                aggregated_docs.append(result[metric])
            else:
                if len(aggregated_docs) > 0:
                    points += str(aggregator.get_aggregated_value(aggregated_docs, agg_type, agg_type) / 1000) + str("#")
                else:
                    points += "n/a#"

                # Set vars for the next loop
                aggregated_docs = list()
                counter += 1
                if counter >= len(timestamps):
                    break

        points = points[:-1]
        points += ";"

    points = points[:-1]

    # Final chart points
    points = "Time" + ';' + seriesNames + ';' + categories + ';' + points
    return points
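# Illustrative sketch (assumption): judging from the lookups above, the
# dashboard config file is a JSON array whose objects carry an "aggTrendCharts"
# mapping keyed by tab name, each tab holding a "charts" list of
# {"title", "labels"} entries. The tab name, chart title and labels below are
# made-up placeholders, not values from the project.
_EXAMPLE_DASHBOARD_CONFIG = [
    {
        "aggTrendCharts": {
            "defaultAggMethod": "Average",
            "Search": {                      # hypothetical tab name
                "charts": [
                    {"title": "Search pages",
                     "labels": ["search_home", "search_results"]},
                ],
            },
        },
    },
]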
def display(self):
    """Render page with column chart and data table"""
    # MongoDB handler
    md_handler = MongoDB()

    if hasattr(c, "message"):
        return render("/error.html")

    # Checkbox options
    c.chart_type = request.GET.get("chart", None)
    c.table = request.GET.get("table", "false")
    init = request.GET.get("metric", "true")

    c.chart = "true" if c.chart_type else "false"

    # Metric option
    c.metric = request.GET.get("metric", "Average")

    # Number of records
    if c.chart == "true" and c.table == "true" and init != "true":
        c.rowcount = len(request.GET) / 3 - 1
    else:
        c.rowcount = len(request.GET) / 3

    # Data containers
    METRICS = ("full_load_time", "requests", "total_size", "ps_scores",
               "onload_event", "start_render_time", "time_to_first_byte",
               "total_dns_time", "total_transfer_time", "total_server_time",
               "avg_connecting_time", "avg_blocking_time", "text_size",
               "media_size", "cache_size", "redirects", "bad_requests",
               "domains", "api_time")

    c.headers = ["Label",
                 "Full Load Time (ms)",
                 "Total Requests",
                 "Total Size (kB)",
                 "Page Speed Score",
                 "onLoad Event (ms)",
                 "Start Render Time (ms)",
                 "Time to First Byte (ms)",
                 "Total DNS Time (ms)",
                 "Total Transfer Time (ms)",
                 "Total Server Time (ms)",
                 "Avg. Connecting Time (ms)",
                 "Avg. Blocking Time (ms)",
                 "Text Size (kB)",
                 "Media Size (kB)",
                 "Cache Size (kB)",
                 "Redirects",
                 "Bad Requests",
                 "Domains",
                 "API Time (ms)"]

    TITLES = ["Full Load Time", "Total Requests", "Total Size",
              "Page Speed Score", "onLoad Event", "Start Render Time",
              "Time to First Byte", "Total DNS Time", "Total Transfer Time",
              "Total Server Time", "Avg. Connecting Time", "Avg. Blocking Time",
              "Text Size", "Media Size", "Cache Size", "Redirects",
              "Bad Requests", "Domains", "API Time"]

    # Set of metrics to exclude (due to missing data)
    exclude = set()

    data = dict()
    for metric in METRICS:
        data[metric] = list()
    data["label"] = list()

    # Data table
    c.metrics_table = list()
    c.metrics_table.append(list())

    # Test results from database
    for row in range(c.rowcount):
        # Parameters from GET request
        label = request.GET["step_" + str(row + 1) + "_label"]
        start_ts = request.GET["step_" + str(row + 1) + "_start_ts"]
        end_ts = request.GET["step_" + str(row + 1) + "_end_ts"]

        # Label
        c.metrics_table[0].append(label)
        data["label"].append(row)
        data["label"][row] = label

        # Fetch test results
        condition = {"label": label,
                     "timestamp": {"$gte": start_ts, "$lte": end_ts}}
        documents = md_handler.collection.find(condition, fields=METRICS)

        for metric in METRICS:
            data[metric].append(row)
            data[metric][row] = list()

        for document in documents:
            for metric in METRICS:
                if metric != "ps_scores":
                    data[metric][row].append(document[metric])
                else:
                    data[metric][row].append(document[metric]["Total Score"])

    # Aggregation
    c.points = str()
    for row in range(c.rowcount):
        c.points += data["label"][row] + "#"

    column = 1
    agg_handler = Aggregator()

    for metric in METRICS:
        c.metrics_table.append(list())
        c.points = c.points[:-1] + ";"

        for row in range(c.rowcount):
            if c.metric == "Average":
                value = agg_handler.average(data[metric][row])
            elif c.metric == "Minimum":
                value = agg_handler.minimum(data[metric][row])
            elif c.metric == "Maximum":
                value = agg_handler.maximum(data[metric][row])
            elif c.metric == "90th Percentile":
                value = agg_handler.percentile(data[metric][row], 0.9)
            elif c.metric == "Median":
                value = agg_handler.percentile(data[metric][row], 0.5)

            if value == "n/a":
                exclude.add(metric)
            else:
                c.points += str(value) + "#"

            c.metrics_table[column].append(value)

        column += 1

    # Update list of titles
    if "onload_event" in exclude:
        TITLES.pop(TITLES.index("onLoad Event"))
    if "start_render_time" in exclude:
        TITLES.pop(TITLES.index("Start Render Time"))

    header = str()
    for title in TITLES:
        header += title + "#"

    c.points = header[:-1] + ";" + c.points[:-1]

    return render("/display/core.html")