def rearrange_linpack_data(spreadsheetId, range="A:F"):
    """
    Group linpack rows by machine name, each group sorted by system size.

    :spreadsheetId: id of the spreadsheet to read from
    :range: range to read (it will be mostly the sheet name)
    :returns: list of tables, one per machine, each prefixed with the header row
    """
    rows = read_sheet(spreadsheetId, range=range)
    # Drop blank rows, keep the very first row as the shared header.
    rows = [row for row in rows if row]
    header_row = [rows[0]]
    # Repeated "System" header lines are not data — remove them all.
    data_rows = [row for row in rows if row[0] != "System"]

    # Machine name is the part before the first ".", e.g. "m5.2xlarge" -> "m5".
    def machine(row):
        return row[0].split(".")[0]

    # System size is the leading integer of the part after ".", e.g. "2xlarge" -> 2.
    def size(row):
        return int(row[0].split(".")[1].split("x")[0])

    grouped = []
    for _, members in groupby(data_rows, key=machine):
        grouped.append(header_row + sorted(members, key=size))
    return grouped
def compare_pig_results(spreadsheets):
    """
    Build a side-by-side comparison of pig results from two spreadsheets.

    Reads the "pig" sheet of each input spreadsheet, interleaves matching
    tables, writes the combined rows to a new spreadsheet and graphs them.

    :spreadsheets: iterable of two spreadsheet ids to compare
    :returns: the combined comparison rows
    """
    test_name = "pig"
    titles = []
    values = []
    results = []
    for sheet_id in spreadsheets:
        values.append(read_sheet(sheet_id, range=test_name))
        titles.append(get_sheet(sheet_id, range=[])["properties"]["title"])
    spreadsheet_name = " vs ".join(titles)

    # Split each sheet into tables: consecutive non-empty rows form one table.
    for index, value in enumerate(values):
        values[index] = (
            list(g) for k, g in groupby(value, key=lambda row: row != []) if k
        )

    for table_a, table_b in zip(values[0], values[1]):
        results.append([""])
        # Only merge tables whose header rows match.
        if table_a[0] == table_b[0]:
            results.append(table_a[0])
            results = combine_two_array_alternating(
                results, table_a[1:], table_b[1:])

    spreadsheetId = create_spreadsheet(spreadsheet_name, test_name)
    append_to_sheet(spreadsheetId, results, test_name)
    graph_pig_data(spreadsheetId, test_name)
    print(f"https://docs.google.com/spreadsheets/d/{spreadsheetId}")
    return results
def compare_hammerdb_results(spreadsheets, test_name):
    """
    Build a side-by-side comparison of hammerdb results from two spreadsheets.

    Reads the *test_name* sheet of each input spreadsheet, interleaves tables
    from the same system family, writes the combined rows to a new
    spreadsheet and graphs them.

    :spreadsheets: iterable of two spreadsheet ids to compare
    :test_name: sheet name to read and to create in the comparison spreadsheet
    :returns: the combined comparison rows
    """
    titles = []
    values = []
    results = []
    for sheet_id in spreadsheets:
        values.append(read_sheet(sheet_id, range=test_name))
        titles.append(get_sheet(sheet_id, range=[])["properties"]["title"])
    spreadsheet_name = " vs ".join(titles)

    # Split each sheet into tables: consecutive non-empty rows form one table.
    for index, value in enumerate(values):
        values[index] = (
            list(g) for k, g in groupby(value, key=lambda row: row != []) if k
        )

    for table_a, table_b in zip(values[0], values[1]):
        results.append([""])
        # Make sure it's same system family
        # TODO: Maybe check for whole type instead of family
        if table_a[0][1].split(".")[0] == table_b[0][1].split(".")[0]:
            results = combine_two_array_alternating(results, table_a, table_b)

    spreadsheetId = create_spreadsheet(spreadsheet_name, test_name)
    append_to_sheet(spreadsheetId, results, test_name)
    graph_hammerdb_data(spreadsheetId, test_name)
    print(f"https://docs.google.com/spreadsheets/d/{spreadsheetId}")
    return results
def compare_linpack_results(spreadsheets):
    """
    Compare linpack results between an 8.2 and an 8.3 spreadsheet.

    Matches systems by name across the two "linpack" sheets, computes the
    percentage difference in GFLOPS and in price/performance for each match,
    writes the combined table to a new spreadsheet and graphs it.

    :spreadsheets: two spreadsheet ids, ordered [8.2 results, 8.3 results]
    :returns: the combined comparison rows
    """
    values = []
    results = []
    test_name = "linpack_8.2-8.3"
    for spreadsheetId in spreadsheets:
        values.append(read_sheet(spreadsheetId, range="linpack"))
    for v in values[0]:
        if v[0] == "System":
            # Emit one header row per table in the source sheet.
            results.append([
                "System",
                "GFLOPS - 8.2",
                "GFLOPS - 8.3",
                "% Diff",
                "GFLOP Scaling - 8.2",
                "GFLOP Scaling - 8.3",
                "Cost / hr",
                "Price/Perf - 8.2",
                "Price/Perf - 8.3",
                "Price/Perf % Diff",
            ])
        for y in values[1]:
            if y[0] == "System":
                continue
            # Substring match pairs the same system across the two sheets.
            if v[0] in y[0]:
                price_perf = []
                # perc_diff is a ratio (e.g. 0.05 == 5%), not a percentage.
                perc_diff = (float(y[1]) - float(v[1])) / float(v[1])
                # NOTE(review): columns assumed to be
                # [system, gflops, scaling, cost/hr] — confirm against the
                # sheet layout produced by the linpack summariser.
                price_perf.append(float(v[1]) / float(v[3]))
                price_perf.append(float(y[1]) / float(y[3]))
                price_perf_diff = (float(price_perf[1]) - float(price_perf[0])) / float(price_perf[0])
                results.append([
                    v[0],
                    v[1],
                    y[1],
                    perc_diff,
                    v[2],
                    y[2],
                    v[3],
                    price_perf[0],
                    price_perf[1],
                    price_perf_diff,
                ])
    spreadsheetId = create_spreadsheet("Comparison 8.2 vs 8.3", test_name)
    append_to_sheet(spreadsheetId, results, range=test_name)
    graph_linpack_comparison(spreadsheetId, range=test_name)
    print(f"https://docs.google.com/spreadsheets/d/{spreadsheetId}")
    # Return the rows for parity with the other compare_* helpers.
    return results
def compare_stream_results(spreadsheets, test_name, table_name=None):
    """
    Build a side-by-side comparison of stream results from two spreadsheets.

    Reads the *test_name* sheet of each input spreadsheet, interleaves
    matching tables, writes the combined rows to a new spreadsheet and
    graphs them.

    :spreadsheets: iterable of two spreadsheet ids to compare
    :test_name: sheet name to read and to create in the comparison spreadsheet
    :table_name: table headers treated as summary tables; defaults to
        ["Max Througput"] (spelling matches the sheet content)
    :returns: the combined comparison rows
    """
    # Avoid the mutable-default-argument pitfall; resolve the default here.
    if table_name is None:
        table_name = ["Max Througput"]
    values = []
    results = []
    spreadsheet_name = []
    for spreadsheetId in spreadsheets:
        values.append(read_sheet(spreadsheetId, range=test_name))
        spreadsheet_name.append(
            get_sheet(spreadsheetId, range=[])["properties"]["title"])
    spreadsheet_name = " vs ".join(spreadsheet_name)

    # Split each sheet into tables: consecutive non-empty rows form one table.
    for index, value in enumerate(values):
        values[index] = (
            list(g) for k, g in groupby(value, key=lambda x: x != []) if k
        )
    list_1 = list(values[0])
    list_2 = list(values[1])

    for value in list_1:
        results.append([""])
        for ele in list_2:
            if value[0][0] in table_name and ele[0][0] in table_name:
                # Summary tables are merged whole, header included.
                results = combine_two_array_alternating(results, value, ele)
                break
            elif value[1][0] == ele[1][0]:
                # Regular tables match on the first data row; keep one header.
                results.append(value[0])
                results = combine_two_array_alternating(
                    results, value[1:], ele[1:])
                break

    spreadsheetId = create_spreadsheet(spreadsheet_name, test_name)
    append_to_sheet(spreadsheetId, results, test_name)
    graph_stream_data(spreadsheetId, test_name)
    print(f"https://docs.google.com/spreadsheets/d/{spreadsheetId}")
    return results
def graph_stream_data(spreadsheetId, test_name):
    """
    Retrieve each stream's results and graph them up individually.

    :sheet: sheet API function
    :spreadsheetId
    :test_name: test_name to graph up the data, it will be mostly sheet name
    """
    GRAPH_COL_INDEX = 0
    GRAPH_ROW_INDEX = 0
    start_index = 0
    end_index = 0
    data = read_sheet(spreadsheetId, test_name)
    clear_sheet_charts(spreadsheetId, test_name)
    for index, row in enumerate(data):
        # A table starts at its "Max Througput" header row
        # (spelling matches what the summariser writes to the sheet).
        if "Max Througput" in row:
            start_index = index
        if start_index:
            # A table ends at the next blank row, or at the end of the data.
            if row == []:
                end_index = index
            if index + 1 == len(data):
                end_index = index + 1
        if end_index:
            graph_data = data[start_index:end_index]
            column_count = len(graph_data[0])
            # Count how many columns belong to one stream function by grouping
            # header cells on their prefix before "-"; only the first group's
            # size is needed, hence the immediate break.
            for _, items in groupby(graph_data[0][1:],
                                    key=lambda x: x.split("-")[0]):
                len_of_func = len(list(items))
                break
            column = 1
            # One chart per function-sized slice of columns.
            for _ in range(column_count):
                if column >= column_count:
                    break
                sheetId = get_sheet(
                    spreadsheetId,
                    test_name)["sheets"][0]["properties"]["sheetId"]
                # Helper returns both the series spec and the next start column.
                series, column = create_series_range_list_stream(
                    column, len_of_func, sheetId, start_index, end_index)
                requests = {
                    "addChart": {
                        "chart": {
                            "spec": {
                                "title": "%s: %s" % (test_name,
                                                     graph_data[0][0]),
                                "subtitle": f"{graph_data[1][0].split('.')[0]}",
                                "basicChart": {
                                    "chartType": "COLUMN",
                                    "legendPosition": "BOTTOM_LEGEND",
                                    "axis": [
                                        {
                                            "position": "BOTTOM_AXIS",
                                            "title": ""
                                        },
                                        {
                                            "position": "LEFT_AXIS",
                                            "title": "Throughput (MB/s)",
                                        },
                                    ],
                                    # X axis labels come from the first column
                                    # of the table.
                                    "domains": [{
                                        "domain": {
                                            "sourceRange": {
                                                "sources": [{
                                                    "sheetId": sheetId,
                                                    "startRowIndex": start_index,
                                                    "endRowIndex": end_index,
                                                    "startColumnIndex": 0,
                                                    "endColumnIndex": 1,
                                                }]
                                            }
                                        }
                                    }],
                                    "series": series,
                                    "headerCount": 1,
                                },
                            },
                            # Place the chart to the right of the data table.
                            "position": {
                                "overlayPosition": {
                                    "anchorCell": {
                                        "sheetId": sheetId,
                                        "rowIndex": GRAPH_ROW_INDEX,
                                        "columnIndex": column_count + GRAPH_COL_INDEX,
                                    }
                                }
                            },
                        }
                    }
                }
                # Tile charts left-to-right, wrapping to a new row band.
                if GRAPH_COL_INDEX >= 5:
                    GRAPH_ROW_INDEX += 20
                    GRAPH_COL_INDEX = 0
                else:
                    GRAPH_COL_INDEX += 6
                body = {"requests": requests}
                sheet.batchUpdate(spreadsheetId=spreadsheetId,
                                  body=body).execute()
            # Reset variables
            start_index, end_index = 0, 0
def graph_pig_data(spreadsheetId, test_name):
    """
    Find each pig results table in the sheet and add a column chart for it.

    :spreadsheetId: id of the spreadsheet holding the pig results
    :test_name: sheet name to read and to attach the charts to
    """
    GRAPH_COL_INDEX = 1
    GRAPH_ROW_INDEX = 0
    start_index, end_index = None, None
    data = read_sheet(spreadsheetId, test_name)
    clear_sheet_charts(spreadsheetId, test_name)
    for index, row in enumerate(data):
        # Tables are delimited by blank rows; the first blank row opens one.
        if row == [] and start_index is None:
            start_index = index
            continue
        if start_index is not None:
            # Close the table at end of data or just before the next blank row.
            if index + 1 == len(data):
                end_index = index + 1
            elif data[index + 1] == []:
                end_index = index
        if end_index:
            graph_data = data[start_index:end_index]
            # graph_data[0] is the blank delimiter row, so column count
            # comes from the first real row.
            column_count = len(graph_data[1])
            sheetId = get_sheet(
                spreadsheetId,
                test_name)["sheets"][0]["properties"]["sheetId"]
            requests = {
                "addChart": {
                    "chart": {
                        "spec": {
                            "title": f"{test_name}",
                            "subtitle": f"{graph_data[1][0]}",
                            "basicChart": {
                                "chartType": "COLUMN",
                                "legendPosition": "BOTTOM_LEGEND",
                                "axis": [
                                    {
                                        "position": "BOTTOM_AXIS",
                                        "title": "Threads"
                                    },
                                    {
                                        "position": "LEFT_AXIS",
                                        "title": "Scheduler Efficiency",
                                    },
                                ],
                                # X axis labels: first column, skipping the
                                # blank delimiter row and the title row.
                                "domains": [{
                                    "domain": {
                                        "sourceRange": {
                                            "sources": [{
                                                "sheetId": sheetId,
                                                "startRowIndex": start_index + 2,
                                                "endRowIndex": end_index,
                                                "startColumnIndex": 0,
                                                "endColumnIndex": 1,
                                            }]
                                        }
                                    }
                                }],
                                "series": create_series_range_pig(
                                    column_count, sheetId, start_index,
                                    end_index),
                                "headerCount": 1,
                            },
                        },
                        # Place the chart to the right of the data table.
                        "position": {
                            "overlayPosition": {
                                "anchorCell": {
                                    "sheetId": sheetId,
                                    "rowIndex": GRAPH_ROW_INDEX,
                                    "columnIndex": column_count + GRAPH_COL_INDEX,
                                }
                            }
                        },
                    }
                }
            }
            # Tile charts left-to-right, wrapping to a new row band.
            if GRAPH_COL_INDEX >= 5:
                GRAPH_ROW_INDEX += 20
                GRAPH_COL_INDEX = 1
            else:
                GRAPH_COL_INDEX += 6
            body = {"requests": requests}
            sheet.batchUpdate(spreadsheetId=spreadsheetId,
                              body=body).execute()
            # Reset for the next table.
            start_index, end_index = None, None
def graph_uperf_data(spreadsheetId, range):
    """
    Find each uperf results table in the sheet and add a column chart for it.

    :spreadsheetId: id of the spreadsheet holding the uperf results
    :range: sheet name to read and to attach the charts to
        (note: parameter shadows the builtin `range`; kept for API
        compatibility with existing callers)
    """
    GRAPH_COL_INDEX, GRAPH_ROW_INDEX = 2, 0
    start_index, end_index = 0, 0
    # Map the unit written in the sheet to a human-readable chart title.
    measurement = {
        "Gb_sec": "Bandwidth",
        "trans_sec": "Transactions/second",
        "usec": "Latency",
    }
    uperf_results = read_sheet(spreadsheetId, range)
    clear_sheet_charts(spreadsheetId, range)
    for index, row in enumerate(uperf_results):
        if row:
            # A table starts at a row naming one of these test profiles.
            if "tcp_stream16" in row[1] or "tcp_rr64" in row[1]:
                start_index = index
        if start_index:
            # A table ends at the next blank row, or at the end of the data.
            if row == []:
                end_index = index
            if index + 1 == len(uperf_results):
                end_index = index + 1
        if end_index:
            graph_data = uperf_results[start_index:end_index]
            # TODO: fix column count
            column_count = len(uperf_results[2])
            sheetId = get_sheet(spreadsheetId, range)["sheets"][0]["properties"][
                "sheetId"
            ]
            requests = {
                "addChart": {
                    "chart": {
                        "spec": {
                            "title": f"Uperf : {measurement[graph_data[0][2]]} | {graph_data[0][1]}",
                            "subtitle": f"{graph_data[0][0]}",
                            "basicChart": {
                                "chartType": "COLUMN",
                                "legendPosition": "BOTTOM_LEGEND",
                                "axis": [
                                    {
                                        "position": "BOTTOM_AXIS",
                                        "title": "Instance count",
                                    },
                                    {
                                        "position": "LEFT_AXIS",
                                        "title": f"{graph_data[0][2]}",
                                    },
                                ],
                                # X axis labels: first column, skipping the
                                # table's title row.
                                "domains": [
                                    {
                                        "domain": {
                                            "sourceRange": {
                                                "sources": [
                                                    {
                                                        "sheetId": sheetId,
                                                        "startRowIndex": start_index + 1,
                                                        "endRowIndex": end_index,
                                                        "startColumnIndex": 0,
                                                        "endColumnIndex": 1,
                                                    }
                                                ]
                                            }
                                        }
                                    }
                                ],
                                "series": series_range_uperf(
                                    column_count, sheetId, start_index, end_index
                                ),
                                "headerCount": 1,
                            },
                        },
                        # Place the chart to the right of the data table.
                        "position": {
                            "overlayPosition": {
                                "anchorCell": {
                                    "sheetId": sheetId,
                                    "rowIndex": GRAPH_ROW_INDEX,
                                    "columnIndex": column_count + GRAPH_COL_INDEX,
                                }
                            }
                        },
                    }
                }
            }
            # Tile charts left-to-right, wrapping to a new row band.
            if GRAPH_COL_INDEX >= 5:
                GRAPH_ROW_INDEX += 20
                GRAPH_COL_INDEX = 2
            else:
                GRAPH_COL_INDEX += 6
            body = {"requests": requests}
            sheet.batchUpdate(spreadsheetId=spreadsheetId,
                              body=body).execute()
            # Reset variables
            start_index, end_index = 0, 0
def graph_specjbb_data(spreadsheetId, range):
    """
    Find each specjbb results table in the sheet and add a column chart for it.

    :spreadsheetId: id of the spreadsheet holding the specjbb results
    :range: sheet name to read and to attach the charts to
        (note: parameter shadows the builtin `range`; kept for API
        compatibility with existing callers)
    """
    GRAPH_COL_INDEX = 1
    GRAPH_ROW_INDEX = 0
    start_index = 0
    end_index = 0
    data = read_sheet(spreadsheetId, range)
    clear_sheet_charts(spreadsheetId, range)
    for index, row in enumerate(data):
        # A table starts at its "Peak" / "Peak/$eff" header row.
        if "Peak" in row or "Peak/$eff" in row:
            start_index = index
        if start_index:
            # A table ends at the next blank row, or at the end of the data.
            if row == []:
                end_index = index
            if index + 1 == len(data):
                end_index = index + 1
        if end_index:
            graph_data = data[start_index:end_index]
            column_count = len(graph_data[0])
            sheetId = get_sheet(spreadsheetId, range)["sheets"][0]["properties"][
                "sheetId"
            ]
            requests = {
                "addChart": {
                    "chart": {
                        "spec": {
                            "title": "%s : %s" % (range, graph_data[0][0]),
                            "subtitle": f"{graph_data[1][0].split('.')[0]}",
                            "basicChart": {
                                "chartType": "COLUMN",
                                "legendPosition": "BOTTOM_LEGEND",
                                "axis": [
                                    {"position": "BOTTOM_AXIS", "title": ""},
                                    {
                                        "position": "LEFT_AXIS",
                                        "title": "Throughput (bops)",
                                    },
                                ],
                                # X axis labels come from the first column
                                # of the table.
                                "domains": [
                                    {
                                        "domain": {
                                            "sourceRange": {
                                                "sources": [
                                                    {
                                                        "sheetId": sheetId,
                                                        "startRowIndex": start_index,
                                                        "endRowIndex": end_index,
                                                        "startColumnIndex": 0,
                                                        "endColumnIndex": 1,
                                                    }
                                                ]
                                            }
                                        }
                                    }
                                ],
                                "series": create_series_range_list_specjbb(
                                    column_count, sheetId, start_index, end_index
                                ),
                                "headerCount": 1,
                            },
                        },
                        # Place the chart to the right of the data table.
                        "position": {
                            "overlayPosition": {
                                "anchorCell": {
                                    "sheetId": sheetId,
                                    "rowIndex": GRAPH_ROW_INDEX,
                                    "columnIndex": column_count + GRAPH_COL_INDEX,
                                }
                            }
                        },
                    }
                }
            }
            # Tile charts left-to-right, wrapping to a new row band.
            if GRAPH_COL_INDEX >= 5:
                GRAPH_ROW_INDEX += 20
                GRAPH_COL_INDEX = 1
            else:
                GRAPH_COL_INDEX += 6
            body = {"requests": requests}
            sheet.batchUpdate(spreadsheetId=spreadsheetId,
                              body=body).execute()
            # Reset variables
            start_index, end_index = 0, 0