def strong_scaling_speedup(lang_set, lang_v, nb_node_set):
    """Plot strong-scaling speedup per language and save it as a PDF.

    For each language the speedup of every node count is computed
    relative to the smallest node count of that language, and drawn on
    log2/log2 axes together with an "Ideal Speedup" reference line that
    doubles at every node count in ``nb_node_set``.
    """
    fig = plt.figure()
    ax = fig.gca()
    for lang in lang_set:
        node_counts = sorted(lang_v[lang].keys(), key=float)
        # Time per node count, then speedup relative to the first
        # (smallest) node count of this language.
        times = {n: dh.get_val(lang_v[lang], n, op_type)
                 for n in node_counts}
        baseline = times[node_counts[0]]
        speedups = {n: baseline / times[n] for n in node_counts}
        ax.plot(speedups.keys(), speedups.values(), label=lang, marker='*')
    # Ideal speedup: 1, 2, 4, ... across the numerically sorted node set.
    ideal = {}
    factor = 1
    for n in sorted(nb_node_set, key=float):
        ideal[n] = factor
        factor *= 2
    ax.plot(ideal.keys(), ideal.values(), label='Ideal Speedup')
    ax.set_yscale('log', basey=2)
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax.set_xscale('log', basex=2)
    ax.xaxis.set_major_formatter(
        ticker.FuncFormatter(lambda x, _: '{:g}'.format(x)))
    ax.set_ylabel("Speedup")
    ax.set_xlabel("Nodes")
    plt.legend()
    plt.savefig("fig_strong_scaling_speedup.pdf")
    plt.close()
def strong_scaling_latex_table(lang_set, lang_v, nb_node_set):
    """Write a LaTeX ``tabularx`` table (languages x node counts) of the
    timing values to ``strong_scaling_latex_table.tex``.

    Cells with no measurement (``dh.get_val`` returns None) are left
    empty.  Fixes over the original: the file is managed by a ``with``
    block so it is closed even on error; backslashes in the LaTeX
    snippets are escaped explicitly (the original relied on Python
    keeping invalid escapes like ``'\\c'``/``'\\h'`` literal, which
    emits a DeprecationWarning); ``is not None`` replaces ``!= None``.
    The emitted bytes are identical to the original's.
    """
    nodes = sorted(nb_node_set, key=float)
    with open("strong_scaling_latex_table.tex", "w") as f:
        # Centered, auto-stretched column type.
        f.write('\\newcolumntype{C}{>{\\centering\\arraybackslash}X}\n')
        f.write('\\begin{tabularx}{\\textwidth}{')
        # One column per node count plus one for the language name.
        f.write('C' * (len(nodes) + 1))
        f.write('}\n')
        f.write('Lang/#Nodes')
        for n in nodes:
            f.write('& ')
            f.write(f'{n} ')
        f.write('\\\\ \\hline\n')
        for lang in lang_set:
            f.write(lang + ' ')
            for n in nodes:
                f.write('& ')
                v = dh.get_val(lang_v[lang], n, op_type)
                if v is not None:
                    f.write(f'{v:.2f} ')
            f.write('\\\\\n')
        f.write('\\hline\n')
        f.write('\\end{tabularx}\n')
def strong_scaling_speedup_against_previous_bar(lang_set, lang_v,
                                                nb_node_set):
    """Grouped bar chart: for each language, the speedup of every node
    count over the previous (next-smaller) node count.

    Bars for the k-th step are centered around x = k, offset per
    language so the groups sit side by side.  Horizontal lines at 1
    (no gain) and 2 (perfect doubling) serve as references.
    """
    fig = plt.figure()
    ax = fig.gca()
    width = 0.7
    n_groups = len(lang_set)
    for group_idx, lang in enumerate(lang_set):
        slang = sorted(lang_v[lang].keys(), key=float)
        times = [dh.get_val(lang_v[lang], n, op_type) for n in slang]
        # Speedup of step k over step k-1, keyed by position 1..len-1.
        speedup = {k: times[k - 1] / times[k]
                   for k in range(1, len(times))}
        offset = (group_idx * width / n_groups - width / 2
                  + width / n_groups / 2)
        xs = [k + offset for k in speedup]
        ax.bar(xs, speedup.values(), width / n_groups, label=lang,
               align='center')
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax.set_ylabel("Speedup")
    ax.set_xlabel("Nodes")
    # The first node count has no "previous" one, so blank its label.
    # NOTE(review): 'slang' here is the node list of the LAST language
    # iterated, and labels are set before set_xticks — confirm this is
    # intended (it mirrors the original behavior exactly).
    slang[0] = None
    ax.set_xticklabels(slang, ha='right')
    ax.set_xticks(range(len(slang)))
    ax.axhline(1, color='black', lw=0.5)
    ax.axhline(2, color='black', lw=0.5)
    plt.legend()
    plt.savefig("fig_strong_scaling_speedup_against_previous_bar.pdf")
    plt.close()
def strong_scaling_bar(lang_set, lang_v, nb_node_set):
    """Grouped bar chart of raw times (seconds) per node count, one bar
    group per node count, one bar per language, each bar annotated with
    its rounded height.  Saved to fig_strong_scaling_bar.pdf.
    """
    fig = plt.figure()
    ax = fig.gca()
    width = 0.85
    # Map each node count to its integer slot on the x axis.
    slot = {n: idx
            for idx, n in enumerate(sorted(nb_node_set, key=float))}
    n_groups = len(lang_set)
    for group_idx, lang in enumerate(lang_set):
        data = {slot[n]: dh.get_val(lang_v[lang], n, op_type)
                for n in sorted(lang_v[lang].keys(), key=float)}
        offset = (group_idx * width / n_groups - width / 2
                  + width / n_groups / 2)
        xs = [k + offset for k in data]
        rects = ax.bar(xs, data.values(), width / n_groups, label=lang,
                       align='center')
        # Label each bar with its (rounded) height, rotated vertically.
        for rect in rects:
            height = rect.get_height()
            ax.annotate(
                f'{height:.0f}',
                xy=(rect.get_x() + rect.get_width() / 2, height),
                xytext=(1, 3),  # 3 points vertical offset
                textcoords="offset pixels",
                rotation=90,
                size=10,
                in_layout=True,
                ha='center',
                va='bottom')
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax.set_ylabel("Time (s)")
    ax.set_xlabel("Nodes")
    # NOTE(review): labels are taken from nb_node_set in its iteration
    # order and set before set_xticks — kept as in the original;
    # confirm nb_node_set is ordered at the call site.
    ax.set_xticklabels(nb_node_set, ha='right')
    ax.set_xticks(range(len(nb_node_set)))
    plt.legend()
    plt.margins(y=0.12)
    plt.tight_layout()
    plt.savefig("fig_strong_scaling_bar.pdf")
    plt.close()
def strong_scaling_speedup_bar(lang_set, lang_v, nb_node_set):
    """Bar chart of per-language speedup relative to the smallest node
    count, plus an "Ideal Speedup" bar series that doubles with every
    node count, on a log2 y axis.

    Fix: the ideal-speedup loop used ``sorted(nb_node_set)`` with no
    ``key=float``, unlike every other node-count sort in this file;
    with string node counts ("16" < "2" lexicographically) the ideal
    bars were assigned to the wrong positions.  It now sorts
    numerically, consistent with the rest of the file.
    """
    fig = plt.figure()
    ax = fig.gca()
    width = 0.7
    pos_g = 1
    # One extra slot in each group is reserved for the ideal bars.
    len_lang = len(lang_set) + 1
    for lang in lang_set:
        v = dict()
        v2 = dict()
        pos_l = 0
        for i in sorted(lang_v[lang].keys(), key=float):
            v[pos_l] = dh.get_val(lang_v[lang], i, op_type)
            # Speedup relative to the smallest node count (slot 0).
            v2[pos_l] = v[0] / v[pos_l]
            pos_l = pos_l + 1
        v_keys = [i + pos_g * width / len_lang - width / 2
                  + width / len_lang / 2 for i in v.keys()]
        ax.bar(v_keys, v2.values(), width / len_lang, label=lang,
               align='center')
        pos_g = pos_g + 1
    # Ideal speedup: 1, 2, 4, ... across the numerically sorted nodes.
    ideal = dict()
    incr = 0
    sc = 1
    for i in sorted(nb_node_set, key=float):  # was: sorted(nb_node_set)
        ideal[incr - width / 2 + width / len_lang / 2] = sc
        incr += 1
        sc *= 2
    ax.bar(ideal.keys(), ideal.values(), width / len_lang,
           label='Ideal Speedup')
    ax.set_yscale('log', basey=2)
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    ax.set_ylabel("Speedup")
    ax.set_xlabel("Nodes")
    ax.set_xticklabels(nb_node_set, ha='right')
    ax.set_xticks(range(len(nb_node_set)))
    plt.legend()
    plt.savefig("fig_strong_scaling_speedup_bar.pdf")
    plt.close()
def strong_scaling(lang_set, lang_v):
    """Line plot of raw times vs node count for each language, on a
    log2 y axis, saved to fig_strong_scaling.pdf.

    Fix: node counts are now sorted numerically (``key=float``), as in
    the other plotting helpers of this file; the original plain sort
    ordered numeric-string keys lexicographically (e.g. "16" before
    "2"), scrambling the x axis.
    """
    fig = plt.figure()
    ax = fig.gca()
    for lang in lang_set:
        v = dict()
        for i in sorted(lang_v[lang].keys(), key=float):
            # str() so the x axis stays categorical, as before.
            v[str(i)] = dh.get_val(lang_v[lang], i, op_type)
        ax.plot(v.keys(), v.values(), label=lang, marker='*')
    ax.set_yscale('log', basey=2)
    ax.yaxis.set_major_formatter(
        ticker.FuncFormatter(lambda y, _: '{:g}'.format(y)))
    plt.locator_params(axis='y', numticks=30)
    ax.set_ylabel("Time (s)")
    ax.set_xlabel("Nodes")
    plt.legend()
    plt.savefig("fig_strong_scaling.pdf")
    plt.close()
# --- script-level setup: parse options, load measurements, prepare
# --- per-language containers for the plotting helpers.
import core.DictHelper as dh
import core.ParseInputArg as pia


def conv_list_el_to_int(ml):
    """Convert every element of *ml* to int, in place."""
    for i in range(len(ml)):
        ml[i] = int(ml[i])


op_type = "min"  # aggregation used when reading measurement values
parser = pia.Parser()
parser.add_filter()
parser.add_dark_background()
in_var = parser.get_options()
# NOTE(review): 'sys' is used here but not imported in this chunk —
# presumably imported earlier in the file; confirm.
input_res = dh.read_json_file(sys.argv[1], in_var.filter_dict, op_type)
nb_block_set = dh.extract_set(input_res, "nb_blocks")
blocksize_set = dh.extract_set(input_res, "blocksize")
nb_proc_per_task_set = dh.extract_set(input_res, "nb_proc_per_task")
lang_set = dh.extract_set(input_res, "lang")
# These two sets hold numeric strings; normalize them to ints.
conv_list_el_to_int(nb_block_set)
conv_list_el_to_int(nb_proc_per_task_set)
# One sub-dict per block count / per language, filled later.
ymlxmp_data = dict()
for nbb in nb_block_set:
    ymlxmp_data[nbb] = dict()
lang_v = dict()
for lang in lang_set:
    lang_v[lang] = dict()
# NOTE(review): this chunk begins mid-function — the three statements
# up to 'return t' are the tail of a sort-key helper (its 'def' is
# outside this view).  It builds a (nb_nodes, mean) sort key and also
# stashes a (lang, nb_nodes) composite under "tmp_key_sort_best_case".
t.append(float(i["nb_nodes"]))
t.append(float(i[in_var.best_case_value].get_mean()))
i["tmp_key_sort_best_case"] = (i["lang"], i["nb_nodes"])
return t


def isCasePerformed(in_md, case):
    """Return True if *in_md* already holds a measurement matching
    *case* on (lang, nb_nodes, datasize, nb_blocks)."""
    for d in in_md:
        if (d["lang"], d["nb_nodes"], d["datasize"],
                d["nb_blocks"]) == (case["lang"], case["nb_nodes"],
                                    case["datasize"], case["nb_blocks"]):
            return True
    return False


input_res = dh.read_json_file(sys.argv[1], in_var.filter_dict, "val")
input_res = sorted(input_res, key=my_key)
# Hide the helper key added by the sort-key function from the output.
in_var.not_show.append("tmp_key_sort_best_case")
old_d = None
counter = dict()
for d in input_res:
    new_d = dict()
    # Count entries per (lang, nb_nodes); once the requested number of
    # shown values is exceeded, skip the rest of that group.
    counter[d["tmp_key_sort_best_case"]] = counter.get(
        d["tmp_key_sort_best_case"], 0) + 1
    if in_var.number_values_shown != 0 and counter.get(
            d["tmp_key_sort_best_case"], 0) > in_var.number_values_shown:
        continue
    for k, v in d.items():
        if k in in_var.not_show:
            continue
        # NOTE(review): the body of this inner loop continues past the
        # visible end of this chunk.
# --- script-level setup for a plotting script: headless matplotlib,
# --- option parsing, data loading, per-language containers.
import sys
import matplotlib as mpl
mpl.use('Agg')  # headless backend: render to files, no display needed
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import core.DictHelper as dh
import core.ParseInputArg as pia

parser = pia.Parser()
parser.add_filter()
parser.add_dark_background()
parser.add_option_list("-o", "--out-list", dest="out_list")
in_var = parser.get_options()
input_res = dh.read_json_file(sys.argv[1], in_var.filter_dict, "mean")
lang_set = dh.extract_set(input_res, "lang")
op_type = "min"         # aggregation used by the plotting helpers
val_key = "time_calc"   # measurement field of interest
lang_v = dict()
for lang in lang_set:
    lang_v[lang] = dict()
nb_node_set = set()
for d in input_res:
    lang = d["lang"]
    nnodes = int(d["nb_nodes"])
    if lang == "YML+XMP":
        # NOTE(review): YML+XMP apparently requires at least 2 nodes —
        # presumably one is reserved for coordination; confirm.  The
        # loop body may continue past the visible end of this chunk.
        if nnodes < 2:
            print("error : YML+XMP number of nodes < 2")
            sys.exit(1)