import os

import matplotlib.pyplot as plt

# `calc_stat`, `group`, `plots`, `refs`, `names`, `ops`, `plot_one` and
# `BASE_DIR` are assumed to be defined/imported elsewhere in this module.


def plot(runs, baseline, fname: str):
    groups = {"runs": runs}
    # Validation/test accuracy of the analyzed runs (train keys excluded).
    stats = calc_stat(
        groups, lambda k: k.startswith("analysis_results/") and
        "/accuracy/" in k and "/train/" not in k)["runs"]

    # Baseline accuracies, grouped by the training split they were run on.
    baseline_stats = calc_stat(
        group(baseline, ["scan.train_split"]),
        lambda k: k.startswith("validation/") and "/accuracy/" in k)

    for k in stats:
        print(k)
    print("Baseline groups", baseline_stats.keys())

    means = {k: stats[v].get().mean for k, v in plots.items()}
    std = {k: stats[v].get().std for k, v in plots.items()}

    for k, v in refs.items():
        print("----------------================---------------------")
        print(baseline_stats[f"scan.train_split_{v}"])

    # Reference stats live under keys like "validation/jump/accuracy/total".
    ref_stats = {
        k: baseline_stats[f"scan.train_split_{v}"]
        [f"validation/{v}/accuracy/total"].get()
        for k, v in refs.items()
    }

    ref_means = {k: v.mean for k, v in ref_stats.items()}
    ref_std = {k: v.std for k, v in ref_stats.items()}

    # Grouped bar chart: baseline (reference) bars next to the analyzed runs.
    fig = plt.figure(figsize=[3, 1.5])
    plt.bar([2.25 * x for x in range(len(names))],
            [ref_means[n] * 100 for n in names],
            yerr=[ref_std[n] * 100 for n in names],
            align='center')
    plt.bar([2.25 * x + 1 for x in range(len(names))],
            [means[n] * 100 for n in names],
            yerr=[std[n] * 100 for n in names],
            align='center')
    plt.xticks([2.25 * x + 0.5 for x in range(len(names))], names)
    plt.ylabel("Test accuracy [\\%]")
    # plt.legend(["Before", "After"])
    fig.savefig(fname, bbox_inches='tight')
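# --- Illustration only: a minimal, self-contained sketch of the grouped-bar
# layout used by `plot` above and the other helpers below. All numbers and
# labels here are made up; only matplotlib is assumed.
def _demo_grouped_bars():
    demo_names = ["jump", "turn_left", "length"]  # hypothetical task names
    ref_means, ref_std = [92.0, 88.5, 75.0], [1.2, 2.0, 3.1]
    run_means, run_std = [95.5, 90.1, 80.3], [0.8, 1.5, 2.7]

    fig = plt.figure(figsize=[3, 1.5])
    # First series at x = 0, 2.25, 4.5, ...; second series shifted right by
    # 1 unit, leaving a gap between consecutive groups.
    plt.bar([2.25 * x for x in range(len(demo_names))], ref_means,
            yerr=ref_std, align='center')
    plt.bar([2.25 * x + 1 for x in range(len(demo_names))], run_means,
            yerr=run_std, align='center')
    # Put the tick halfway between the two bars of each group.
    plt.xticks([2.25 * x + 0.5 for x in range(len(demo_names))], demo_names)
    plt.ylabel("Test accuracy [%]")
    fig.savefig("grouped_bars_demo.pdf", bbox_inches='tight')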
def plot_all(name, groups):
    # IID tuple accuracies plus inverse-mask test accuracies, per group.
    stats = calc_stat(
        groups, lambda k:
        (k.startswith("final_accuracy/") and '/iid/accuracy/' in k and
         '/tuple/' in k) or
        (k.startswith("inverse_mask_test/") and '/iid/accuracy/' in k))

    for k, s in stats.items():
        print("---------------------", k)
        for n in s.keys():
            print(n)

    # One figure per group, saved under BASE_DIR/<name>/<group>.pdf.
    for k, s in stats.items():
        fig = plot_one(s)
        fname = os.path.join(BASE_DIR, name, f"{k}.pdf")
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        fig.savefig(fname, bbox_inches='tight')
def get_mean_err(runs):
    groups = {"all": runs}
    stats = calc_stat(
        groups, lambda k: k.startswith("analysis_results/") and
        "/accuracy/" in k and "/train/" not in k)["all"]

    for k in stats:
        print(k)

    means = {k: stats[v].get().mean for k, v in plots.items()}
    std = {k: stats[v].get().std for k, v in plots.items()}
    ref_means = {k: stats[v].get().mean for k, v in refs.items()}
    ref_std = {k: stats[v].get().std for k, v in refs.items()}

    # Return percentages: (ref means, ref stds, run means, run stds),
    # each ordered by `names`.
    return ([ref_means[n] * 100 for n in names],
            [ref_std[n] * 100 for n in names],
            [means[n] * 100 for n in names],
            [std[n] * 100 for n in names])
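# Hedged usage sketch for `get_mean_err` (`my_runs` below is a hypothetical
# placeholder, not from the original code): the four returned lists are
# aligned with `names` and already scaled to percent, so they drop straight
# into the same grouped-bar layout as `plot` above.
#
#   ref_means, ref_std, run_means, run_std = get_mean_err(my_runs)
#   plt.bar([2.25 * x for x in range(len(names))], ref_means, yerr=ref_std,
#           align='center')
#   plt.bar([2.25 * x + 1 for x in range(len(names))], run_means,
#           yerr=run_std, align='center')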
def plot_both(ff, rnn):
    assert len(ff) == 10
    assert len(rnn) == 10

    # IID tuple accuracies plus inverse-mask test accuracies.
    def key_filter(k):
        return (k.startswith("final_accuracy/") and '/iid/accuracy/' in k
                and '/tuple/' in k) or \
               (k.startswith("inverse_mask_test/") and
                '/iid/accuracy/' in k)

    ff_stats = calc_stat({"a": ff}, key_filter)["a"]
    rnn_stats = calc_stat({"a": rnn}, key_filter)["a"]

    fig = plt.figure(figsize=[4, 0.95])
    # ax = fig.add_subplot(111, aspect=0.07)

    # Two tuple slots (t = 0, 1) for the FNN, then the same for the RNN;
    # the bars for one name are grouped together on the x axis.
    for t in range(2):
        this_ff_stats = [ff_stats[f"{plots[n]}{t}"].get() for n in names]
        means_ff = [s.mean * 100 for s in this_ff_stats]
        std_ff = [s.std * 100 for s in this_ff_stats]
        plt.bar([5.5 * r + t * 2.5 for r in range(len(names))],
                means_ff, yerr=std_ff, align='center')

    for t in range(2):
        this_rnn_stats = [rnn_stats[f"{plots[n]}{t}"].get() for n in names]
        means_rnn = [s.mean * 100 for s in this_rnn_stats]
        std_rnn = [s.std * 100 for s in this_rnn_stats]
        plt.bar([5.5 * r + 1 + t * 2.5 for r in range(len(names))],
                means_rnn, yerr=std_rnn, align='center')

    plt.xticks([5.5 * r + 1.75 for r in range(len(names))], names)
    plt.ylabel("Accuracy [\\%]")
    # plt.legend(["F1", "F2", "R1", "R2"], bbox_to_anchor=(1.1, 1.05))

    fname = f"{BASE_DIR}/tuple_performance.pdf"
    fig.axes[0].yaxis.set_label_coords(-0.12, 0.4)
    fig.savefig(fname, bbox_inches='tight', pad_inches=0.01)

    # Print the same numbers as a LaTeX table (booktabs style).
    print("\\begin{tabular}{ll|c|cc|cc}")
    print("\\toprule")
    print(" & ".join(["", ""] + names) + " \\\\")
    print("\\midrule")
    row = ["\\multirow{2}{*}{FNN}"]
    for t in range(2):
        this_stats = [ff_stats[f"{plots[n]}{t}"].get() for n in names]
        row.append(f"Pair {t + 1}")
        for m, s in zip([s.mean * 100 for s in this_stats],
                        [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \\pm {s:.1f}$")
        print(" & ".join(row) + " \\\\")
        row = [""]  # The multirow label spans both physical rows.
    print("\\midrule")
    row = ["\\multirow{2}{*}{LSTM}"]
    for t in range(2):
        this_stats = [rnn_stats[f"{plots[n]}{t}"].get() for n in names]
        row.append(f"Pair {t + 1}")
        for m, s in zip([s.mean * 100 for s in this_stats],
                        [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \\pm {s:.1f}$")
        print(" & ".join(row) + " \\\\")
        row = [""]
    print("\\bottomrule")
    print("\\end{tabular}")
def plot_both(ff, rnn):
    # Per-operation validation accuracies plus inverse-mask test accuracies.
    def key_filter(k):
        return (k.startswith("analyzer/") and k.endswith("/accuracy") and
                '/validation/' in k) or \
               (k.startswith("inverse_mask_test/") and
                k.endswith("/accuracy"))

    ff_stats = calc_stat({"a": ff}, key_filter)["a"]
    rnn_stats = calc_stat({"a": rnn}, key_filter)["a"]

    fig = plt.figure(figsize=[6, 1.6])

    # One bar series per (network, operation) combination; the bars for one
    # name are grouped together on the x axis.
    for t in range(2):
        this_ff_stats = [
            ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names
        ]
        means_ff = [s.mean * 100 for s in this_ff_stats]
        std_ff = [s.std * 100 for s in this_ff_stats]
        plt.bar([5.5 * r + t * 2.5 for r in range(len(names))],
                means_ff, yerr=std_ff, align='center')

    for t in range(2):
        this_rnn_stats = [
            rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names
        ]
        means_rnn = [s.mean * 100 for s in this_rnn_stats]
        std_rnn = [s.std * 100 for s in this_rnn_stats]
        plt.bar([5.5 * r + 1 + t * 2.5 for r in range(len(names))],
                means_rnn, yerr=std_rnn, align='center')

    plt.xticks([5.5 * r + 1.75 for r in range(len(names))], names)
    plt.ylabel("Accuracy [\\%]")
    plt.legend(["FNN $+$", "FNN $*$", "RNN $+$", "RNN $*$"])

    fname = "out/admmul_performance.pdf"
    os.makedirs(os.path.dirname(fname), exist_ok=True)
    fig.savefig(fname, bbox_inches='tight')

    # Print the same numbers as a LaTeX table (booktabs style).
    print("\\begin{tabular}{ll|c|cc|cc}")
    print("\\toprule")
    print(" & ".join(["", ""] + names) + " \\\\")
    print("\\midrule")
    row = ["\\multirow{2}{*}{FNN}"]
    for t in range(2):
        this_stats = [
            ff_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names
        ]
        row.append(f"Pair {t + 1}")
        for m, s in zip([s.mean * 100 for s in this_stats],
                        [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \\pm {s:.1f}$")
        print(" & ".join(row) + " \\\\")
        row = [""]  # The multirow label spans both physical rows.
    print("\\midrule")
    row = ["\\multirow{2}{*}{LSTM}"]
    for t in range(2):
        this_stats = [
            rnn_stats[f"{plots[n]}{ops[t]}/accuracy"].get() for n in names
        ]
        row.append(f"Pair {t + 1}")
        for m, s in zip([s.mean * 100 for s in this_stats],
                        [s.std * 100 for s in this_stats]):
            row.append(f"${m:.0f} \\pm {s:.1f}$")
        print(" & ".join(row) + " \\\\")
        row = [""]
    print("\\bottomrule")
    print("\\end{tabular}")
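# --- Illustration only: the two table printers above share the same
# "$mean \pm std$" cell and " & "-joined row format. A self-contained sketch
# of that pattern; `latex_cell` and `latex_row` are our names, not part of
# the original code.


def latex_cell(mean, std):
    """Format a mean/std pair as a LaTeX math cell, e.g. '$95 \\pm 0.8$'."""
    return f"${mean:.0f} \\pm {std:.1f}$"


def latex_row(label, cells):
    """Join a row label and formatted cells into one LaTeX table line."""
    return " & ".join([label] + cells) + " \\\\"


# Example:
#   latex_row("Pair 1", [latex_cell(95.4, 0.8), latex_cell(80.3, 2.7)])
#   -> 'Pair 1 & $95 \pm 0.8$ & $80 \pm 2.7$ \\'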