def save_training_graphs(self, train_recorder, save_dir):
    """Plot and save the discriminator loss and the true-return curves.

    Writes 'losses.png' and 'true_returns.png' into *save_dir* (a pathlib.Path).
    Metric series are read from train_recorder.tape, keyed by metric name.
    """
    from alfred.utils.plots import plot_curves, create_fig
    import matplotlib.pyplot as plt

    tape = train_recorder.tape
    episodes = tape['episode']

    # Discriminator loss over episodes
    fig, ax = create_fig((1, 1))
    plot_curves(ax, ys=[tape['d_loss']], xs=[episodes],
                xlabel="Episode", ylabel="d_loss")
    fig.savefig(str(save_dir / 'losses.png'))
    plt.close(fig)

    # True returns: training and evaluation side by side
    fig, axes = create_fig((1, 2))
    fig.suptitle('True returns')
    panels = (('return', "Mean Return"), ('eval_return', "Mean Eval Return"))
    for ax, (key, label) in zip(axes, panels):
        plot_curves(ax, ys=[tape[key]], xs=[episodes],
                    xlabel="Episode", ylabel=label)
    fig.savefig(str(save_dir / 'true_returns.png'))
    plt.close(fig)
def save_training_graphs(self, train_recorder, save_dir):
    """Save SAC training diagnostics as two stacked 4-panel figures.

    'losses.png' shows the critic/alpha losses and Q-value estimates;
    'figures.png' shows returns, policy entropy and the temperature alpha.
    All curves are plotted against the total number of environment transitions.
    """
    from alfred.utils.plots import create_fig, plot_curves
    import matplotlib.pyplot as plt

    tape = train_recorder.tape
    transitions = tape['total_transitions']

    def _save_stacked(panels, filename, suptitle=None):
        # One (4, 1) figure: one metric per row, shared x-axis semantics.
        fig, axes = create_fig((4, 1))
        if suptitle is not None:
            fig.suptitle(suptitle)
        for ax, (key, ylabel) in zip(axes, panels):
            plot_curves(ax, ys=[tape[key]], xs=[transitions],
                        xlabel="Transitions", ylabel=ylabel)
        fig.savefig(str(save_dir / filename))
        plt.close(fig)

    # Losses and Q-value estimates
    _save_stacked([('q1_loss', 'q1_loss'),
                   ('q2_loss', 'q2_loss'),
                   ('alpha_loss', 'alpha_loss'),
                   ('q_s', 'q_s')],
                  'losses.png')

    # Returns, entropy and temperature
    _save_stacked([('return', 'Mean Return'),
                   ('eval_return', 'Mean Eval Return'),
                   ('pi_entropy', 'pi_entropy'),
                   ('alpha', 'alpha')],
                  'figures.png', suptitle='Returns')
def create_plots(self, train_recorder, save_dir):
    """Plot training losses and learning rate against update step.

    Saves a single 'graphs.png' under *save_dir*. Uses only the top-left
    four cells of a (3, 3) grid; NaN entries are stripped via remove_nones.
    NOTE(review): relies on module-level create_fig/plot_curves/remove_nones/plt.
    """
    tape = train_recorder.tape
    panels = (((0, 0), 'reconstruction_loss'),
              ((0, 1), 'prior_loss'),
              ((0, 2), 'total_loss'),
              ((1, 0), 'lr'))

    fig, axes = create_fig((3, 3))
    for (row, col), key in panels:
        plot_curves(axes[row, col],
                    xs=[remove_nones(tape['update_i'])],
                    ys=[remove_nones(tape[key])],
                    xlabel='update_i', ylabel=key)
    plt.tight_layout()
    fig.savefig(str(save_dir / 'graphs.png'))
    plt.close(fig)
def save_training_graphs(self, train_recorder, save_dir):
    """Save a single 3-panel figure: loss, training return and eval return.

    All three curves are plotted against total transitions; output is
    'figures.png' under *save_dir* (a pathlib.Path).
    """
    from alfred.utils.plots import create_fig, plot_curves
    import matplotlib.pyplot as plt

    tape = train_recorder.tape
    transitions = tape['total_transitions']
    panels = (('loss', "loss"), ('return', "return"), ('eval_return', "Eval return"))

    fig, axes = create_fig((3, 1))
    for ax, (key, ylabel) in zip(axes, panels):
        plot_curves(ax, ys=[tape[key]], xs=[transitions],
                    xlabel='Transitions', ylabel=ylabel)
    fig.savefig(str(save_dir / 'figures.png'))
    plt.close(fig)
def save_training_graphs(self, train_recorder, save_dir):
    """Plot discriminator loss, true returns and (if recorded) accuracy metrics.

    Writes 'losses.png', 'true_returns.png' and optionally 'Accuracy.png'
    into *save_dir* (a pathlib.Path). Metric series are read from
    train_recorder.tape, keyed by metric name.
    """
    from alfred.utils.plots import create_fig, plot_curves
    import matplotlib.pyplot as plt

    # Losses
    fig, axes = create_fig((1, 1))
    plot_curves(axes,
                ys=[train_recorder.tape['d_loss']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="d_loss")
    fig.savefig(str(save_dir / 'losses.png'))
    plt.close(fig)

    # True Returns
    fig, axes = create_fig((1, 2))
    fig.suptitle('True returns')
    plot_curves(axes[0],
                ys=[train_recorder.tape['return']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="Mean Return")
    plot_curves(axes[1],
                ys=[train_recorder.tape['eval_return']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="Mean Eval Return")
    fig.savefig(str(save_dir / 'true_returns.png'))
    plt.close(fig)

    # Accuracies
    to_plot = ('recall', 'specificity', 'precision', 'accuracy', 'F1')
    # BUG FIX: the original guarded with any() but then indexed EVERY key,
    # raising KeyError when only a subset of the metrics was recorded.
    # Plot only the metrics actually present in the tape.
    present = [k for k in to_plot if k in train_recorder.tape]
    if present:
        fig, axes = create_fig((1, 1))
        ys = [train_recorder.tape[key] for key in present]
        plot_curves(axes, ys=ys,
                    xs=[train_recorder.tape['episode']] * len(ys),
                    xlabel='Episode', ylabel='-', labels=present)
        fig.savefig(str(save_dir / 'Accuracy.png'))
        plt.close(fig)
def save_training_graphs(self, train_recorder, save_dir):
    """Plot IRL training diagnostics.

    Writes up to four figures into *save_dir* (a pathlib.Path):
    'losses.png' (discriminator loss), 'true_returns.png' (train/eval returns),
    'estimated_rewards.png' (IRL reward estimates, only keys present in the
    tape), and 'Accuracy.png' (classification metrics, only keys present).
    """
    from alfred.utils.plots import create_fig, plot_curves
    import matplotlib.pyplot as plt

    # Losses
    fig, axes = create_fig((1, 1))
    plot_curves(axes,
                ys=[train_recorder.tape['d_loss']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="d_loss")
    fig.savefig(str(save_dir / 'losses.png'))
    plt.close(fig)

    # True Returns
    fig, axes = create_fig((1, 2))
    fig.suptitle('True returns')
    plot_curves(axes[0],
                ys=[train_recorder.tape['return']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="Mean Return")
    plot_curves(axes[1],
                ys=[train_recorder.tape['eval_return']],
                xs=[train_recorder.tape['episode']],
                xlabel="Episode", ylabel="Mean Eval Return")
    fig.savefig(str(save_dir / 'true_returns.png'))
    plt.close(fig)

    # Estimated Returns (each key is optional; only plot what was recorded)
    to_plot = ('IRLAverageEntReward', 'IRLAverageF', 'IRLAverageLogPi',
               'IRLMedianLogPi', 'ExpertIRLAverageEntReward', 'ExpertIRLAverageF',
               'ExpertIRLAverageLogPi', 'ExpertIRLMedianLogPi')
    if any([k in train_recorder.tape for k in to_plot]):
        fig, axes = create_fig((2, 5))
        for i, key in enumerate(to_plot):
            if key in train_recorder.tape:
                ax = axes[i // 5, i % 5]
                plot_curves(ax,
                            ys=[train_recorder.tape[key]],
                            xs=[train_recorder.tape['episode']],
                            xlabel='Episode', ylabel=key)
        fig.savefig(str(save_dir / 'estimated_rewards.png'))
        plt.close(fig)

    # Accuracies
    to_plot = ('recall', 'specificity', 'precision', 'accuracy', 'F1')
    # BUG FIX: the original guarded with any() but then indexed EVERY key,
    # raising KeyError when only a subset of the metrics was recorded
    # (the estimated-returns section above already checks per key).
    present = [k for k in to_plot if k in train_recorder.tape]
    if present:
        fig, axes = create_fig((1, 1))
        ys = [train_recorder.tape[key] for key in present]
        plot_curves(axes, ys=ys,
                    xs=[train_recorder.tape['episode']] * len(ys),
                    xlabel='Episode', ylabel='-', labels=present)
        fig.savefig(str(save_dir / 'Accuracy.png'))
        plt.close(fig)
def _make_benchmark_performance_figure(storage_dirs, save_dir, y_error_bars, logger,
                                       normalize_with_first_model=True, sort_bars=False):
    """Create and save a grouped bar chart of benchmark performance scores.

    Gathers per-seed scores across *storage_dirs*, draws one bar group per
    inner key with error bars, annotates the figure with evaluation metadata,
    and saves the figure plus a traceability JSON into every storage dir.

    Args:
        storage_dirs: list of pathlib.Path storage directories to aggregate.
        save_dir: sub-directory name (also used as the output file prefix).
        y_error_bars: error-bar specification forwarded to _gather_scores.
        logger: logger forwarded to _gather_scores.
        normalize_with_first_model: forwarded to _gather_scores.
        sort_bars: forwarded to _gather_scores.

    Returns:
        sorted_inner_keys as produced by _gather_scores.

    Raises:
        ValueError: if any experiment has a NaN mean score.
    """
    scores, scores_means, scores_err_up, scores_err_down, sorted_inner_keys, reference_key = _gather_scores(
        storage_dirs=storage_dirs,
        save_dir=save_dir,
        y_error_bars=y_error_bars,
        logger=logger,
        normalize_with_first_model=normalize_with_first_model,
        sort_bars=sort_bars)

    # Creates the graph (width scales with the number of bars per group)
    n_bars_per_group = len(scores_means.keys())
    n_groups = len(scores_means[reference_key].keys())
    fig, ax = create_fig((1, 1), figsize=(n_bars_per_group * n_groups, n_groups))
    bar_chart(ax,
              scores=scores_means,
              err_up=scores_err_up,
              err_down=scores_err_down,
              group_names=scores_means[reference_key].keys(),
              title="Average Return")

    # First axis of the score array is the seed axis
    n_training_seeds = scores[reference_key][list(scores_means[reference_key].keys())[0]].shape[0]
    scores_info = load_dict_from_json(
        filename=str(storage_dirs[0] / save_dir / f"{save_dir}_seed_scores_info.json"))

    info_str = f"{n_training_seeds} training seeds" \
               f"\nn_eval_runs={scores_info['n_eval_runs']}" \
               f"\nperformance_metric={scores_info['performance_metric']}" \
               f"\nperformance_aggregation={scores_info['performance_aggregation']}"
    ax.text(0.80, 0.95, info_str, transform=ax.transAxes, fontsize=12,
            verticalalignment='top', bbox=dict(facecolor='gray', alpha=0.1))
    plt.tight_layout()

    # Saves storage_dirs from which the graph was created for traceability
    for storage_dir in storage_dirs:
        os.makedirs(storage_dir / save_dir, exist_ok=True)
        fig.savefig(storage_dir / save_dir / f'{save_dir}_performance.png')
        save_dict_to_json(
            {
                # BUG FIX: was `str(storage_dir) in storage_dirs`, a membership
                # test yielding a bool (str vs Path, so always False); record
                # the actual list of source directories instead.
                'sources': [str(d) for d in storage_dirs],
                'n_training_seeds': n_training_seeds,
                'n_eval_runs': scores_info['n_eval_runs'],
                'performance_metric': scores_info['performance_metric'],
                'performance_aggregation': scores_info['performance_aggregation']
            },
            storage_dir / save_dir / f'{save_dir}_performance_sources.json')
    plt.close(fig)

    # SANITY-CHECK that no seed has a NaN score, which would silently corrupt
    # any downstream "best model" selection built on these aggregates.
    expe_with_nan_scores = []
    for outer_key in scores.keys():
        for inner_key, indiv_score in scores[outer_key].items():
            if math.isnan(indiv_score.mean()):
                expe_with_nan_scores.append(outer_key + "/experiment" + inner_key)

    if len(expe_with_nan_scores) > 0:
        raise ValueError(
            f'Some experiments have nan scores. Remove them from storage and clean summary folder to continue\n'
            f'experiments with Nan Scores:\n' + '\n'.join(expe_with_nan_scores))

    return sorted_inner_keys