def main() -> None:
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', default='benchmarking/tmp_figures')
    parser.add_argument('-d', '--result_dir', required=True)
    parser.add_argument('-m', '--morton_result_file', required=True)
    args = parser.parse_args()
    out_dir = args.out_dir
    result_dir = args.result_dir
    morton_result_file = f'{args.morton_result_file}.json'

    # Start from a clean output directory.
    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    config, result, _ = load_run(result_dir, 'run', 'metrics')

    # compute_pendulum_nrmse('tmp_results_grid_search/cpu/latent-dim_pendulum-cpu/10')

    our_nrmse, our_nrmse_train, our_nrmse_pred = compute_our_nrmse(config, result)
    morton_nrmse, morton_nrmse_train, morton_nrmse_pred = compute_morton_nrmse(morton_result_file)

    print('Our Run: %s' % result_dir)
    print('Morton Run: %s' % morton_result_file)
    print('Our NRMSE: %8.5f' % our_nrmse)
    print('Our NRMSE (Train): %8.5f' % our_nrmse_train)
    print('Our NRMSE (Pred.): %8.5f' % our_nrmse_pred)
    print('Morton NRMSE: %8.5f' % morton_nrmse)
    print('Morton NRMSE (Train): %8.5f' % morton_nrmse_train)
    print('Morton NRMSE (Pred.): %8.5f' % morton_nrmse_pred)
    print('Are we better? %s' % ('yes' if our_nrmse < morton_nrmse else 'no'))
    print('Are we better (Train)? %s' % ('yes' if our_nrmse_train < morton_nrmse_train else 'no'))
    print('Are we better (Pred.)? %s' % ('yes' if our_nrmse_pred < morton_nrmse_pred else 'no'))

    # The Morton baseline observes the pendulum angle as cos/sin features, so the
    # observation dimension names are adapted accordingly before plotting.
    observation_dim_names = []
    if 'sine_cosine' in morton_result_file:
        for dim_name in config.observation_dim_names:
            if dim_name == r'$\theta$':
                observation_dim_names.append(r'$\cos(\theta)$')
                observation_dim_names.append(r'$\sin(\theta)$')
            else:
                observation_dim_names.append(dim_name)
    elif 'pendulum' in morton_result_file:
        observation_dim_names = [r'$\cos(\theta)$', r'$\sin(\theta)$', r'$\dot{\theta}$']
    else:
        observation_dim_names = config.observation_dim_names

    plot_morton_result(out_dir, morton_result_file, observation_dim_names)
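# Example invocation of the benchmarking script above (the script file name and the
# Morton result file are hypothetical; the result directory is the one referenced in
# the commented-out call above, and main() appends the '.json' suffix itself):
#
#   python benchmarking/compare_with_morton.py \
#       -d tmp_results_grid_search/cpu/latent-dim_pendulum-cpu/10 \
#       -m benchmarking/morton_results/pendulum_sine_cosine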
def compute_pendulum_nrmse(run: str) -> None:
    config, result, _ = load_run(run, 'run', 'metrics')
    expected = result.observations[0]
    _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
    actual = obs_rollouts[0]
    print('Pendulum (Angle):', compute_nrmse(expected, actual))

    # Map the angle observation to cos/sin coordinates so the NRMSE is comparable
    # to runs that observe the pendulum in x/y space.
    expected_cosine = np.cos(expected[:, 0])
    expected_sine = np.sin(expected[:, 0])
    expected_xy = np.concatenate([expected_cosine, expected_sine, expected[:, 1]], axis=0)
    actual_cosine = np.cos(actual[:, 0])
    actual_sine = np.sin(actual[:, 0])
    actual_xy = np.concatenate([actual_cosine, actual_sine, actual[:, 1]], axis=0)
    print('Pendulum (x/y): ', compute_nrmse(expected_xy, actual_xy))
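# A minimal reference sketch of the NRMSE used above. compute_nrmse is defined
# elsewhere in the repository, so the exact normalization (here: the range of the
# ground-truth values) is an assumption and may differ from the actual code.
import numpy as np


def nrmse_sketch(expected: np.ndarray, actual: np.ndarray) -> float:
    # Root-mean-square error between ground truth and rollout ...
    rmse = np.sqrt(np.mean((expected - actual) ** 2))
    # ... normalized by the spread of the ground truth so that runs with
    # differently scaled observations stay comparable.
    return float(rmse / (expected.max() - expected.min()))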
def main():
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', default='investigation/tmp_figures')
    parser.add_argument('-d', '--result_dir', required=True)
    parser.add_argument('-r', '--runs', required=True)
    args = parser.parse_args()
    out_dir = args.out_dir
    result_dir = args.result_dir
    run_ids = args.runs.split(',')

    bar = progressbar.ProgressBar(widgets=[' Loading Runs: ', Percentage(), ' ', Bar(), ' ', ETA()],
                                  maxval=len(run_ids)).start()
    runs = []
    for i, run_id in enumerate(run_ids):
        try:
            runs.append((run_id, *load_run(f'{result_dir}/{run_id}', 'run', 'metrics')))
        except FileNotFoundError:
            print(f'No run found for id {run_id}! Ignoring.', file=sys.stderr)
        except NoResultsFoundException:
            print(f'No results found for run {run_id}! Ignoring.', file=sys.stderr)
        bar.update(i + 1)
    bar.finish()

    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    bar = progressbar.ProgressBar(widgets=['Calculating Rollouts: ', Percentage(), ' ', Bar(), ' ', ETA()],
                                  maxval=len(runs)).start()
    data = []
    for i, (run_id, config, result, _) in enumerate(runs):
        _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
        data.append((run_id, config, result, obs_rollouts))
        bar.update(i + 1)
    bar.finish()

    print('Computing energies.')
    runs_energies = compute_energies(data)

    print('Creating plots.')
    for (run_id, config, _, _), energies in zip(data, runs_energies):
        domain = np.arange(config.T) * config.h
        for n, (true_kinetic_energy, true_potential_energy, pred_kinetic_energy, pred_potential_energy) in enumerate(energies):
            # Total energy.
            with SubplotsAndSave(out_dir, f'energy-R{run_id}-N{n}-total', place_legend_outside=True) as (fig, ax):
                ax.plot(domain, true_kinetic_energy + true_potential_energy, color='tuda:blue', label='Truth', zorder=1)
                ax.plot(domain, pred_kinetic_energy + pred_potential_energy, color='tuda:orange', label='Rollout', zorder=2)
                ax.axvline((config.T_train - 1) * config.h, color='tuda:red', ls='dotted', label='Prediction Boundary', zorder=3)
                if config.N > 1:
                    ax.set_title('Sequence %d' % (n + 1))
                ax.set_xlabel(r'$t$')
                ax.set_ylabel('Total Energy')

            # Kinetic energy.
            with SubplotsAndSave(out_dir, f'energy-R{run_id}-N{n}-kinetic', place_legend_outside=True) as (fig, ax):
                ax.plot(domain, true_kinetic_energy, color='tuda:blue', label='Truth', zorder=1)
                ax.plot(domain, pred_kinetic_energy, color='tuda:orange', label='Rollout', zorder=2)
                ax.axvline((config.T_train - 1) * config.h, color='tuda:red', ls='dotted', label='Prediction Boundary', zorder=3)
                if config.N > 1:
                    ax.set_title('Sequence %d' % (n + 1))
                ax.set_xlabel(r'$t$')
                ax.set_ylabel('Kinetic Energy')

            # Potential energy.
            with SubplotsAndSave(out_dir, f'energy-R{run_id}-N{n}-potential', place_legend_outside=True) as (fig, ax):
                ax.plot(domain, true_potential_energy, color='tuda:blue', label='Truth', zorder=1)
                ax.plot(domain, pred_potential_energy, color='tuda:orange', label='Rollout', zorder=2)
                ax.axvline((config.T_train - 1) * config.h, color='tuda:red', ls='dotted', label='Prediction Boundary', zorder=3)
                if config.N > 1:
                    ax.set_title('Sequence %d' % (n + 1))
                ax.set_xlabel(r'$t$')
                ax.set_ylabel('Potential Energy')
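# Hypothetical sketch of the per-sequence energy computation that compute_energies is
# assumed to perform for the pendulum (unit mass and unit length, gravity g = 9.81);
# the actual implementation lives elsewhere in the repository and may use different
# constants or a different reference level for the potential energy.
import numpy as np


def pendulum_energies_sketch(theta: np.ndarray, theta_dot: np.ndarray, g: float = 9.81):
    # Kinetic energy of a unit-mass, unit-length pendulum.
    kinetic = 0.5 * theta_dot ** 2
    # Potential energy measured from the stable equilibrium at theta = 0.
    potential = g * (1.0 - np.cos(theta))
    return kinetic, potential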
    dirname = match.group(1)
    if dirname.strip() == '':
        raise Exception('Result container must not be root!')
    if match.group(2) is None:
        item = -1
    else:
        item = int(match.group(2)) - 1
    # Pick the requested run out of the numbered result directories (the latest one
    # if no explicit index was given).
    dirs = sorted([int(x) for x in os.listdir(dirname) if os.path.isdir(dirname + '/' + x) and x.isdigit()])
    result_dir = dirname + '/' + str(dirs[item])

    print('Reading results from %s.' % result_dir)
    config, result, metrics = load_run(result_dir, result_file_name, metrics_file_name)

    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    if 'g_final_log_likelihood' in include_plots:
        if metrics is not None:
            plot_g_final_log_likelihood(out_dir, config, result, metrics)
    if 'log_likelihood' in include_plots:
        if metrics is not None:
            plot_log_likelihood(out_dir, config, result, metrics)
    # Check both plot names for membership; the original condition
    # ('latents_rollout' or 'observations_rollout' in include_plots) was always true.
    if 'latents_rollout' in include_plots or 'observations_rollout' in include_plots:
        plot_lunar_lander = 'lunar_lander' in include_plots and config.gym_environment.startswith('LunarLander')
        plot_rollout(out_dir, config, result, 'latents_rollout'
def main():
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', default='investigation/tmp_figures')
    parser.add_argument('-d', '--result_dir', required=True)
    parser.add_argument('-f', '--from', required=True, type=int, dest='run_from')
    parser.add_argument('-t', '--to', required=True, type=int, dest='run_to')
    parser.add_argument('-m', '--metric', default=','.join(ALL_METRICS))
    parser.add_argument('-a', '--accumulation', default=','.join(ALL_ACCUMULATION_METHODS))
    # The ordinate is used unconditionally below, so require the argument.
    parser.add_argument('-x', '--ordinate', required=True)
    args = parser.parse_args()
    out_dir = args.out_dir
    result_dir = args.result_dir
    run_from = args.run_from
    run_to = args.run_to
    metric_names = args.metric.lower().split(',')
    accumulation_methods = args.accumulation.lower().split(',')
    ordinates = args.ordinate.split(',')
    run_ids = [str(x) for x in range(run_from, run_to + 1)]

    print('Reading results from %s/{%s}.' % (result_dir, ','.join(run_ids)))

    bar = progressbar.ProgressBar(widgets=[' Loading Runs: ', Percentage(), ' ', Bar(), ' ', ETA()],
                                  maxval=len(run_ids)).start()
    runs = []
    for i, run_id in enumerate(run_ids):
        try:
            runs.append((run_id, *load_run(f'{result_dir}/{run_id}', 'run', 'metrics')))
        except FileNotFoundError:
            print(f'No run found for id {run_id}! Ignoring.', file=sys.stderr)
        except NoResultsFoundException:
            print(f'No results found for run {run_id}! Ignoring.', file=sys.stderr)
        bar.update(i + 1)
    bar.finish()

    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    bar = progressbar.ProgressBar(widgets=['Calculating Rollouts: ', Percentage(), ' ', Bar(), ' ', ETA()],
                                  maxval=len(runs)).start()
    data = []
    for i, (run_id, config, result, metrics) in enumerate(runs):
        _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
        obs_smoothed, _ = zip(*[
            compute_observations(config, result, result.estimations_latents[n].T,
                                 result.V_hat[n, :, :, :].transpose((2, 0, 1)))
            for n in range(config.N)
        ])
        data.append((run_id, config, result, metrics, obs_rollouts, obs_smoothed))
        bar.update(i + 1)
    bar.finish()

    print('Calculating metrics.')
    metrics = calculate_metrics(data, metric_names, accumulation_methods)

    print('Saving metrics to CSV.')
    with open(f'{out_dir}/metrics.csv', 'w+') as fh:
        fh.write('run_id,%s,metric_name,accumulation_method,value\n' % ','.join(ordinates))
        for metric_name, accumulation_method, _, Y in metrics:
            for run_id, config, y in Y:
                X = ','.join([str(config.config_dict[ordinate]) for ordinate in ordinates])
                fh.write('%s,%s,%s,%s,%f\n' % (str(run_id), X, metric_name, accumulation_method, y))

    print('Plotting metrics.')
    for ordinate in ordinates:
        X = [run[1].config_dict[ordinate] for run in runs]
        x_data = list(sorted(set(X)))
        for metric_name, accumulation_method, max_N, Y in metrics:
            y_data = []
            for x in x_data:
                y_dat = []
                for _, config, y in Y:
                    if config.config_dict[ordinate] == x:
                        y_dat.append(y)
                y_data.append(y_dat)
            x = np.asarray(x_data)
            y_mean = np.asarray([np.mean(part) for part in y_data])
            y_std = np.asarray([np.std(part) for part in y_data])
            with SubplotsAndSave(out_dir, f'comparison-{metric_name}-{accumulation_method}-vs-{ordinate}') as (fig, ax):
                ax.plot(x, y_mean, color='tuda:blue', label='Average', zorder=1)
                ax.fill_between(x, y_mean - 2 * y_std, y_mean + 2 * y_std, color='tuda:blue', alpha=0.2,
                                label='Standard Deviation (2x)', zorder=1)
                ax.scatter(X, [y for _, _, y in Y], s=1, color='black', label='Data Points', zorder=2)
                ax.set_title(make_title(metric_name, accumulation_method, max_N))
                ax.set_xlabel(make_xlabel(ordinate))
                ax.set_ylabel(make_ylabel(metric_name))
                ax.legend(loc=('lower right' if metric_name == METRIC_LOG_LIKELIHOOD else 'upper right'))
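# Example invocation of the grid-search comparison script above (the script file name
# and the hyperparameter name 'latent_dim' are hypothetical; adjust them to the local
# grid-search layout):
#
#   python investigation/compare_runs.py \
#       -d tmp_results_grid_search/cpu/latent-dim_pendulum-cpu \
#       -f 1 -t 20 \
#       -x latent_dim
#
# This loads runs 1 through 20, writes a metrics.csv into the output directory and
# produces one comparison plot per (metric, accumulation method, ordinate) triple.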