def plot_rollout(out_dir: str, config: ExperimentConfig, result: ExperimentResult, plot_latents: bool, plot_observations: bool, plot_lunar_lander: bool):
    """Compute rollouts for up to ten sequences and dispatch the requested plots.

    Skips all work when neither latent nor observation plots were requested.

    :param out_dir: directory the plot helpers write their figures to.
    :param config: experiment configuration (``N``, ``T``, ``T_train`` are read).
    :param result: experiment result providing smoothed latents and covariances.
    :param plot_latents: whether to produce latent-space rollout plots.
    :param plot_observations: whether to produce observation-space rollout plots.
    :param plot_lunar_lander: whether to additionally produce lunar-lander plots.
    """
    if not (plot_latents or plot_observations):
        return

    n_sequences = min(config.N, 10)
    (latent_rollouts, latent_covs), (obs_rollouts, obs_covs), without_control = compute_rollout(config, result, n_sequences)

    # The control-free rollout is shared across sequences; replicate it per sequence
    # (or replicate None when no control-free rollout was computed).
    if without_control is None:
        latent_wc, obs_wc, obs_covs_wc = None, None, None
    else:
        latent_wc, obs_wc, obs_covs_wc = without_control
    latent_rollouts_without_control = [latent_wc] * n_sequences
    obs_rollouts_without_control = [obs_wc] * n_sequences
    obs_covs_without_control = [obs_covs_wc] * n_sequences

    # Per-sequence prediction rollouts: start from the last smoothed latent state
    # and roll forward over the prediction horizon without control inputs.
    latent_pred_rollouts = []
    latent_pred_covs = []
    obs_pred_rollouts = []
    obs_pred_covs = []
    for n in range(n_sequences):
        prediction = compute_rollout(
            config, result, 1,
            initial_value=result.estimations_latents[n, :, -1],
            initial_cov=result.V_hat[n, :, :, -1],
            T=config.T - config.T_train + 1,
            do_control=False)
        (pred_latents, pred_latent_covs), (pred_obs, pred_obs_covs), _ = prediction
        latent_pred_rollouts.append(pred_latents[0])
        latent_pred_covs.append(pred_latent_covs[0])
        obs_pred_rollouts.append(pred_obs[0])
        obs_pred_covs.append(pred_obs_covs[0])

    if plot_latents:
        _plot_latent_rollout(out_dir, config, result, n_sequences, latent_rollouts, latent_covs, latent_rollouts_without_control, latent_pred_rollouts, latent_pred_covs)
    if plot_observations:
        _plot_observations_rollout(out_dir, config, result, n_sequences, obs_rollouts, obs_covs, obs_rollouts_without_control, obs_covs_without_control, obs_pred_rollouts, obs_pred_covs)
    if plot_lunar_lander:
        _plot_lunar_lander(out_dir, config, result, n_sequences, obs_rollouts)
def compute_our_nrmse(config: ExperimentConfig, result: ExperimentResult) -> Tuple[float, float, float]:
    """Compute NRMSE of the rollout of the first sequence against its observations.

    :param config: experiment configuration (``N`` and ``T_train`` are read).
    :param result: experiment result whose first observation sequence is evaluated.
    :return: a triple ``(overall, train_part, prediction_part)`` of NRMSE values,
             split at ``config.T_train``.
    """
    _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
    expected = result.observations[0]
    actual = obs_rollouts[0]
    split = config.T_train
    overall = compute_nrmse(expected, actual)
    train_part = compute_nrmse(expected[:split], actual[:split])
    prediction_part = compute_nrmse(expected[split:], actual[split:])
    return overall, train_part, prediction_part
def compute_pendulum_nrmse(run: str) -> None:
    """Load a pendulum run and print rollout NRMSE in angle and x/y representation.

    The x/y variant maps the angle column through cosine/sine before comparing,
    which avoids penalizing equivalent angles that differ by wrap-around.

    :param run: path of the run directory to load.
    """
    config, result, _ = load_run(run, 'run', 'metrics')
    expected = result.observations[0]
    _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
    actual = obs_rollouts[0]
    print('Pendulum (Angle):', compute_nrmse(expected, actual))

    # Angle column (index 0) is embedded as (cos, sin); velocity column (index 1)
    # is carried over unchanged. Both sides use the same layout, so the NRMSE
    # comparison is consistent.
    expected_xy = np.concatenate(
        [np.cos(expected[:, 0]), np.sin(expected[:, 0]), expected[:, 1]], axis=0)
    actual_xy = np.concatenate(
        [np.cos(actual[:, 0]), np.sin(actual[:, 0]), actual[:, 1]], axis=0)
    print('Pendulum (x/y): ', compute_nrmse(expected_xy, actual_xy))
def _evaluate_parameters_single_seed(seed: int) -> Optional[Tuple[str, float]]:
    """Run one experiment with the current hyperparameter candidate and score it.

    Uses the enclosing scope's ``candidate`` (latent dim, hidden layer size),
    ``args`` and ``experiment`` to launch a run, then computes the mean
    per-sequence rollout RMSE as the fitness (lower is better).

    :param seed: random seed for this evaluation run.
    :return: tuple of (run output directory, mean rollout RMSE), or None if the
             run raised an exception.
    """
    config_updates = {
        'seed': seed,
        'latent_dim': candidate.latent_dim,
        'observation_model': [f'Linear(in_features, {candidate.hidden_layer_size})',
                              'Tanh()',
                              f'Linear({candidate.hidden_layer_size}, out_features)']
    }
    try:
        run = run_experiment(args.data_file_name, ['with', experiment], results_dir=args.results_dir, config_updates=config_updates, debug=True)
    except Exception as e:
        # Best-effort: a failed run is reported and skipped rather than aborting
        # the whole hyperparameter search.
        print(f'HyperSearch: A run failed with an exception: {e}', file=sys.stderr)
        return None
    config = ExperimentConfig.from_dict(run.config)
    result = ExperimentResult.from_dict(config, run.config, run.experiment_info, run.result)
    _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
    fitness = 0.0
    for n, obs_rollout in enumerate(obs_rollouts):
        # Bug fix: compare each rollout with its own sequence's observations.
        # Previously the subtraction broadcast against result.observations
        # (all N sequences), leaving the enumerate index n unused.
        fitness += np.sqrt(((obs_rollout - result.observations[n]) ** 2).mean())
    storage_observers = [obs for obs in run.observers if isinstance(obs, FileStorageObserver)]
    return storage_observers[0].dir, fitness / len(obs_rollouts)
def _plot_energy_curve(out_dir, run_id, config, n, domain, true_energy, pred_energy, suffix, ylabel):
    """Plot one true-vs-rollout energy curve for sequence ``n`` and save it.

    :param out_dir: output directory for the figure.
    :param run_id: run identifier used in the output file name.
    :param config: experiment configuration (``T_train``, ``h``, ``N`` are read).
    :param n: zero-based sequence index.
    :param domain: time axis values.
    :param true_energy: ground-truth energy series.
    :param pred_energy: rollout energy series.
    :param suffix: file-name suffix ('total', 'kinetic' or 'potential').
    :param ylabel: y-axis label for the plot.
    """
    with SubplotsAndSave(out_dir, f'energy-R{run_id}-N{n}-{suffix}', place_legend_outside=True) as (fig, ax):
        ax.plot(domain, true_energy, color='tuda:blue', label='Truth', zorder=1)
        ax.plot(domain, pred_energy, color='tuda:orange', label='Rollout', zorder=2)
        # Mark where the training data ends and pure prediction begins.
        ax.axvline((config.T_train - 1) * config.h, color='tuda:red', ls='dotted', label='Prediction Boundary', zorder=3)
        if config.N > 1:
            ax.set_title('Sequence %d' % (n + 1))
        ax.set_xlabel(r'$t$')
        ax.set_ylabel(ylabel)


def main():
    """CLI entry point: load runs, compute rollouts and plot per-sequence
    total/kinetic/potential energy of truth vs. rollout."""
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', default='investigation/tmp_figures')
    parser.add_argument('-d', '--result_dir', required=True)
    parser.add_argument('-r', '--runs', required=True)
    args = parser.parse_args()
    out_dir = args.out_dir
    result_dir = args.result_dir
    run_ids = args.runs.split(',')

    bar = progressbar.ProgressBar(widgets=[' Loading Runs: ', Percentage(), ' ', Bar(), ' ', ETA()], maxval=len(run_ids)).start()
    runs = []
    for i, run_id in enumerate(run_ids):
        try:
            runs.append((run_id, *load_run(f'{result_dir}/{run_id}', 'run', 'metrics')))
        except FileNotFoundError:
            print(f'No run found for id {run_id}! Ignoring.', file=sys.stderr)
        except NoResultsFoundException:
            print(f'No results found for run {run_id}! Ignoring.', file=sys.stderr)
        bar.update(i + 1)
    bar.finish()

    # Start with a clean output directory so stale figures never survive.
    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    bar = progressbar.ProgressBar(widgets=['Calculating Rollouts: ', Percentage(), ' ', Bar(), ' ', ETA()], maxval=len(runs)).start()
    data = []
    for i, (run_id, config, result, _) in enumerate(runs):
        _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
        data.append((run_id, config, result, obs_rollouts))
        bar.update(i + 1)
    bar.finish()

    print('Computing energies.')
    runs_energies = compute_energies(data)

    print('Creating plots.')
    for (run_id, config, _, _), energies in zip(data, runs_energies):
        domain = np.arange(config.T) * config.h
        for n, (true_kinetic_energy, true_potential_energy, pred_kinetic_energy, pred_potential_energy) in enumerate(energies):
            # The three stanzas below were previously triplicated inline;
            # the shared plotting logic now lives in _plot_energy_curve.
            _plot_energy_curve(out_dir, run_id, config, n, domain,
                               true_kinetic_energy + true_potential_energy,
                               pred_kinetic_energy + pred_potential_energy,
                               'total', 'Total Energy')
            _plot_energy_curve(out_dir, run_id, config, n, domain,
                               true_kinetic_energy, pred_kinetic_energy,
                               'kinetic', 'Kinetic Energy')
            _plot_energy_curve(out_dir, run_id, config, n, domain,
                               true_potential_energy, pred_potential_energy,
                               'potential', 'Potential Energy')
def main():
    """CLI entry point: load a contiguous range of runs, compute rollouts and
    smoothed observations, calculate the requested metrics, write them to CSV
    and plot each metric against one or more config ordinates."""
    parser = ArgumentParser()
    parser.add_argument('-o', '--out_dir', default='investigation/tmp_figures')
    parser.add_argument('-d', '--result_dir', required=True)
    parser.add_argument('-f', '--from', required=True, type=int, dest='run_from')
    parser.add_argument('-t', '--to', required=True, type=int, dest='run_to')
    parser.add_argument('-m', '--metric', default=','.join(ALL_METRICS))
    parser.add_argument('-a', '--accumulation', default=','.join(ALL_ACCUMULATION_METHODS))
    # Bug fix: the value is split unconditionally below, so omitting -x used to
    # crash with AttributeError on None; required=True yields a clean argparse error.
    parser.add_argument('-x', '--ordinate', required=True)
    args = parser.parse_args()
    out_dir = args.out_dir
    result_dir = args.result_dir
    run_from = args.run_from
    run_to = args.run_to
    metric_names = args.metric.lower().split(',')
    accumulation_methods = args.accumulation.lower().split(',')
    ordinates = args.ordinate.split(',')

    run_ids = [str(x) for x in range(run_from, run_to + 1)]
    print('Reading results from %s/{%s}.' % (result_dir, ','.join(run_ids)))

    bar = progressbar.ProgressBar(widgets=[' Loading Runs: ', Percentage(), ' ', Bar(), ' ', ETA()], maxval=len(run_ids)).start()
    runs = []
    for i, run_id in enumerate(run_ids):
        try:
            runs.append((run_id, *load_run(f'{result_dir}/{run_id}', 'run', 'metrics')))
        except FileNotFoundError:
            print(f'No run found for id {run_id}! Ignoring.', file=sys.stderr)
        except NoResultsFoundException:
            print(f'No results found for run {run_id}! Ignoring.', file=sys.stderr)
        bar.update(i + 1)
    bar.finish()

    # Start with a clean output directory so stale figures never survive.
    if os.path.isdir(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    bar = progressbar.ProgressBar(widgets=['Calculating Rollouts: ', Percentage(), ' ', Bar(), ' ', ETA()], maxval=len(runs)).start()
    data = []
    for i, (run_id, config, result, metrics) in enumerate(runs):
        _, (obs_rollouts, _), _ = compute_rollout(config, result, config.N)
        # Smoothed observations per sequence: latents are stored (dim, T), so
        # transpose to (T, dim); V_hat is (dim, dim, T) -> (T, dim, dim).
        obs_smoothed, _ = zip(*[
            compute_observations(config, result,
                                 result.estimations_latents[n].T,
                                 result.V_hat[n, :, :, :].transpose((2, 0, 1)))
            for n in range(config.N)
        ])
        data.append((run_id, config, result, metrics, obs_rollouts, obs_smoothed))
        bar.update(i + 1)
    bar.finish()

    print('Calculating metrics.')
    metrics = calculate_metrics(data, metric_names, accumulation_methods)

    print('Saving metrics to CSV.')
    with open(f'{out_dir}/metrics.csv', 'w+') as fh:
        fh.write('run_id,%s,metric_name,accumulation_method,value\n' % ','.join(ordinates))
        for metric_name, accumulation_method, _, Y in metrics:
            for run_id, config, y in Y:
                X = ','.join([str(config.config_dict[ordinate]) for ordinate in ordinates])
                fh.write('%s,%s,%s,%s,%f\n' % (str(run_id), X, metric_name, accumulation_method, y))

    print('Plotting metrics.')
    for ordinate in ordinates:
        X = [run[1].config_dict[ordinate] for run in runs]
        x_data = list(sorted(set(X)))
        for metric_name, accumulation_method, max_N, Y in metrics:
            # Group metric values by their ordinate value to compute mean/std per x.
            y_data = [
                [y for _, config, y in Y if config.config_dict[ordinate] == x]
                for x in x_data
            ]
            x = np.asarray(x_data)
            y_mean = np.asarray([np.mean(part) for part in y_data])
            y_std = np.asarray([np.std(part) for part in y_data])
            with SubplotsAndSave(out_dir, f'comparison-{metric_name}-{accumulation_method}-vs-{ordinate}') as (fig, ax):
                ax.plot(x, y_mean, color='tuda:blue', label='Average', zorder=1)
                ax.fill_between(x, y_mean - 2 * y_std, y_mean + 2 * y_std, color='tuda:blue', alpha=0.2, label='Standard Deviation (2x)', zorder=1)
                ax.scatter(X, [y for _, _, y in Y], s=1, color='black', label='Data Points', zorder=2)
                ax.set_title(make_title(metric_name, accumulation_method, max_N))
                ax.set_xlabel(make_xlabel(ordinate))
                ax.set_ylabel(make_ylabel(metric_name))
                ax.legend(loc=('lower right' if metric_name == METRIC_LOG_LIKELIHOOD else 'upper right'))