Example #1
def create_figure(
        report: HTMLReport,
        target_poses,
        *list_of_vector_fields
):
    num_vfs = len(list_of_vector_fields)
    width = 7
    height = 7 * num_vfs
    for i, target_pos in enumerate(target_poses):
        fig, axes = plt.subplots(
            num_vfs, figsize=(width, height)
        )
        if num_vfs == 1:
            # plt.subplots returns a bare Axes rather than an array here
            axes = [axes]
        for j, vf in enumerate([vfs[i] for vfs in list_of_vector_fields]):
            # Pick the i-th vector field (the one for this target
            # position) from each group in list_of_vector_fields.
            min_pos = max(target_pos - WaterMaze.TARGET_RADIUS,
                          -WaterMaze.BOUNDARY_DIST)
            max_pos = min(target_pos + WaterMaze.TARGET_RADIUS,
                          WaterMaze.BOUNDARY_DIST)

            """
            Plot Estimated & Optimal QF
            """
            ax = axes[j]
            vu.plot_vector_field(fig, ax, vf)
            ax.vlines([min_pos, max_pos], *ax.get_ylim())
            ax.set_xlabel("Position")
            ax.set_ylabel("Velocity")
            ax.set_title("{0}. t = {1}. Target X Pos = {2}".format(
                vf.info['title'],
                vf.info['time'],
                vf.info['target_pos'],
            ))
        img = vu.save_image(fig)
        report.add_image(img, "Target Position = {}".format(target_pos))
        plt.close(fig)  # avoid accumulating open figures across targets
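
Note: the *list_of_vector_fields argument packs one list of vector fields per
figure row, each indexed by target position, and the inner comprehension
transposes that layout. An equivalent formulation, shown only for clarity:

# grouped[i] holds the i-th vector field from each group, i.e. the
# same elements as [vfs[i] for vfs in list_of_vector_fields]
grouped = list(zip(*list_of_vector_fields))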
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("folder_path", type=str)
    args = parser.parse_args()
    base_dir = Path(os.getcwd())
    base_dir = base_dir / args.folder_path

    path_and_iter = get_path_and_iters(base_dir)

    resolution = 20
    state_bounds = (-WaterMaze.BOUNDARY_DIST, WaterMaze.BOUNDARY_DIST)
    action_bounds = (-1, 1)
    num_target_poses = 5
    target_poses = np.linspace(*state_bounds, num_target_poses)

    report = HTMLReport(
        str(base_dir / 'report.html'), images_per_row=num_target_poses
    )

    report.add_header("test_header")
    report.add_text("test")
    for path, iter_number in path_and_iter:
        data = joblib.load(str(path))
        qf = data['qf']
        print("QF loaded from iteration %d" % iter_number)

        list_of_vector_fields = []
        for time in [0, 24]:
            vector_fields = []
            for target_pos in target_poses:
                qf_vector_field_eval = create_qf_derivative_eval_fnct(
                    qf, target_pos, time
                )
                vector_fields.append(vu.make_vector_field(
                    qf_vector_field_eval,
                    x_bounds=state_bounds,
                    y_bounds=action_bounds,
                    resolution=resolution,
                    info=dict(
                        time=time,
                        target_pos=target_pos,
                        title="Estimated QF and dQ/da",
                    )
                ))
            list_of_vector_fields.append(vector_fields)

        report.add_text("Iteration = {0}".format(iter_number))
        create_figure(
            report,
            target_poses,
            *list_of_vector_fields,
        )
        report.new_row()
    report.save()
Example #3
def main():
    ptu.set_gpu_mode(True)

    obs_dim = 1
    action_dim = 1
    batch_size = 100

    model = NAF(obs_dim, action_dim)
    # model = SeparateDuelingFF(obs_dim, action_dim)
    # model = ConcatFF(obs_dim, action_dim)
    # model = OuterProductFF(obs_dim, action_dim)
    # model.__class__.__name__ would be the natural version tag, but this
    # run overrides it with a more descriptive label:
    version = "NAF-P-depends-on-embedded"

    optimizer = optim.SGD(model.parameters(), lr=1e-7, momentum=0.5)
    loss_fnct = nn.MSELoss()

    num_batches_per_print = 100
    train_size = 100000
    test_size = 10000

    state_bounds = (-10, 10)
    action_bounds = (-10, 10)
    resolution = 20

    base_dir = Path(
        "/home/vitchyr/git/rllab-rail/railrl/data/one-offs/polynomial-nn")
    base_dir = base_dir / version
    if not base_dir.exists():
        base_dir.mkdir()
    report_path = str(base_dir / "report.html")
    report = HTMLReport(report_path, images_per_row=2)
    print("Saving report to: {}".format(report_path))

    train_loader = data.DataLoader(FakeDataset(obs_dim, action_dim, train_size,
                                               state_bounds, action_bounds),
                                   batch_size=batch_size,
                                   shuffle=True)
    test_loader = data.DataLoader(FakeDataset(obs_dim, action_dim, test_size,
                                              state_bounds, action_bounds),
                                  batch_size=batch_size,
                                  shuffle=True)

    model.to(ptu.device)

    def eval_model(state, action):
        state = ptu.Variable(state, requires_grad=False)
        action = ptu.Variable(action, requires_grad=False)
        a, v = model(state, action)
        return a + v

    def train(epoch):
        for batch_idx, (state, action, q_target) in enumerate(train_loader):
            q_estim = eval_model(state, action)
            q_target = ptu.Variable(q_target, requires_grad=False)

            loss = loss_fnct(q_estim, q_target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % num_batches_per_print == 0:
                line_logger.print_over(
                    'Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                        epoch, batch_size * batch_idx, train_size,
                        loss.item()))

    def test(epoch):
        test_losses = []
        for state, action, q_target in test_loader:
            q_estim = eval_model(state, action)
            q_target = ptu.Variable(q_target, requires_grad=False)
            loss = loss_fnct(q_estim, q_target)
            test_losses.append(loss.item())

        line_logger.newline()
        print('Test Epoch: {0}. Loss: {1}'.format(epoch, np.mean(test_losses)))

        report.add_header("Epoch = {}".format(epoch))

        # q_function is the module-level ground-truth function being fit
        fig = visualize_model(q_function, "True Q Function")
        img = vu.save_image(fig)
        report.add_image(img, txt='True Q Function')

        fig = visualize_model(eval_model_np, "Estimated Q Function")
        img = vu.save_image(fig)
        report.add_image(img, txt='Estimated Q Function')

        report.new_row()

    def eval_model_np(state, action):
        state = ptu.Variable(ptu.FloatTensor([[state]]), requires_grad=False)
        action = ptu.Variable(ptu.FloatTensor([[action]]), requires_grad=False)
        a, v = model(state, action)
        q = a + v
        return ptu.get_numpy(q)[0]

    def visualize_model(eval_fn, title):  # `eval_fn` avoids shadowing builtin `eval`
        fig = plt.figure()
        ax = plt.gca()
        heatmap = vu.make_heat_map(
            eval_fn,
            x_bounds=state_bounds,
            y_bounds=action_bounds,
            resolution=resolution,
        )

        vu.plot_heatmap(heatmap, fig, ax)
        ax.set_xlabel("State")
        ax.set_ylabel("Action")
        ax.set_title(title)

        return fig

    for epoch in range(0, 10):
        model.train()
        train(epoch)
        model.eval()
        test(epoch)

    print("Report saved to: {}".format(report_path))
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str)
    parser.add_argument('--report_name', type=str,
                        default='report_retroactive.html')

    args = parser.parse_args()
    directory = args.dir
    report_name = args.report_name

    with open(join(directory, 'variant.json')) as variant_file:
        variant = json.load(variant_file)
    skew_config = get_key_recursive(variant, 'skew_config')
    pkl_paths = glob.glob(directory + '/*.pkl')
    numbered_paths = append_itr(pkl_paths)
    ordered_numbered_paths = sorted(numbered_paths, key=lambda x: x[1])

    report = HTMLReport(join(directory, report_name), images_per_row=5)

    vae_heatmap_imgs = []
    sample_imgs = []
    for path, itr in ordered_numbered_paths:
        print("Processing iteration {}".format(itr))
        with open(path, "rb") as snapshot_file:
            snapshot = pickle.load(snapshot_file)
        if 'vae' in snapshot:
            vae = snapshot['vae']
        else:
            vae = snapshot['p_theta']
        vae.to('cpu')
        vae_train_data = snapshot['train_data']
        dynamics = snapshot.get('dynamics', project_square_border_np_4x4)
        report.add_header("Iteration {}".format(itr))
        vae.xy_range = ((-4, 4), (-4, 4))
        vae_heatmap_img = visualize_vae_samples(
            itr,
            vae_train_data,
            vae,
            report,
            xlim=vae.get_plot_ranges()[0],
            ylim=vae.get_plot_ranges()[1],
            dynamics=dynamics,
        )
        sample_img = visualize_vae(
            vae,
            skew_config,
            report,
            title="Post-skew",
        )
        vae_heatmap_imgs.append(vae_heatmap_img)
        sample_imgs.append(sample_img)

    report.add_header("Summary GIFs")
    for filename, imgs in [
        ("vae_heatmaps", vae_heatmap_imgs),
        ("samples", sample_imgs),
    ]:
        video = np.stack(imgs)
        vwrite(
            '{}/{}.mp4'.format(directory, filename),
            video,
        )
        gif_file_path = '{}/{}.gif'.format(directory, filename)
        gif(gif_file_path, video)
        report.add_image(gif_file_path, txt=filename, is_url=True)

    report.save()
    print("Report saved to")
    print(report.path)
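
append_itr is not shown in this example. Since its output is sorted by the
second tuple element and unpacked as (path, itr), a plausible sketch follows;
the numeric-suffix filename convention is an assumption.

import re


def append_itr(paths):
    """Hypothetical sketch: pair each snapshot path with the iteration
    number parsed from its filename, e.g. 'itr_120.pkl' -> 120."""
    numbered = []
    for path in paths:
        match = re.search(r'(\d+)\.pkl$', path)
        if match:
            numbered.append((path, int(match.group(1))))
    return numbered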
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("folder_path", type=str)
    # parser.add_argument("--num_iters", type=int)
    args = parser.parse_args()
    base = Path(os.getcwd())
    base = base / args.folder_path

    path_and_iter = get_path_and_iters(base)

    resolution = 20
    x_bounds = (-1, 1)
    y_bounds = (-1, 1)

    report = HTMLReport(str(base / 'report.html'), images_per_row=1)

    # for path, itr in takespread(path_and_iter, args):
    for path, itr in [path_and_iter[-1]]:
        report.add_text("Path: %s" % path)
        print("Loading: %s" % path)
        data = joblib.load(str(path))
        qf = data['qf']
        env = data['env']
        qf.train(False)

        start_state = env.reset()
        report.add_text("Start State = {}".format(start_state))
        report.add_text("Start XY = {}".format(
            position_from_angles(np.expand_dims(start_state, 0))))
        goal_states = [start_state]
        goal_states += [env.sample_goal_for_rollout() for _ in range(5)]
        for goal_state in goal_states:
            qf_eval = create_qf_eval_fnct(qf, start_state, goal_state)
            qf_heatmap = vu.make_heat_map(
                qf_eval,
                x_bounds=x_bounds,
                y_bounds=y_bounds,
                resolution=resolution,
            )

            fig = create_figure(
                ['Estimated'],
                [qf_heatmap],
            )
            img = vu.save_image(fig)
            report.add_image(
                img, "Goal State = {}\nGoal XY = {}".format(
                    goal_state,
                    position_from_angles(np.expand_dims(goal_state, 0))))

    abs_path = osp.abspath(report.path)
    print("Report saved to: {}".format(abs_path))
    report.save()
    open_report = query_yes_no("Open report?", default="yes")
    if open_report:
        cmd = "xdg-open {}".format(abs_path)
        print(cmd)
        subprocess.call(cmd, shell=True)
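
create_qf_eval_fnct comes from the same package and is only used through
vu.make_heat_map, so it must return a scalar-valued function of the two
heatmap coordinates. A sketch consistent with that usage, mirroring the ptu
tensor helpers of Example #3; treating the coordinates as a 2-D action, and
the exact observation layout, are assumptions.

import numpy as np


def create_qf_eval_fnct(qf, start_state, goal_state):
    """Hypothetical sketch: fix the (state, goal) pair and return a
    function that evaluates Q at a 2-D action (x, y)."""
    def qf_eval(x, y):
        obs = np.hstack((start_state, goal_state))[None, :]
        action = np.array([[x, y]], dtype=np.float32)
        obs_var = ptu.Variable(ptu.FloatTensor(obs), requires_grad=False)
        action_var = ptu.Variable(
            ptu.FloatTensor(action), requires_grad=False)
        return float(ptu.get_numpy(qf(obs_var, action_var)))
    return qf_eval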
Example #6
def train(dataset_generator,
          n_start_samples,
          projection=project_samples_square_np,
          n_samples_to_add_per_epoch=1000,
          n_epochs=100,
          save_period=10,
          append_all_data=True,
          full_variant=None,
          dynamics_noise=0,
          num_bins=5,
          weight_type='sqrt_inv_p',
          **kwargs):
    report = HTMLReport(
        logger.get_snapshot_dir() + '/report.html',
        images_per_row=3,
    )
    dynamics = Dynamics(projection, dynamics_noise)
    if full_variant:
        report.add_header("Variant")
        report.add_text(
            json.dumps(
                ppp.dict_to_safe_json(full_variant, sort=True),
                indent=2,
            ))

    orig_train_data = dataset_generator(n_start_samples)
    train_data = orig_train_data

    heatmap_imgs = []
    sample_imgs = []
    entropies = []
    tvs_to_uniform = []
    """
    p_theta = previous iteration's model
    p_new = this iteration's distribution
    """
    p_theta = Histogram(num_bins, weight_type=weight_type)
    for epoch in range(n_epochs):
        logger.record_tabular('Epoch', epoch)
        logger.record_tabular('Entropy', p_theta.entropy())
        logger.record_tabular('KL from uniform', p_theta.kl_from_uniform())
        logger.record_tabular('TV to uniform', p_theta.tv_to_uniform())
        entropies.append(p_theta.entropy())
        tvs_to_uniform.append(p_theta.tv_to_uniform())

        samples = p_theta.sample(n_samples_to_add_per_epoch)
        empirical_samples = dynamics(samples)

        if append_all_data:
            train_data = np.vstack((train_data, empirical_samples))
        else:
            train_data = np.vstack((orig_train_data, empirical_samples))

        if epoch == 0 or (epoch + 1) % save_period == 0:
            report.add_text("Epoch {}".format(epoch))
            heatmap_img = visualize_histogram(epoch, p_theta, report)
            sample_img = visualize_samples(epoch, train_data, p_theta, report,
                                           dynamics)
            heatmap_imgs.append(heatmap_img)
            sample_imgs.append(sample_img)
            report.save()

            from PIL import Image
            Image.fromarray(heatmap_img).save(logger.get_snapshot_dir() +
                                              '/heatmap{}.png'.format(epoch))
            Image.fromarray(sample_img).save(logger.get_snapshot_dir() +
                                             '/samples{}.png'.format(epoch))
        weights = p_theta.compute_per_elem_weights(train_data)
        p_new = Histogram(num_bins, weight_type=weight_type)
        p_new.fit(
            train_data,
            weights=weights,
        )
        p_theta = p_new
        logger.dump_tabular()
    plot_curves([
        ("Entropy", entropies),
        ("TVs to Uniform", tvs_to_uniform),
    ], report)
    report.add_text("Max entropy: {}".format(p_theta.max_entropy()))
    report.save()

    heatmap_video = np.stack(heatmap_imgs)
    sample_video = np.stack(sample_imgs)

    vwrite(
        logger.get_snapshot_dir() + '/heatmaps.mp4',
        heatmap_video,
    )
    vwrite(
        logger.get_snapshot_dir() + '/samples.mp4',
        sample_video,
    )
    try:
        gif(
            logger.get_snapshot_dir() + '/samples.gif',
            sample_video,
        )
        gif(
            logger.get_snapshot_dir() + '/heatmaps.gif',
            heatmap_video,
        )
        report.add_image(
            logger.get_snapshot_dir() + '/samples.gif',
            "Samples GIF",
            is_url=True,
        )
        report.add_image(
            logger.get_snapshot_dir() + '/heatmaps.gif',
            "Heatmaps GIF",
            is_url=True,
        )
        report.save()
    except ImportError as e:
        print(e)
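
The default weight_type='sqrt_inv_p' suggests per-element weights of the form
w(x) = p(x) ** -0.5, which up-weights low-density bins toward uniform
coverage. A sketch of what Histogram.compute_per_elem_weights plausibly
computes; the compute_density call matches its use in Example #7 below, but
the epsilon guard is an assumption.

def compute_per_elem_weights(self, data):
    """Hypothetical sketch for weight_type='sqrt_inv_p'."""
    prob = self.compute_density(data)  # per-sample density estimate
    prob = np.maximum(prob, 1e-12)     # assumed guard against empty bins
    return prob ** -0.5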
Example #7
def train(
        dataset_generator,
        n_start_samples,
        projection=project_samples_square_np,
        n_samples_to_add_per_epoch=1000,
        n_epochs=100,
        z_dim=1,
        hidden_size=32,
        save_period=10,
        append_all_data=True,
        full_variant=None,
        dynamics_noise=0,
        decoder_output_var='learned',
        num_bins=5,
        skew_config=None,
        use_perfect_samples=False,
        use_perfect_density=False,
        vae_reset_period=0,
        vae_kwargs=None,
        use_dataset_generator_first_epoch=True,
        **kwargs
):
    """
    Sanitize Inputs
    """
    assert skew_config is not None
    if not (use_perfect_density and use_perfect_samples):
        assert vae_kwargs is not None
    if vae_kwargs is None:
        vae_kwargs = {}

    report = HTMLReport(
        logger.get_snapshot_dir() + '/report.html',
        images_per_row=10,
    )
    dynamics = Dynamics(projection, dynamics_noise)
    if full_variant:
        report.add_header("Variant")
        report.add_text(
            json.dumps(
                ppp.dict_to_safe_json(
                    full_variant,
                    sort=True),
                indent=2,
            )
        )

    vae, decoder, decoder_opt, encoder, encoder_opt = get_vae(
        decoder_output_var,
        hidden_size,
        z_dim,
        vae_kwargs,
    )
    vae.to(ptu.device)

    epochs = []
    losses = []
    kls = []
    log_probs = []
    hist_heatmap_imgs = []
    vae_heatmap_imgs = []
    sample_imgs = []
    entropies = []
    tvs_to_uniform = []
    entropy_gains_from_reweighting = []
    p_theta = Histogram(num_bins)
    p_new = Histogram(num_bins)

    orig_train_data = dataset_generator(n_start_samples)
    train_data = orig_train_data
    start = time.time()
    for epoch in progressbar(range(n_epochs)):
        p_theta = Histogram(num_bins)
        if epoch == 0 and use_dataset_generator_first_epoch:
            vae_samples = dataset_generator(n_samples_to_add_per_epoch)
        else:
            if use_perfect_samples and epoch != 0:
                # Ideally the VAE = p_new, but in practice, it won't be...
                vae_samples = p_new.sample(n_samples_to_add_per_epoch)
            else:
                vae_samples = vae.sample(n_samples_to_add_per_epoch)
        projected_samples = dynamics(vae_samples)
        if append_all_data:
            train_data = np.vstack((train_data, projected_samples))
        else:
            train_data = np.vstack((orig_train_data, projected_samples))

        p_theta.fit(train_data)
        if use_perfect_density:
            prob = p_theta.compute_density(train_data)
        else:
            prob = vae.compute_density(train_data)
        all_weights = prob_to_weight(prob, skew_config)
        p_new.fit(train_data, weights=all_weights)
        if epoch == 0 or (epoch + 1) % save_period == 0:
            epochs.append(epoch)
            report.add_text("Epoch {}".format(epoch))
            hist_heatmap_img = visualize_histogram(p_theta, skew_config, report)
            vae_heatmap_img = visualize_vae(
                vae, skew_config, report,
                resolution=num_bins,
            )
            sample_img = visualize_vae_samples(
                epoch, train_data, vae, report, dynamics,
            )

            visualize_samples(
                p_theta.sample(n_samples_to_add_per_epoch),
                report,
                title="P Theta/RB Samples",
            )
            visualize_samples(
                p_new.sample(n_samples_to_add_per_epoch),
                report,
                title="P Adjusted Samples",
            )
            hist_heatmap_imgs.append(hist_heatmap_img)
            vae_heatmap_imgs.append(vae_heatmap_img)
            sample_imgs.append(sample_img)
            report.save()

            Image.fromarray(hist_heatmap_img).save(
                logger.get_snapshot_dir() + '/hist_heatmap{}.png'.format(epoch)
            )
            Image.fromarray(vae_heatmap_img).save(
                logger.get_snapshot_dir() + '/vae_heatmap{}.png'.format(epoch)
            )
            Image.fromarray(sample_img).save(
                logger.get_snapshot_dir() + '/samples{}.png'.format(epoch)
            )

        """
        train VAE to look like p_new
        """
        if sum(all_weights) == 0:
            all_weights[:] = 1
        if vae_reset_period > 0 and epoch % vae_reset_period == 0:
            vae, decoder, decoder_opt, encoder, encoder_opt = get_vae(
                decoder_output_var,
                hidden_size,
                z_dim,
                vae_kwargs,
            )
            vae.to(ptu.device)
        vae.fit(train_data, weights=all_weights)
        epoch_stats = vae.get_epoch_stats()

        losses.append(np.mean(epoch_stats['losses']))
        kls.append(np.mean(epoch_stats['kls']))
        log_probs.append(np.mean(epoch_stats['log_probs']))
        entropies.append(p_theta.entropy())
        tvs_to_uniform.append(p_theta.tv_to_uniform())
        entropy_gain = p_new.entropy() - p_theta.entropy()
        entropy_gains_from_reweighting.append(entropy_gain)

        for k in sorted(epoch_stats.keys()):
            logger.record_tabular(k, epoch_stats[k])

        logger.record_tabular("Epoch", epoch)
        logger.record_tabular('Entropy', p_theta.entropy())
        logger.record_tabular('KL from uniform', p_theta.kl_from_uniform())
        logger.record_tabular('TV to uniform', p_theta.tv_to_uniform())
        logger.record_tabular('Entropy gain from reweight', entropy_gain)
        logger.record_tabular('Total Time (s)', time.time() - start)
        logger.dump_tabular()
        logger.save_itr_params(epoch, {
            'vae': vae,
            'train_data': train_data,
            'vae_samples': vae_samples,
            'dynamics': dynamics,
        })

    report.add_header("Training Curves")
    plot_curves(
        [
            ("Training Loss", losses),
            ("KL", kls),
            ("Log Probs", log_probs),
            ("Entropy Gain from Reweighting", entropy_gains_from_reweighting),
        ],
        report,
    )
    plot_curves(
        [
            ("Entropy", entropies),
            ("TV to Uniform", tvs_to_uniform),
        ],
        report,
    )
    report.add_text("Max entropy: {}".format(p_theta.max_entropy()))
    report.save()

    for filename, imgs in [
        ("hist_heatmaps", hist_heatmap_imgs),
        ("vae_heatmaps", vae_heatmap_imgs),
        ("samples", sample_imgs),
    ]:
        video = np.stack(imgs)
        vwrite(
            logger.get_snapshot_dir() + '/{}.mp4'.format(filename),
            video,
        )
        local_gif_file_path = '{}.gif'.format(filename)
        gif_file_path = '{}/{}'.format(
            logger.get_snapshot_dir(),
            local_gif_file_path
        )
        gif(gif_file_path, video)
        report.add_image(local_gif_file_path, txt=filename, is_url=True)
    report.save()
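
prob_to_weight generalizes the same reweighting through skew_config. A sketch
assuming the config carries a skewing exponent; the key name 'alpha' is an
assumption, and no normalization is applied, consistent with the
sum(all_weights) == 0 guard above.

def prob_to_weight(prob, skew_config):
    """Hypothetical sketch: raise each density to a (typically negative)
    power so that low-density samples receive larger training weight."""
    alpha = skew_config['alpha']  # assumed key name
    return prob ** alpha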
Example #8
def generate_report(fanova_info: FanovaInfo, base_dir, param_name_to_log=None):
    if param_name_to_log is None:
        param_name_to_log = {}
    f, config_space, X, Y, categorical_remapping, variants_list = fanova_info
    report = HTMLReport(
        osp.join(base_dir, 'report.html'),
        images_per_row=3,
    )

    vis = visualizer.Visualizer(f, config_space)
    cs_params = config_space.get_hyperparameters()
    importances = [get_param_importance(f, param) for param in cs_params]
    are_logs = [
        is_data_log_uniformly_distributed(X[:, i])
        for i in range(len(cs_params))
    ]
    data = sorted(
        zip(cs_params, importances, are_logs),
        key=lambda x: -x[1],
    )
    """
    List out how the categorical hyperparameters were mapped.
    """
    for name, remapping in categorical_remapping.items():
        report.add_text("Remapping for {}:".format(name))
        for key, value in remapping.inverse_dict.items():
            report.add_text("\t{} = {}\n".format(key, value))
    """
    Plot individual marginals.
    """
    print("Creating marginal plots")
    for param, importance, is_log in data:
        param_name = param.name
        if param_name in param_name_to_log:
            is_log = param_name_to_log[param_name]
        if isinstance(param, CategoricalHyperparameter):
            vis.plot_categorical_marginal(param_name, show=False)
        else:
            vis.plot_marginal(param_name, show=False, log_scale=is_log)
        img = vu.save_image()
        report.add_image(
            img,
            "Marginal for {}.\nImportance = {}".format(param_name, importance),
        )
    """
    Plot pairwise marginals.
    """
    print("Creating pairwise-marginal plots")
    num_params = len(cs_params)
    num_pairs = num_params * (num_params + 1) // 2
    pair_and_importance = f.get_most_important_pairwise_marginals(num_pairs)
    for combi, importance in pair_and_importance.items():
        param_names = []
        for p in combi:
            param_names.append(cs_params[p].name)
        info_text = "Pairwise Marginal for {}.\nImportance = {}".format(
            param_names,
            importance,
        )
        if any(is_categorical(f, name) for name in param_names):
            report.add_text(info_text)
            continue
        plot_pairwise_marginal(vis, combi, show=False)
        img = vu.save_image()
        report.add_image(
            img,
            txt=info_text,
        )
    """
    List the top 10 parameters.
    """
    N = min(10, len(Y))
    Y[np.isnan(Y)] = np.nanmin(Y) - 1
    best_idxs = Y.argsort()[-N:][::-1]
    all_param_names = [p.name for p in config_space.get_hyperparameters()]
    for rank, i in enumerate(best_idxs):
        variant = variants_list[i]
        report.add_text("Rank {} params, with score = {}:".format(
            rank + 1, Y[i]))
        for name, value in zip(all_param_names, X[i, :]):
            report.add_text("\t{} = {}\n".format(name, value))
        report.add_text("\texp_name = {}\n".format(variant['exp_name']))
        report.add_text("\tunique_id = {}\n".format(variant['unique_id']))

    print("Guesses for is_log")
    print("{")
    for param, _, is_log in data:
        name = param.name
        print("    '{}': {},".format(name, is_log))
    print("}")
    """
    Ask user if they want to see the report
    """
    abs_path = osp.abspath(report.path)
    print("Report saved to: {}".format(abs_path))
    report.save()
    open_report = query_yes_no("Open report?", default="yes")
    if open_report:
        cmd = "xdg-open {}".format(abs_path)
        print(cmd)
        subprocess.call(cmd, shell=True)