# Third-party imports used throughout these examples; the project-local helpers
# (dataset loaders, visualization utilities, my_hdump, numpify, etc.) come from
# elsewhere in this repo and are left un-imported here.
import pathlib
from time import perf_counter
from typing import Dict, List, Optional

import hjson
import numpy as np
import rospy
import tensorflow as tf
from colorama import Fore
from progressbar import progressbar
from std_msgs.msg import Float32


def viz_main(dataset_dirs: List[pathlib.Path],
             checkpoint: pathlib.Path,
             mode: str,
             batch_size: int,
             only_errors: bool,
             use_gt_rope: bool,
             old_compat: bool = False,
             **kwargs):
    stdev_pub_ = rospy.Publisher("stdev", Float32, queue_size=10)
    traj_idx_pub_ = rospy.Publisher("traj_idx_viz", Float32, queue_size=10)

    ###############
    # Model
    ###############
    trials_directory = pathlib.Path('trials').absolute()
    trial_path = checkpoint.parent.absolute()
    _, params = filepath_tools.create_or_load_trial(
        trial_path=trial_path, trials_directory=trials_directory)

    ###############
    # Dataset
    ###############
    dataset = ClassifierDatasetLoader(
        dataset_dirs,
        load_true_states=True,
        use_gt_rope=use_gt_rope,
        threshold=params['classifier_dataset_hparams']['labeling_params']['threshold'],
        old_compat=old_compat)
    tf_dataset = dataset.get_datasets(mode=mode)
    scenario = dataset.scenario

    ###############
    # Evaluate
    ###############
    tf_dataset = batch_tf_dataset(tf_dataset, batch_size, drop_remainder=True)

    # load the trained classifier from the checkpoint
    model = classifier_utils.load_generic_model([checkpoint])

    for batch_idx, example in enumerate(
            progressbar(tf_dataset, widgets=base_dataset.widgets)):
        example.update(dataset.batch_metadata)
        predictions, _ = model.check_constraint_from_example(example,
                                                             training=False)

        labels = tf.expand_dims(example['is_close'][:, 1:], axis=2)

        probabilities = predictions['probabilities']

        # Visualization
        example.pop("time")
        example.pop("batch_size")
        decisions = probabilities > 0.5
        classifier_is_correct = tf.squeeze(tf.equal(decisions,
                                                    tf.cast(labels, tf.bool)),
                                           axis=-1)
        for b in range(batch_size):
            example_b = index_dict_of_batched_tensors_tf(example, b)

            # if the classifier is correct at all time steps, ignore
            if only_errors and tf.reduce_all(classifier_is_correct[b]):
                continue

            def _custom_viz_t(scenario: Base3DScenario, e: Dict, t: int):
                if t > 0:
                    accept_probability_t = predictions['probabilities'][b, t - 1, 0].numpy()
                else:
                    # no prediction exists for the first time step, so plot a sentinel value
                    accept_probability_t = -999
                scenario.plot_accept_probability(accept_probability_t)

                traj_idx_msg = Float32()
                traj_idx_msg.data = batch_idx * batch_size + b
                traj_idx_pub_.publish(traj_idx_msg)

            anim = RvizAnimation(scenario=scenario,
                                 n_time_steps=dataset.horizon,
                                 init_funcs=[
                                     init_viz_env,
                                     dataset.init_viz_action(),
                                 ],
                                 t_funcs=[
                                     _custom_viz_t,
                                     dataset.classifier_transition_viz_t(),
                                     ExperimentScenario.plot_stdev_t,
                                 ])
            with open("debugging.hjson", 'w') as f:
                my_hdump(numpify(example_b), f)
            anim.play(example_b)
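

# A minimal sketch of how viz_main might be wired up as a script entry point,
# assuming an argparse CLI; the flag names and the node name are illustrative,
# not the repo's actual interface.
def viz_main_example():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_dirs', type=pathlib.Path, nargs='+')
    parser.add_argument('checkpoint', type=pathlib.Path)
    parser.add_argument('--mode', choices=['train', 'val', 'test'], default='val')
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--only-errors', action='store_true')
    parser.add_argument('--use-gt-rope', action='store_true')
    args = parser.parse_args()
    # the publishers created inside viz_main require an initialized ROS node
    rospy.init_node('viz_classifier_dataset')
    viz_main(dataset_dirs=args.dataset_dirs,
             checkpoint=args.checkpoint,
             mode=args.mode,
             batch_size=args.batch_size,
             only_errors=args.only_errors,
             use_gt_rope=args.use_gt_rope)
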
def save_order(outdir: pathlib.Path, subfolders_ordered: List[pathlib.Path]):
    sort_order_filename = outdir / 'sort_order.txt'
    with sort_order_filename.open("w") as sort_order_file:
        my_hdump(subfolders_ordered, sort_order_file)
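

# A small, hypothetical usage sketch for save_order: persist the display order
# of visualization subfolders so it can be reloaded later. The name-based sort
# key is made up for illustration.
def save_order_example(outdir: pathlib.Path):
    subfolders_ordered = sorted([p for p in outdir.iterdir() if p.is_dir()],
                                key=lambda p: p.name)
    save_order(outdir, subfolders_ordered)
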
def make_recovery_dataset_from_params_dict(dataset_dir: pathlib.Path,
                                           fwd_model_dir,
                                           classifier_model_dir: pathlib.Path,
                                           labeling_params: Dict,
                                           outdir: pathlib.Path,
                                           batch_size: int,
                                           use_gt_rope: bool,
                                           start_at: Optional[str] = None,
                                           stop_at: Optional[str] = None):
    # append "best_checkpoint" before loading
    classifier_model_dir = classifier_model_dir / 'best_checkpoint'
    if not isinstance(fwd_model_dir, list):
        fwd_model_dir = [fwd_model_dir]
    fwd_model_dir = [p / 'best_checkpoint' for p in fwd_model_dir]

    np.random.seed(0)
    tf.random.set_seed(0)

    with (dataset_dir / 'hparams.hjson').open('r') as hparams_file:
        dynamics_hparams = hjson.load(hparams_file)
    fwd_model, _ = dynamics_utils.load_generic_model(fwd_model_dir)

    dataset = DynamicsDatasetLoader([dataset_dir], use_gt_rope=use_gt_rope)

    outdir.mkdir(parents=True, exist_ok=True)
    print(Fore.GREEN + f"Making recovery dataset {outdir.as_posix()}")
    new_hparams_filename = outdir / 'hparams.hjson'
    recovery_dataset_hparams = dynamics_hparams

    scenario = fwd_model.scenario
    if not isinstance(classifier_model_dir, list):
        classifier_model_dir = [classifier_model_dir]
    classifier_model = classifier_utils.load_generic_model(
        classifier_model_dir, scenario)

    recovery_dataset_hparams['dataset_dir'] = dataset_dir
    recovery_dataset_hparams['fwd_model_dir'] = fwd_model_dir
    recovery_dataset_hparams['classifier_model'] = classifier_model_dir
    recovery_dataset_hparams['fwd_model_hparams'] = fwd_model.hparams
    recovery_dataset_hparams['labeling_params'] = labeling_params
    recovery_dataset_hparams['state_keys'] = fwd_model.state_keys
    recovery_dataset_hparams['action_keys'] = fwd_model.action_keys
    recovery_dataset_hparams['start-at'] = start_at
    recovery_dataset_hparams['stop-at'] = stop_at
    with new_hparams_filename.open("w") as new_hparams_file:
        my_hdump(recovery_dataset_hparams, new_hparams_file, indent=2)

    start_at = progress_point(start_at)
    stop_at = progress_point(stop_at)

    modes = ['train', 'val', 'test']
    for mode in modes:
        if start_at is not None and modes.index(mode) < modes.index(start_at[0]):
            continue
        if stop_at is not None and modes.index(mode) > modes.index(stop_at[0]):
            continue

        tf_dataset_for_mode = dataset.get_datasets(mode=mode)

        full_output_directory = outdir / mode
        full_output_directory.mkdir(parents=True, exist_ok=True)

        # figure out the record_idx to start at
        record_idx = count_up_to_next_record_idx(full_output_directory)

        # FIXME: start_at is not implemented correctly in the sense that it shouldn't be the same
        #  across train/val/test
        for out_example in generate_recovery_examples(
                tf_dataset=tf_dataset_for_mode,
                modes=modes,
                mode=mode,
                fwd_model=fwd_model,
                classifier_model=classifier_model,
                dataset=dataset,
                labeling_params=labeling_params,
                batch_size=batch_size,
                start_at=start_at,
                stop_at=stop_at):
            # FIXME: is there an extra time/batch dimension?
            for batch_idx in range(out_example['traj_idx'].shape[0]):
                out_example_b = index_dict_of_batched_tensors_tf(
                    out_example, batch_idx)

                # # BEGIN DEBUG
                # from link_bot_data.visualization import init_viz_env, recovery_transition_viz_t, init_viz_action
                # from copy import deepcopy
                #
                # viz_out_example_b = deepcopy(out_example_b)
                # recovery_probability = compute_recovery_probabilities(viz_out_example_b['accept_probabilities'],
                #                                                       labeling_params['n_action_samples'])
                # viz_out_example_b['recovery_probability'] = recovery_probability
                # anim = RvizAnimation(scenario=scenario,
                #                      n_time_steps=labeling_params['action_sequence_horizon'],
                #                      init_funcs=[init_viz_env,
                #                                  init_viz_action(dataset.scenario_metadata, fwd_model.action_keys,
                #                                                  fwd_model.state_keys),
                #                                  ],
                #                      t_funcs=[init_viz_env,
                #                               recovery_transition_viz_t(dataset.scenario_metadata,
                #                                                         fwd_model.state_keys),
                #                               lambda s, e, t: scenario.plot_recovery_probability_t(e, t),
                #                               ])
                # anim.play(viz_out_example_b)
                # # END DEBUG

                tf_write_example(full_output_directory, out_example_b,
                                 record_idx)
                record_idx += 1

    return outdir
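
# A hedged example of driving make_recovery_dataset_from_params_dict; the paths
# and labeling parameters below are placeholders, not values the repo prescribes.
def make_recovery_dataset_example():
    labeling_params = {
        # assumed keys, based on how labeling_params is used above
        'n_action_samples': 10,
        'action_sequence_horizon': 2,
    }
    make_recovery_dataset_from_params_dict(
        dataset_dir=pathlib.Path('fwd_model_data/my_dynamics_dataset'),
        fwd_model_dir=pathlib.Path('trials/my_fwd_model'),
        classifier_model_dir=pathlib.Path('trials/my_classifier'),
        labeling_params=labeling_params,
        outdir=pathlib.Path('recovery_data/my_recovery_dataset'),
        batch_size=16,
        use_gt_rope=True)
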
    # NOTE: this is a method excerpt; the enclosing data-collection class (which
    # provides self.scenario, self.params, self.seed, self.verbose,
    # self.scenario_name, collect_trajectory, and write_example) is not shown here.
    def collect_data(
        self,
        n_trajs: int,
        nickname: str,
        robot_namespace: str,
    ):
        outdir = pathlib.Path('fwd_model_data') / nickname
        full_output_directory = data_directory(outdir, n_trajs)

        files_dataset = FilesDataset(full_output_directory)

        full_output_directory.mkdir(parents=True, exist_ok=True)
        print(Fore.GREEN + full_output_directory.as_posix() + Fore.RESET)

        s_for_size = self.scenario.get_state()
        a_for_size = self.scenario.sample_action(
            action_rng=np.random.RandomState(0),
            environment={},
            state=s_for_size,
            action_params=self.params,
            validate=False)
        state_description = {k: v.shape[0] for k, v in s_for_size.items()}
        action_description = {k: v.shape[0] for k, v in a_for_size.items()}

        dataset_hparams = {
            'nickname': nickname,
            'robot_namespace': robot_namespace,
            'seed': self.seed,
            'n_trajs': n_trajs,
            'data_collection_params': self.params,
            'scenario': self.scenario_name,
            # FIXME: rename this key?
            'scenario_metadata': self.scenario.dynamics_dataset_metadata(),
            'state_description': state_description,
            'action_description': action_description,
        }
        with (full_output_directory / 'hparams.hjson').open('w') as dataset_hparams_file:
            my_hdump(dataset_hparams, dataset_hparams_file, indent=2)

        self.scenario.randomization_initialization()
        self.scenario.on_before_data_collection(self.params)

        t0 = perf_counter()

        # combine the trajectory index with the overall seed so that every
        # trajectory/seed pair gets a unique, reproducible RNG seed
        combined_seeds = [
            traj_idx + 100000 * self.seed for traj_idx in range(n_trajs)
        ]
        for traj_idx, seed in enumerate(combined_seeds):
            env_rng = np.random.RandomState(seed)
            action_rng = np.random.RandomState(seed)

            # Randomize the environment
            randomize = self.params["randomize_n"] and traj_idx % self.params["randomize_n"] == 0
            needs_reset = self.scenario.needs_reset()
            if (not self.params['no_objects'] and randomize) or needs_reset:
                if needs_reset:
                    rospy.logwarn("Reset required!")
                self.scenario.randomize_environment(env_rng, self.params)

            # Generate a new trajectory
            example = self.collect_trajectory(traj_idx=traj_idx,
                                              verbose=self.verbose,
                                              action_rng=action_rng)
            print(
                f'traj {traj_idx}/{n_trajs} ({seed}), {perf_counter() - t0:.4f}s'
            )

            # Save the data
            full_filename = self.write_example(full_output_directory, example,
                                               traj_idx)
            files_dataset.add(full_filename)

        self.scenario.on_after_data_collection(self.params)

        print(Fore.GREEN + full_output_directory.as_posix() + Fore.RESET)

        return files_dataset
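
# A sketch of how collect_data is likely invoked, assuming `collector` is an
# instance of the (unshown) data-collection class this method belongs to; the
# nickname and namespace are placeholders.
def collect_data_example(collector) -> FilesDataset:
    files_dataset = collector.collect_data(n_trajs=100,
                                           nickname='my_rope_data',
                                           robot_namespace='my_robot')
    return files_dataset
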
def make_classifier_dataset_from_params_dict(dataset_dir: pathlib.Path,
                                             fwd_model_dir: List[pathlib.Path],
                                             labeling_params: Dict,
                                             outdir: pathlib.Path,
                                             use_gt_rope: bool,
                                             visualize: bool,
                                             take: Optional[int] = None,
                                             batch_size: Optional[int] = None,
                                             start_at: Optional[int] = None,
                                             stop_at: Optional[int] = None):
    # append "best_checkpoint" before loading
    if not isinstance(fwd_model_dir, list):
        fwd_model_dir = [fwd_model_dir]
    fwd_model_dir = [p / 'best_checkpoint' for p in fwd_model_dir]

    with (dataset_dir / 'hparams.hjson').open('r') as hparams_file:
        dynamics_hparams = hjson.load(hparams_file)
    fwd_models, _ = dynamics_utils.load_generic_model(fwd_model_dir)

    dataset = DynamicsDatasetLoader([dataset_dir], use_gt_rope=use_gt_rope)

    # make sure outdir exists before we write the new hparams file into it
    outdir.mkdir(parents=True, exist_ok=True)
    new_hparams_filename = outdir / 'hparams.hjson'
    classifier_dataset_hparams = dynamics_hparams

    classifier_dataset_hparams['dataset_dir'] = dataset_dir.as_posix()
    classifier_dataset_hparams['fwd_model_hparams'] = fwd_models.hparams
    classifier_dataset_hparams['labeling_params'] = labeling_params
    classifier_dataset_hparams['true_state_keys'] = dataset.state_keys
    classifier_dataset_hparams['predicted_state_keys'] = fwd_models.state_keys
    classifier_dataset_hparams['action_keys'] = dataset.action_keys
    classifier_dataset_hparams['scenario_metadata'] = dataset.hparams[
        'scenario_metadata']
    classifier_dataset_hparams['start-at'] = start_at
    classifier_dataset_hparams['stop-at'] = stop_at
    with new_hparams_filename.open("w") as new_hparams_file:
        my_hdump(classifier_dataset_hparams, new_hparams_file, indent=2)

    # this dataset is still being written, so we can't iterate over it yet,
    # but the loader can still be used for visualization
    classifier_dataset_for_viz = ClassifierDatasetLoader(
        [outdir], use_gt_rope=use_gt_rope)

    t0 = perf_counter()
    total_example_idx = 0
    for mode in ['train', 'val', 'test']:
        tf_dataset = dataset.get_datasets(mode=mode, take=take)

        full_output_directory = outdir / mode
        full_output_directory.mkdir(parents=True, exist_ok=True)

        out_examples_gen = generate_classifier_examples(
            fwd_models, tf_dataset, dataset, labeling_params, batch_size)
        for out_examples in out_examples_gen:
            for out_examples_for_start_t in out_examples:
                actual_batch_size = out_examples_for_start_t['traj_idx'].shape[0]
                for batch_idx in range(actual_batch_size):
                    out_example_b = index_dict_of_batched_tensors_tf(
                        out_examples_for_start_t, batch_idx)

                    # skip degenerate examples whose time_idx has collapsed to a scalar
                    if out_example_b['time_idx'].ndim == 0:
                        continue

                    if visualize:
                        add_label(out_example_b, labeling_params['threshold'])
                        classifier_dataset_for_viz.anim_transition_rviz(
                            out_example_b)

                    tf_write_example(full_output_directory, out_example_b,
                                     total_example_idx)
                    rospy.loginfo_throttle(
                        10,
                        f"Examples: {total_example_idx:10d}, Time: {perf_counter() - t0:.3f}"
                    )
                    total_example_idx += 1

    return outdir
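
# Hedged usage sketch mirroring the recovery-dataset example above; the paths
# and the labeling threshold are placeholders.
def make_classifier_dataset_example():
    labeling_params = {'threshold': 0.05}  # assumed key, per the threshold lookup above
    make_classifier_dataset_from_params_dict(
        dataset_dir=pathlib.Path('fwd_model_data/my_dynamics_dataset'),
        fwd_model_dir=[pathlib.Path('trials/my_fwd_model')],
        labeling_params=labeling_params,
        outdir=pathlib.Path('classifier_data/my_classifier_dataset'),
        use_gt_rope=True,
        visualize=False,
        batch_size=16)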