def test_trajectory_plan():
    d = load_model_and_data()
    controller = ZMQController(
        config=d['config'],
        model_dy=d['model_dy'],
        action_function=d['action_function'],
        observation_function=d['observation_function'],
        visual_observation_function=d['visual_observation_function'],
        planner=d['planner'],
        debug=True,
        zmq_enabled=True,
        camera_info=d['camera_info'],
    )

    plan_msg_file = "/home/manuelli/data/key_dynam/hardware_experiments/demonstrations/stable/2020-07-09-20-27-09_push_right_blue_tapes/plan_msg.p"
    plan_msg = load_pickle(plan_msg_file, encoding='latin1')
    plan_msg = zmq_utils.convert(plan_msg)

    controller._on_plan_msg(plan_msg)
    traj_goal = controller._state_dict['plan'].get_trajectory_goal(15, 5)
    print("traj_goal.shape", traj_goal.shape)

    # reload a fresh copy of the plan message; the copy above was handed to
    # _on_plan_msg and may have been modified in place
    plan_msg_file = "/home/manuelli/data/key_dynam/hardware_experiments/demonstrations/stable/2020-07-09-20-27-09_push_right_blue_tapes/plan_msg.p"
    plan_msg = load_pickle(plan_msg_file, encoding='latin1')
    plan_msg = zmq_utils.convert(plan_msg)

    for i in range(4):
        compute_control_action_msg = {
            'type': "COMPUTE_CONTROL_ACTION",
            'data': plan_msg['data']['plan_data'][i]
        }
        controller._on_compute_control_action(compute_control_action_msg,
                                              visualize=True)
        input("press Enter to continue")
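# Aside: the encoding='latin1' argument above is the standard way to read
# Python 2 pickles under Python 3. A minimal sketch of what a load_pickle
# helper like the one used here presumably wraps (the actual key_dynam
# implementation may differ):
import pickle

def _load_pickle_sketch(filename, encoding='ascii'):
    # binary mode is required; the encoding argument only controls how
    # Python 2 str objects inside the pickle are decoded
    with open(filename, 'rb') as f:
        return pickle.load(f, encoding=encoding)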
Example #2
def load_model_state_dict(model_folder=None):

    models_root = os.path.join(get_data_root(),
                               "dev/experiments/drake_pusher_slider_v2")

    # model_name = "dataset_2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam/trained_models/dynamics/DD_3D/2020-05-12-12-03-05-252242_DD_3D_spatial_z_n_his_2"

    model_name = "dataset_2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam/trained_models/dynamics/DD_3D/2020-05-11-19-44-35-085478_DD_3D_n_his_2"

    model_folder = os.path.join(models_root, model_name)

    model_dy = model_builder.load_dynamics_model_from_folder(
        model_folder)['model_dy']

    # load dense descriptor model
    metadata = load_pickle(os.path.join(model_folder, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(model_folder, 'spatial_descriptors.p'))

    return {
        'model_dy': model_dy,
        'model_dd': model_dd,
        'spatial_descriptor_data': spatial_descriptor_data,
        'metadata': metadata
    }
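Example #3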
def load_model_state_dict(model_folder=None):
    # load dynamics model
    #
    # model_folder = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/dynamics/DD_3D/2020-05-15-04-40-10-770703_DD_3D_all_z_n_his_2"

    models_root = os.path.join(
        get_data_root(),
        "dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/dynamics"
    )
    # model_name = "DD_3D/2020-05-15-00-54-26-961701_DD_3D_n_his_2"
    # model_name = "DD_3D/2020-05-15-04-40-10-770703_DD_3D_all_z_n_his_2"
    model_name = "DD_3D/2020-05-15-02-07-44-204479_DD_3D_spatial_z_n_his_2"
    model_folder = os.path.join(models_root, model_name)

    model_dy = model_builder.load_dynamics_model_from_folder(
        model_folder)['model_dy']

    # load dense descriptor model
    metadata = load_pickle(os.path.join(model_folder, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(model_folder, 'spatial_descriptors.p'))

    return {
        'model_dy': model_dy,
        'model_dd': model_dd,
        'spatial_descriptor_data': spatial_descriptor_data,
        'metadata': metadata
    }
Example #4
def load_model(model_train_dir, strict=True):
    """
    Helper function to load dynamics model and vision model
    """

    model_dy_dict = model_builder.load_dynamics_model_from_folder(
        model_train_dir, strict=strict)
    model_dy = model_dy_dict['model_dy']
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()
    model_name = model_dy_dict['model_name']

    # load dense descriptor model
    metadata = load_pickle(os.path.join(model_train_dir, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(model_train_dir, 'spatial_descriptors.p'))

    return {
        'model_dy': model_dy,
        'model_dd': model_dd,
        'spatial_descriptor_data': spatial_descriptor_data,
        'metadata': metadata,
        'model_name': model_name,
    }
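# Hedged usage sketch for load_model(): model_train_dir is expected to contain
# metadata.p and spatial_descriptors.p, with metadata['model_file'] pointing at
# the dense-descriptor checkpoint. The path below is hypothetical.
#
#     d = load_model("/path/to/trained_models/dynamics/DD_3D/my_run", strict=False)
#     model_dy, model_dd = d['model_dy'], d['model_dd']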
Example #5
def load_model_state_dict(model_folder=None):

    models_root = "/home/manuelli/data/key_dynam/dev/experiments/20/dataset_correlle_mug-small_many_colors_600/trained_models/dynamics"

    # model_name = "DD_3D/2020-06-04-19-18-48-274487_DD_3D_z_state_n_his_2_no_T_aug"
    model_name = "DD_3D/2020-06-05-00-25-39-676089_DD_3D_all_n_his_2"
    # model_name = "DD_3D/2020-06-05-15-25-01-580144_DD_3D_all_z_n_his_2"
    model_folder = os.path.join(models_root, model_name)


    model_dy = model_builder.load_dynamics_model_from_folder(model_folder)['model_dy']

    # load dense descriptor model
    metadata = load_pickle(os.path.join(model_folder, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(os.path.join(model_folder, 'spatial_descriptors.p'))

    return {'model_dy': model_dy,
            'model_dd': model_dd,
            'spatial_descriptor_data': spatial_descriptor_data,
            'metadata': metadata}
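Example #6
# Note: the load_dataset below is shown with its class indentation; it is
# invoked elsewhere in these examples as DynamicSpartanEpisodeReader.load_dataset(...),
# so it is presumably a @staticmethod excerpted from that class.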
    def load_dataset(
            config,  # e.g. experiments/07/config.yaml
            episodes_config,  # dict, e.g. real_push_box.yaml
            episodes_root,  # str: root of where all the logs are stored
            precomputed_data_root=None,
            max_num_episodes=None,
            load_image_episode=True):

        multi_episode_dict = dict()
        for counter, episode_name in enumerate(episodes_config['episodes']):

            # this is for debugging purposes
            if (max_num_episodes is not None) and counter >= max_num_episodes:
                break

            episode_processed_dir = os.path.join(episodes_root, episode_name,
                                                 "processed")

            # load the DenseCorrespondence Episode that handles image observations
            # if requested
            dc_episode = None
            if load_image_episode:

                ####
                precomputed_data = None
                precomputed_data_file = None
                if precomputed_data_root is not None:

                    # build both the pickle (.p) and hdf5 (.h5) filenames from
                    # the episode name (only the pickle is loaded below)
                    precomputed_data_file = os.path.join(
                        precomputed_data_root, "%s.p" % (episode_name))
                    precomputed_data_file_hdf5 = os.path.join(
                        precomputed_data_root, "%s.h5" % (episode_name))

                    if os.path.isfile(precomputed_data_file):
                        precomputed_data = load_pickle(precomputed_data_file)
                    else:
                        raise ValueError("file doesn't exist: %s" %
                                         (precomputed_data_file))

                dc_episode = dc_episode_reader.DynamicSpartanEpisodeReader(
                    config=None,
                    root_dir=episode_processed_dir,
                    name=episode_name,
                    precomputed_data=precomputed_data,
                    precomputed_data_file=precomputed_data_file)

            episode = DynamicSpartanEpisodeReader(
                config,
                episode_processed_dir,
                name=episode_name,
                downsample_rate=config['dataset']['downsample_rate'],
                downsample_idx=0,  # hardcoded for now
                dc_episode_reader=dc_episode,
            )

            multi_episode_dict[episode_name] = episode

        return multi_episode_dict
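Example #7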
def debug():

    save_dir = os.path.join(get_project_root(),
                            'sandbox/mpc/push_right_box_horizontal')

    save_dir = "/home/manuelli/data/key_dynam/sandbox/2020-07-07-20-09-54_push_right_box_horizontal"
    plan_msg = load_pickle(os.path.join(save_dir, 'plan_msg.p'),
                           encoding='latin1')
    plan_msg = zmq_utils.convert(plan_msg)

    compute_control_action_msg = load_pickle(os.path.join(
        save_dir, 'compute_control_action_msg.p'),
                                             encoding='latin1')
    compute_control_action_msg = zmq_utils.convert(compute_control_action_msg)

    K_matrix = None
    T_world_camera = None

    if 'K_matrix' in plan_msg:
        K_matrix = plan_msg['K_matrix']

    if 'T_world_camera' in plan_msg:
        T_world_camera = plan_msg['T_world_camera']

    d = load_model_and_data(
        K_matrix=K_matrix,
        T_world_camera=T_world_camera,
    )

    controller = ZMQController(
        config=d['config'],
        model_dy=d['model_dy'],
        action_function=d['action_function'],
        observation_function=d['observation_function'],
        visual_observation_function=d['visual_observation_function'],
        planner=d['planner'],
        debug=True,
        zmq_enabled=False,
        camera_info=d['camera_info'],
    )

    controller._on_plan_msg(plan_msg)
    controller._on_compute_control_action(compute_control_action_msg)
Example #8
def drake_sim_dataset_loader(
    precomputed_vision_data_dir,  # precomputed_vision_data_dir
    dataset_root,  # location of original dataset
    max_num_episodes=None,
):
    spatial_descriptor_data = load_pickle(
        os.path.join(precomputed_vision_data_dir, 'spatial_descriptors.p'))
    metadata = load_pickle(
        os.path.join(precomputed_vision_data_dir, 'metadata.p'))
    descriptor_keypoints_root = os.path.join(precomputed_vision_data_dir,
                                             'descriptor_keypoints')

    multi_episode_dict = DrakeSimEpisodeReader.load_dataset(
        dataset_root,
        max_num_episodes=max_num_episodes,
        descriptor_keypoints_root=descriptor_keypoints_root)

    return {
        'spatial_descriptors_data': spatial_descriptor_data,
        'metadata': metadata,
        'multi_episode_dict': multi_episode_dict,
    }
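# Hedged usage sketch (both paths are hypothetical). Note the returned key is
# spelled 'spatial_descriptors_data' here, unlike 'spatial_descriptor_data' in
# the other loaders above:
#
#     out = drake_sim_dataset_loader(
#         precomputed_vision_data_dir="/path/to/precomputed_vision_data",
#         dataset_root="/path/to/dataset",
#         max_num_episodes=10)
#     episodes = out['multi_episode_dict']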
Example #9
def select_spatially_separated_descriptors(K=5,  # number of reference descriptors
                                           output_dir=None,
                                           visualize=False):
    raise ValueError("deprecated")
    multi_episode_dict = exp_utils.load_episodes()['multi_episode_dict']
    model_file = exp_utils.get_DD_model_file()

    confidence_scores_folder = os.path.join(get_data_root(),
                                            "dev/experiments/09/descriptor_confidence_scores/2020-03-25-19-57-26-556093_constant_velocity_500/2020-03-30-14-21-13-371713")

    # folder = "dev/experiments/07/descriptor_confidence_scores/real_push_box/2020-03-10-15-57-43-867147"
    folder = confidence_scores_folder
    folder = os.path.join(get_data_root(), folder)
    data_file = os.path.join(folder, 'data.p')
    data = load_pickle(data_file)

    heatmap_values = data['heatmap_values']
    scoring_func = keypoint_selection.create_scoring_function(gamma=3)
    score_data = keypoint_selection.score_heatmap_values(heatmap_values,
                                                         scoring_func=scoring_func)
    sorted_idx = score_data['sorted_idx']

    metadata_file = os.path.join(folder, 'metadata.p')
    metadata = load_pickle(metadata_file)
    camera_name = metadata['camera_name']

    keypoint_idx = keypoint_selection.select_spatially_separated_keypoints(sorted_idx,
                                                                           metadata['indices'],
                                                                           position_diff_threshold=30,
                                                                           K=K,
                                                                           verbose=False)

    ref_descriptors = metadata['ref_descriptors'][keypoint_idx]  # [K, D]
    spatial_descriptors_data = score_data
    spatial_descriptors_data['spatial_descriptors'] = ref_descriptors
    spatial_descriptors_data['spatial_descriptors_idx'] = keypoint_idx
    save_pickle(spatial_descriptors_data, os.path.join(folder, 'spatial_descriptors.p'))
Example #10
def load_episodes_from_config(config):
    """
    Loads episodes using the path specified in the config
    :param config:
    :type config:
    :return:
    :rtype:
    """
    data_path = config["dataset"]["data_path"]
    if not os.path.isabs(data_path):
        data_path = os.path.join(get_project_root(), data_path)

    # load the data
    print("loading data from disk . . . ")
    raw_data = load_pickle(data_path)
    print("finished loading data")
    episodes = PyMunkEpisodeReader.load_pymunk_episodes_from_raw_data(raw_data)

    return episodes
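# Hedged usage sketch: a relative data_path is resolved against the project
# root. The path below is hypothetical.
#
#     config = {"dataset": {"data_path": "test_data/pusher_slider/episodes.p"}}
#     episodes = load_episodes_from_config(config)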
Example #11
def create_pusher_slider_keypoint_dataset(config=None):
    # load some previously generated data

    project_root = get_project_root()
    if config is None:
        config_file = os.path.join(project_root, "experiments/02/config.yaml")
        config = load_yaml(config_file)

    action_function = ActionFunctionFactory.pusher_velocity
    obs_function = ObservationFunctionFactory.pusher_pose_slider_keypoints(
        config)

    DATA_PATH = os.path.join(
        project_root,
        "test_data/pusher_slider_10_episodes/2019-10-22-21-30-02-536750.p")

    raw_data = load_pickle(DATA_PATH)
    episodes = PyMunkEpisodeReader.load_pymunk_episodes_from_raw_data(raw_data)

    # create MultiEpisodeDataset
    dataset = MultiEpisodeDataset(config,
                                  action_function=action_function,
                                  observation_function=obs_function,
                                  episodes=episodes)

    episode = dataset.get_random_episode()
    data_0 = episode.get_observation(0)
    data_1 = episode.get_observation(1)

    print("time 0", data_0["sim_time"])
    print("time 1", data_1["sim_time"])

    # episode_name = episodes.keys()[0]
    # episode = episodes[episode_name]
    # data = episode.data
    # print("episode.data.keys()", episode.data.keys())
    # print("test ", type(data["trajectory"][0].keys()))
    # print("test ", data["trajectory"][0].keys())
    return dataset, config
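Example #12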
def main():
    dataset_name, multi_episode_dict = load_episodes()

    ## Load Model
    model_name, model_file = get_DD_model_file()
    model = torch.load(model_file)
    model = model.cuda()
    model = model.eval()

    # make this unique
    output_dir = os.path.join(get_data_root(),
                              "dev/experiments/09/precomputed_vision_data",
                              "dataset_%s" % (dataset_name),
                              "model_name_%s" % (model_name),
                              get_current_YYYY_MM_DD_hh_mm_ss_ms())

    camera_name = "camera_1_top_down"
    episode_name = "2020-05-13-21-55-01-487901_idx_33"
    episode_idx = 22

    # compute descriptor confidence scores
    if True:
        print("\n\n---------Computing Descriptor Confidence Scores-----------")
        metadata_file = os.path.join(output_dir, 'metadata.p')
        if os.path.isfile(metadata_file):
            answer = input(
                "metadata.p file already exists, do you want to overwrite it? y/n\n"
            )

            if answer == "y":
                # os.rmdir only removes empty directories; use shutil.rmtree
                # (requires `import shutil`, as in the later variant of this
                # function) to clear the non-empty output directory
                shutil.rmtree(output_dir)
                print("removing existing directory and continuing")

            else:
                print("aborting")
                quit()

        compute_descriptor_confidences(
            multi_episode_dict,
            model,
            output_dir,
            batch_size=10,
            num_workers=20,
            model_file=model_file,
            camera_name=camera_name,
            num_ref_descriptors=50,
            num_batches=10,
            episode_name_arg=episode_name,
            episode_idx=episode_idx,
        )

    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        # metadata_file = "/media/hdd/data/key_dynam/dev/experiments/09/precomputed_vision_data/dataset_2020-03-25-19-57-26-556093_constant_velocity_500/model_name_2020-04-07-14-31-35-804270_T_aug_dataset/2020-04-09-20-51-50-624799/metadata.p"
        # metadata = load_pickle(metadata_file)

        print("\n\n---------Precomputing Descriptor Keypoints-----------")
        descriptor_keypoints_output_dir = os.path.join(output_dir,
                                                       "descriptor_keypoints")
        precompute_descriptor_keypoints(
            multi_episode_dict,
            model,
            descriptor_keypoints_output_dir,
            ref_descriptors_metadata=metadata,
            batch_size=10,
            num_workers=20,
        )

    if True:
        confidence_score_data_file = os.path.join(output_dir, 'data.p')
        confidence_score_data = load_pickle(confidence_score_data_file)
        print(
            "\n\n---------Selecting Spatially Separated Keypoints-----------")
        score_and_select_spatially_separated_keypoints(
            metadata,
            confidence_score_data=confidence_score_data,
            K=5,
            position_diff_threshold=25,
            output_dir=output_dir,
        )

    print("Data saved at: ", output_dir)
    print("Finished Normally")
Example #13
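# As above, this load_dataset is shown with its class indentation; it is called
# earlier as DrakeSimEpisodeReader.load_dataset(...), so it is presumably a
# @staticmethod excerpted from DrakeSimEpisodeReader.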
    def load_dataset(
        dataset_root,  # str: folder containing dataset
        load_image_data=True,
        descriptor_images_root=None,  # str: (optional) folder containing hdf5 files with descriptors
        descriptor_keypoints_root=None,
        max_num_episodes=None,  # int, max num episodes to load
        precomputed_data_root=None,
    ):
        """

        :param dataset_root: folder should contain
            - config.yaml
            - metadata.yaml
            - <episode_name.p>
            - <episode_name.h5>
        :type dataset_root:
        :return:
        :rtype:
        """

        if load_image_data:
            from key_dynam.dense_correspondence.dc_drake_sim_episode_reader import DCDrakeSimEpisodeReader

        metadata = load_yaml(DrakeSimEpisodeReader.metadata_file(dataset_root))
        multi_episode_dict = dict()
        episode_names = list(metadata['episodes'].keys())
        episode_names.sort()  # sort the keys

        num_episodes = len(episode_names)

        # optionally don't read all episodes
        if (max_num_episodes is not None) and (max_num_episodes > 0):
            # compute the number of episodes to read, in sorted order
            num_episodes = int(min(len(episode_names), max_num_episodes))

        for idx in range(num_episodes):
            episode_name = episode_names[idx]
            val = metadata['episodes'][episode_name]

            # load non image data
            non_image_data_file = os.path.join(dataset_root,
                                               val['non_image_data_file'])
            assert os.path.isfile(
                non_image_data_file), "File doesn't exist: %s" % (
                    non_image_data_file)
            non_image_data = load_pickle(non_image_data_file)

            dc_episode_reader = None
            if load_image_data:

                # load image data
                image_data_file = os.path.join(dataset_root,
                                               val['image_data_file'])
                assert os.path.isfile(
                    image_data_file), "File doesn't exist: %s" % (
                        image_data_file)

                descriptor_image_data_file = None
                if descriptor_images_root is not None:
                    descriptor_image_data_file = os.path.join(
                        descriptor_images_root, val['image_data_file'])

                    assert os.path.isfile(
                        descriptor_image_data_file
                    ), "File doesn't exist: %s" % (descriptor_image_data_file)

                descriptor_keypoints_data = None
                descriptor_keypoints_hdf5_file = None
                if descriptor_keypoints_root is not None:

                    # replace .h5 filename with .p for pickle file
                    descriptor_keypoints_data_file = val[
                        'image_data_file'].split(".")[0] + ".p"
                    descriptor_keypoints_data_file = os.path.join(
                        descriptor_keypoints_root,
                        descriptor_keypoints_data_file)

                    descriptor_keypoints_hdf5_file = os.path.join(
                        descriptor_keypoints_root, val['image_data_file'])

                    if os.path.isfile(descriptor_keypoints_data_file):
                        descriptor_keypoints_data = load_pickle(
                            descriptor_keypoints_data_file)
                    else:
                        assert os.path.isfile(
                            descriptor_keypoints_hdf5_file
                        ), "File doesn't exist: %s" % (
                            descriptor_keypoints_hdf5_file)

                ####
                precomputed_data = None
                precomputed_data_file = None
                if precomputed_data_root is not None:

                    # replace .h5 filename with .p for pickle file
                    precomputed_data_file = val['image_data_file'].split(
                        ".")[0] + ".p"
                    precomputed_data_file = os.path.join(
                        precomputed_data_root, precomputed_data_file)

                    if os.path.isfile(precomputed_data_file):
                        precomputed_data = load_pickle(precomputed_data_file)
                    else:
                        raise ValueError("file doesn't exist: %s" %
                                         (precomputed_data_file))

                dc_episode_reader = DCDrakeSimEpisodeReader(
                    non_image_data,
                    image_data_file,
                    descriptor_image_data_file=descriptor_image_data_file,
                    descriptor_keypoints_data=descriptor_keypoints_data,
                    descriptor_keypoints_data_file=
                    descriptor_keypoints_hdf5_file,
                    precomputed_data=precomputed_data,
                    precomputed_data_file=precomputed_data_file)

            episode_reader = DrakeSimEpisodeReader(
                non_image_data=non_image_data,
                episode_name=episode_name,
                dc_episode_reader=dc_episode_reader)

            multi_episode_dict[episode_name] = episode_reader

        return multi_episode_dict
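# Design note on the filename swaps above: val['image_data_file'].split(".")[0]
# drops everything after the *first* dot, so a name containing extra dots would
# be truncated. A sketch of a more robust variant using os.path.splitext:
#
#     base, _ = os.path.splitext(val['image_data_file'])  # "episode_0.h5" -> "episode_0"
#     precomputed_data_file = os.path.join(precomputed_data_root, base + ".p")
Example #14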
def main(dataset_name):

    # sample from specific image
    dataset_paths = None
    episode_name = None
    camera_name = None
    episode_idx = None
    if dataset_name == "dps_box_on_side_600":
        camera_name = "camera_angled"
        episode_name = "2020-05-13-21-55-01-487901_idx_33"
        episode_idx = 22
        dataset_paths = exp_18_utils.get_dataset_paths(dataset_name)
    elif dataset_name == "correlle_mug-small_single_color_600":
        camera_name = "camera_1_top_down"
        episode_name = "2020-06-02-14-15-27-898104_idx_56"
        episode_idx = 18
        dataset_paths = exp_20_utils.get_dataset_paths(dataset_name)
    elif dataset_name == "correlle_mug-small_many_colors_600":
        camera_name = "camera_1_top_down"
        episode_name = "2020-06-03-15-48-50-165064_idx_56"
        episode_idx = 20
        dataset_paths = exp_20_utils.get_dataset_paths(dataset_name)
    else:
        raise ValueError("unknown dataset")

    dataset_root = dataset_paths['dataset_root']
    dataset_name = dataset_paths['dataset_name']

    ## Load Model
    model_name, model_file = get_DD_model_file(dataset_name)
    model_train_dir = os.path.dirname(model_file)

    print("model_train_dir", model_train_dir)
    print("model_file", model_file)
    model = torch.load(model_file)
    model = model.cuda()
    model = model.eval()

    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
        dataset_root, max_num_episodes=None)

    output_dir = os.path.join(
        model_train_dir,
        'precomputed_vision_data/descriptor_keypoints/dataset_%s/' %
        (dataset_name))

    # compute descriptor confidence scores
    if False:
        print("\n\n---------Computing Descriptor Confidence Scores-----------")
        metadata_file = os.path.join(output_dir, 'metadata.p')
        if os.path.isfile(metadata_file):
            answer = input(
                "metadata.p file already exists, do you want to overwrite it? y/n\n"
            )

            if answer == "y":
                shutil.rmtree(output_dir)
                print("removing existing file and continuing")

            else:
                print("aborting")
                quit()

        set_seed(0)

        compute_descriptor_confidences(
            multi_episode_dict,
            model,
            output_dir,
            batch_size=10,
            num_workers=20,
            model_file=model_file,
            camera_name=camera_name,
            num_ref_descriptors=50,
            num_batches=10,
            episode_name_arg=episode_name,
            episode_idx=episode_idx,
        )

    if False:
        confidence_score_data_file = os.path.join(output_dir, 'data.p')
        confidence_score_data = load_pickle(confidence_score_data_file)

        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        print(
            "\n\n---------Selecting Spatially Separated Keypoints-----------")
        score_and_select_spatially_separated_keypoints(
            metadata,
            confidence_score_data=confidence_score_data,
            K=4,
            position_diff_threshold=15,
            output_dir=output_dir,
        )

    # visualize descriptors
    if False:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        episode_name = metadata['episode_name']
        episode_idx = metadata['episode_idx']
        camera_name = metadata['camera_name']

        episode = multi_episode_dict[episode_name]
        data = episode.get_image_data(camera_name, episode_idx)
        rgb = data['rgb']

        uv = metadata['indices']

        print("uv.shape", uv.shape)

        color = [0, 255, 0]
        draw_reticles(rgb, uv[:, 0], uv[:, 1], label_color=color)

        save_file = os.path.join(output_dir, 'sampled_descriptors.png')

        plt.figure()
        plt.imshow(rgb)
        plt.savefig(save_file)
        plt.show()

    # visualize spatially separated descriptors
    if False:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        spatial_descriptor_file = os.path.join(output_dir,
                                               'spatial_descriptors.p')
        spatial_descriptors_data = load_pickle(spatial_descriptor_file)
        des_idx = spatial_descriptors_data['spatial_descriptors_idx']

        episode_name = metadata['episode_name']
        episode_idx = metadata['episode_idx']
        camera_name = metadata['camera_name']

        episode = multi_episode_dict[episode_name]
        data = episode.get_image_data(camera_name, episode_idx)
        rgb = data['rgb']

        uv = metadata['indices']

        print("uv.shape", uv.shape)

        color = [0, 255, 0]
        draw_reticles(rgb, uv[des_idx, 0], uv[des_idx, 1], label_color=color)

        save_file = os.path.join(output_dir,
                                 'spatially_separated_descriptors.png')

        plt.figure()
        plt.imshow(rgb)
        plt.savefig(save_file)
        plt.show()

    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        # metadata_file = "/media/hdd/data/key_dynam/dev/experiments/09/precomputed_vision_data/dataset_2020-03-25-19-57-26-556093_constant_velocity_500/model_name_2020-04-07-14-31-35-804270_T_aug_dataset/2020-04-09-20-51-50-624799/metadata.p"
        # metadata = load_pickle(metadata_file)

        print("\n\n---------Precomputing Descriptor Keypoints-----------")
        descriptor_keypoints_output_dir = os.path.join(output_dir,
                                                       "descriptor_keypoints")
        precompute_descriptor_keypoints(multi_episode_dict,
                                        model,
                                        descriptor_keypoints_output_dir,
                                        ref_descriptors_metadata=metadata,
                                        batch_size=8,
                                        num_workers=20,
                                        camera_names=[camera_name])

    print("Data saved at: ", output_dir)
    print("Finished Normally")
import numpy as np
from key_dynam.utils.utils import load_pickle, save_yaml
from key_dynam.utils import meshcat_utils

data_file = "/home/manuelli/data/key_dynam/hardware_experiments/closed_loop_rollouts/stable/2020-07-10-22-16-08_long_push_on_long_side/mpc_rollouts/2020-07-10-22-19-03-591910/data.p"

data = load_pickle(data_file)
pts = data['plan']['plan_data'][-1]['dynamics_model_input_data'][
    'visual_observation']['pts_W']
print("pts\n", pts)

centroid = np.mean(pts, axis=0)
pts_centered = pts - centroid
save_data = {'object_points': pts_centered.tolist()}
save_file = "object_points_master.yaml"
save_yaml(save_data, save_file)
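# sanity check: centering removes the mean, so the centered cloud should have
# a numerically zero centroid (relies on numpy semantics only)
assert np.allclose(np.mean(pts_centered, axis=0), 0.0, atol=1e-8)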

# do some meshcat debug
vis = meshcat_utils.make_default_visualizer_object()
meshcat_utils.visualize_points(vis,
                               "object_points_centered",
                               pts_centered,
                               color=[0, 0, 255],
                               size=0.01)

meshcat_utils.visualize_points(vis,
                               "object_points_world",
                               pts,
                               color=[0, 255, 0],
                               size=0.01)
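Example #16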
def load_model_and_data():

    dataset_name = "push_box_hardware"

    # model_name = "DD_2D/2020-06-24-22-22-58-234812_DD_3D_n_his_2" # this model is actually 3D
    # model_name = "DD_3D/2020-06-25-00-49-29-679042_DD_3D_n_his_2_T_aug"
    # model_name = "DD_3D/2020-06-25-00-39-29-020621_DD_3D_n_his_2"

    model_name = "DD_3D/2020-07-02-17-59-21-362337_DD_3D_n_his_2_T_aug"
    train_dir = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_hardware/trained_models/dynamics"

    train_dir = os.path.join(train_dir, model_name)
    ckpt_file = os.path.join(train_dir, "net_best_dy_state_dict.pth")

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model
    model_dy = build_dynamics_model(config)
    # print("state_dict.keys()", state_dict.keys())
    model_dy.load_state_dict(state_dict)
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(train_dir, 'spatial_descriptors.p'))
    metadata = load_pickle(os.path.join(train_dir, 'metadata.p'))

    # load the dataset
    dataset_paths = get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    episodes_config = dataset_paths['episodes_config']

    precomputed_vision_data_root = DD_utils.get_precomputed_data_root(
        dataset_name)['precomputed_data_root']

    descriptor_keypoints_root = os.path.join(precomputed_vision_data_root,
                                             'descriptor_keypoints')

    multi_episode_dict = DynamicSpartanEpisodeReader.load_dataset(
        config=config,
        episodes_config=episodes_config,
        episodes_root=dataset_paths['dataset_root'],
        load_image_episode=True,
        precomputed_data_root=descriptor_keypoints_root,
        max_num_episodes=None)

    visual_observation_function = PrecomputedVisualObservationFunctionFactory.function_from_config(
        config,
        keypoint_idx=spatial_descriptor_data['spatial_descriptors_idx'])

    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(
        config)
    dataset = MultiEpisodeDataset(
        config,
        action_function=action_function,
        observation_function=observation_function,
        episodes=multi_episode_dict,
        visual_observation_function=visual_observation_function,
        phase="valid",  # this means no data augmentation
    )

    return {
        "model_dy": model_dy,
        'dataset': dataset,
        'config': config,
        "multi_episode_dict": multi_episode_dict,
        'spatial_descriptor_data': spatial_descriptor_data,
    }
Example #17
def load_model_and_data():
    dataset_name = "push_box_hardware"

    # model_name = "DD_2D/2020-06-24-22-22-58-234812_DD_3D_n_his_2" # this model is actually 3D
    # model_name = "DD_3D/2020-06-25-00-49-29-679042_DD_3D_n_his_2_T_aug"
    # model_name = "DD_3D/2020-06-25-00-39-29-020621_DD_3D_n_his_2"

    model_name = "DD_3D/2020-07-02-17-59-21-362337_DD_3D_n_his_2_T_aug"
    train_dir = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_hardware/trained_models/dynamics"

    train_dir = os.path.join(train_dir, model_name)
    ckpt_file = os.path.join(train_dir, "net_best_dy_state_dict.pth")

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model
    model_dy = build_dynamics_model(config)
    # print("state_dict.keys()", state_dict.keys())
    model_dy.load_state_dict(state_dict)
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(train_dir, 'spatial_descriptors.p'))
    metadata = load_pickle(os.path.join(train_dir, 'metadata.p'))

    # build dense-descriptor model
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    # load the dataset
    dataset_paths = get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    episodes_config = dataset_paths['episodes_config']

    precomputed_vision_data_root = DD_utils.get_precomputed_data_root(
        dataset_name)['precomputed_data_root']

    descriptor_keypoints_root = os.path.join(precomputed_vision_data_root,
                                             'descriptor_keypoints')

    multi_episode_dict = DynamicSpartanEpisodeReader.load_dataset(
        config=config,
        episodes_config=episodes_config,
        episodes_root=dataset_paths['dataset_root'],
        load_image_episode=True,
        precomputed_data_root=descriptor_keypoints_root,
        max_num_episodes=None)

    visual_observation_function = PrecomputedVisualObservationFunctionFactory.function_from_config(
        config,
        keypoint_idx=spatial_descriptor_data['spatial_descriptors_idx'])

    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(
        config)
    dataset = MultiEpisodeDataset(
        config,
        action_function=action_function,
        observation_function=observation_function,
        episodes=multi_episode_dict,
        visual_observation_function=visual_observation_function,
        phase="valid",  # this means no data augmentation
    )

    #### PLANNER #######
    planner = None
    # make a planner config
    planner_config = copy.copy(model_dy.config)
    config_tmp = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_22_push_box_hardware/config_DD_3D.yaml'))
    planner_config['mpc'] = config_tmp['mpc']
    # PLANNER_TYPE is presumably a module-level constant, e.g. "random_shooting" or "mppi"
    if PLANNER_TYPE == "random_shooting":
        planner = RandomShootingPlanner(planner_config)
    elif PLANNER_TYPE == "mppi":
        planner = PlannerMPPI(planner_config)
    else:
        raise ValueError("unknown planner type: %s" % (PLANNER_TYPE))

    return {
        "model_dy": model_dy,
        'model_dd': model_dd,
        'dataset': dataset,
        'config': config,
        "multi_episode_dict": multi_episode_dict,
        'spatial_descriptor_data': spatial_descriptor_data,
        'planner': planner,
        'observation_function': observation_function,
        'action_function': action_function,
    }
Example #18
def train_dynamics(config,
                   train_dir, # str: directory to save output
                   multi_episode_dict, # multi_episode_dict
                   ):

    use_precomputed_keypoints = (config['dataset']['visual_observation']['enabled']
                                 and config['dataset']['visual_observation']['descriptor_keypoints'])

    # set random seed for reproduction
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))


    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(config,
                                              action_function=action_function,
                                              observation_function=observation_function,
                                              episodes=multi_episode_dict,
                                              phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase], batch_size=config['train']['batch_size'],
            shuffle=(phase == 'train'),
            num_workers=config['train']['num_workers'], drop_last=True)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .


    '''
    define model for dynamics prediction
    '''

    model_dy = build_visual_dynamics_model(config)
    K = config['vision_net']['num_ref_descriptors']

    print("model_dy.vision_net._reference_descriptors.shape", model_dy.vision_net._ref_descriptors.shape)
    print("model_dy.vision_net.descriptor_dim", model_dy.vision_net.descriptor_dim)
    print("model_dy #params: %d" % count_trainable_parameters(model_dy))

    camera_name = config['vision_net']['camera_name']
    W = config['env']['rgbd_sensors']['sensor_list'][camera_name]['width']
    H = config['env']['rgbd_sensors']['sensor_list'][camera_name]['height']
    diag = np.sqrt(W**2 + H**2) # use this to scale the loss
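    # e.g. for a 640x480 camera, diag = sqrt(640**2 + 480**2) = 800.0, so
    # dividing pixel-space states by diag keeps the MSE loss below in roughly
    # the [0, 1] range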

    # sample reference descriptors unless using precomputed keypoints
    if not use_precomputed_keypoints:
        episode_names = list(datasets["train"].episode_dict.keys())
        episode_names.sort()
        episode_name = episode_names[0]
        episode = datasets["train"].episode_dict[episode_name]
        episode_idx = 0
        camera_name = config["vision_net"]["camera_name"]
        image_data = episode.get_image_data(camera_name, episode_idx)
        des_img = torch.Tensor(image_data['descriptor'])
        mask_img = torch.Tensor(image_data['mask'])
        ref_descriptor_dict = sample_descriptors(des_img,
                                                 mask_img,
                                                 config['vision_net']['num_ref_descriptors'])

        model_dy.vision_net._ref_descriptors.data = ref_descriptor_dict['descriptors']
        model_dy.vision_net.reference_image = image_data['rgb']
        model_dy.vision_net.reference_indices = ref_descriptor_dict['indices']
    else:
        metadata_file = os.path.join(get_data_root(), config['dataset']['descriptor_keypoints_dir'], 'metadata.p')
        descriptor_metadata = load_pickle(metadata_file)

        # [32, D] where D is the descriptor dimension
        ref_descriptors = torch.Tensor(descriptor_metadata['ref_descriptors'])

        # [K, D]
        ref_descriptors = ref_descriptors[:K]
        model_dy.vision_net._ref_descriptors.data = ref_descriptors
        model_dy.vision_net._ref_descriptors_metadata = descriptor_metadata

        # this is just a sanity check
        assert model_dy.vision_net.num_ref_descriptors == K

    print("reference_descriptors", model_dy.vision_net._ref_descriptors)

    # criterion
    criterionMSE = nn.MSELoss()
    l1Loss = nn.L1Loss()

    # optimizer
    params = model_dy.parameters()
    lr = float(config['train']['lr'])
    optimizer = optim.Adam(params, lr=lr, betas=(config['train']['adam_beta1'], 0.999))

    # setup scheduler
    sc = config['train']['lr_scheduler']
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=sc['factor'],
                                  patience=sc['patience'],
                                  threshold_mode=sc['threshold_mode'],
                                  cooldown= sc['cooldown'],
                                  verbose=True)

    if use_gpu:
        print("using gpu")
        model_dy = model_dy.cuda()

    print("model_dy.vision_net._ref_descriptors.device", model_dy.vision_net._ref_descriptors.device)
    print("model_dy.vision_net #params: %d" %(count_trainable_parameters(model_dy.vision_net)))


    best_valid_loss = np.inf
    global_iteration = 0
    epoch_counter_external = 0

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:
                model_dy.train(phase == 'train')

                meter_loss_rmse = AverageMeter()
                step_duration_meter = AverageMeter()


                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):

                    step_start_time = time.time()

                    global_iteration += 1

                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if DEBUG:
                            print("global iteration: %d" %(global_iteration))


                        # visual_observations = data['visual_observations']
                        visual_observations_list = data['visual_observations_list']
                        observations = data['observations']
                        actions = data['actions']

                        if use_gpu:
                            observations = observations.cuda()
                            actions = actions.cuda()

                        # states, actions = data
                        assert actions.size(1) == n_samples

                        B = actions.size(0)
                        loss_mse = 0.


                        # compute the output of the visual model for all timesteps
                        visual_model_output_list = []
                        for visual_obs in visual_observations_list:
                            # visual_obs is a dict containing observation for a single
                            # time step (of course across a batch however)
                            # visual_obs[<camera_name>]['rgb_tensor'] has shape [B, 3, H, W]

                            # probably need to cast input to cuda
                            dynamics_net_input = None
                            if use_precomputed_keypoints:
                                # precomputed keypoints stored on disk have
                                # K_disk = 32 entries; trim down to the first K
                                # [B, K_disk, 2]
                                keypoints = visual_obs[camera_name]['descriptor_keypoints']

                                # [B, K, 2]
                                keypoints = keypoints[:, :K]

                                if DEBUG:
                                    print("keypoints.shape", keypoints.shape)

                                dynamics_net_input = keypoints.flatten(start_dim=1)
                            else:
                                out_dict = model_dy.vision_net.forward(visual_obs)

                                # [B, vision_model_out_dim]
                                dynamics_net_input = out_dict['dynamics_net_input']

                            visual_model_output_list.append(dynamics_net_input)

                        # concatenate this into a tensor
                        # [B, n_samples, vision_model_out_dim]
                        visual_model_output = torch.stack(visual_model_output_list, dim=1)

                        # cast this to float so it can be concatenated below
                        visual_model_output = visual_model_output.type_as(observations)

                        if DEBUG:
                            print('visual_model_output.shape', visual_model_output.shape)
                            print("observations.shape", observations.shape)
                            print("actions.shape", actions.shape)

                        # states is obtained by concatenating visual_model_output and observations
                        # [B, n_samples, vision_model_out_dim + obs_dim]
                        states = torch.cat((visual_model_output, observations), dim=-1)

                        # state_cur: B x n_his x state_dim
                        state_cur = states[:, :n_his]

                        if DEBUG:
                            print("states.shape", states.shape)

                        for j in range(n_roll):

                            if DEBUG:
                                print("n_roll j: %d" %(j))

                            state_des = states[:, n_his + j]

                            # action_cur: B x n_his x action_dim
                            action_cur = actions[:, j : j + n_his] if actions is not None else None

                            # state_pred: B x state_dim
                            # named to avoid shadowing the builtin input()
                            dyn_net_input = {'observation': state_cur,
                                             'action': action_cur,
                                             }

                            if DEBUG:
                                print("state_cur.shape", state_cur.shape)
                                print("action_cur.shape", action_cur.shape)

                            state_pred = model_dy.dynamics_net(dyn_net_input)

                            # normalize by diag to ensure the loss is in [0,1] range
                            loss_mse_cur = criterionMSE(state_pred/diag, state_des/diag)
                            loss_mse += loss_mse_cur / n_roll

                            # l1Loss
                            loss_l1 = l1Loss(state_pred, state_des)

                            # update state_cur
                            # state_pred.unsqueeze(1): B x 1 x state_dim
                            # state_cur: B x n_his x state_dim
                            state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], 1)

                            meter_loss_rmse.update(np.sqrt(loss_mse.item()), B)

                    step_duration_meter.update(time.time() - step_start_time)
                    if phase == 'train':
                        optimizer.zero_grad()
                        loss_mse.backward()
                        optimizer.step()

                    if (i % config['train']['log_per_iter'] == 0) or (global_iteration % config['train']['log_per_iter'] == 0):
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i, data_n_batches[phase],
                            get_lr(optimizer))
                        log += ', rmse: %.6f (%.6f)' % (
                            np.sqrt(loss_mse.item()), meter_loss_rmse.avg)

                        log += ', step time %.6f' %(step_duration_meter.avg)
                        step_duration_meter.reset()


                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 100 iterations
                        if global_iteration > 100:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss_MSE/%s" %(phase), loss_mse.item(), global_iteration)
                            writer.add_scalar("L1/%s" %(phase), loss_l1.item(), global_iteration)
                            writer.add_scalar("L1_fraction/%s" %(phase), loss_l1.item()/diag, global_iteration)
                            writer.add_scalar("RMSE average loss/%s" %(phase), meter_loss_rmse.avg, global_iteration)

                    if phase == 'train' and i % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))

                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'], meter_loss_rmse.avg, best_valid_loss)
                print(log)

                if phase == 'valid':
                    if config['train']['lr_scheduler']['enabled']:
                        scheduler.step(meter_loss_rmse.avg)

                    # print("\nPhase == valid")
                    # print("meter_loss_rmse.avg", meter_loss_rmse.avg)
                    # print("best_valid_loss", best_valid_loss)
                    if meter_loss_rmse.avg < best_valid_loss:
                        best_valid_loss = meter_loss_rmse.avg
                        save_model(model_dy, '%s/net_best_dy' % (train_dir))

                writer.flush() # flush SummaryWriter events to disk

    except KeyboardInterrupt:
        # save network if we have a keyboard interrupt
        save_model(model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external))
        writer.flush() # flush SummaryWriter events to disk
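# Hedged usage sketch for train_dynamics(); the paths are hypothetical and the
# config must provide the ['train'], ['dataset'] and ['vision_net'] keys used above:
#
#     config = load_yaml("/path/to/config.yaml")
#     multi_episode_dict = DrakeSimEpisodeReader.load_dataset("/path/to/dataset")
#     train_dynamics(config, "/path/to/train_dir", multi_episode_dict)
Example #19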
def load_model_and_data(
    K_matrix=None,
    T_world_camera=None,
):

    dataset_name = "push_box_hardware"

    model_name = "DD_3D/2020-07-02-17-59-21-362337_DD_3D_n_his_2_T_aug"
    train_dir = os.path.join(
        get_data_root(),
        "dev/experiments/22/dataset_push_box_hardware/trained_models/dynamics")
    # train_dir = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_hardware/trained_models/dynamics"

    train_dir = os.path.join(train_dir, model_name)
    ckpt_file = os.path.join(train_dir, "net_best_dy_state_dict.pth")

    train_config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model
    model_dy = build_dynamics_model(train_config)
    # print("state_dict.keys()", state_dict.keys())
    model_dy.load_state_dict(state_dict)
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    # load the dataset
    dataset_paths = get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    episodes_config = dataset_paths['episodes_config']

    spatial_descriptor_data = load_pickle(
        os.path.join(train_dir, 'spatial_descriptors.p'))
    metadata = load_pickle(os.path.join(train_dir, 'metadata.p'))

    ref_descriptors = spatial_descriptor_data['spatial_descriptors']
    ref_descriptors = torch_utils.cast_to_torch(ref_descriptors).cuda()

    # dense descriptor model
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    camera_name = train_config['dataset']['visual_observation_function'][
        'camera_name']

    camera_info = None
    if (T_world_camera is not None) and (K_matrix is not None):
        camera_info = {
            "K": K_matrix,
            'T_world_camera': T_world_camera,
        }
    else:
        camera_info = get_spartan_camera_info(camera_name)

    camera_info['camera_name'] = camera_name
    visual_observation_function = \
        VisualObservationFunctionFactory.descriptor_keypoints_3D(config=train_config,
                                                                 camera_name=camera_name,
                                                                 model_dd=model_dd,
                                                                 ref_descriptors=ref_descriptors,
                                                                 K_matrix=camera_info['K'],
                                                                 T_world_camera=camera_info['T_world_camera'],
                                                                 )

    action_function = ActionFunctionFactory.function_from_config(train_config)
    observation_function = ObservationFunctionFactory.function_from_config(
        train_config)

    #### PLANNER #######
    planner = None
    # make a planner config
    planner_config = copy.copy(train_config)
    config_tmp = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_22_push_box_hardware/config_DD_3D.yaml'))
    planner_config['mpc'] = config_tmp['mpc']
    if PLANNER_TYPE == "random_shooting":
        planner = RandomShootingPlanner(planner_config)
    elif PLANNER_TYPE == "mppi":
        planner = PlannerMPPI(planner_config)
    else:
        raise ValueError("unknown planner type: %s" % (PLANNER_TYPE))

    return {
        "model_dy": model_dy,
        'config': train_config,
        'spatial_descriptor_data': spatial_descriptor_data,
        'action_function': action_function,
        'observation_function': observation_function,
        'visual_observation_function': visual_observation_function,
        'planner': planner,
        'camera_info': camera_info,
    }
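# For reference, camera_info['K'] is a 3x3 pinhole intrinsics matrix and
# camera_info['T_world_camera'] a 4x4 camera-to-world homogeneous transform.
# A hedged sketch with illustrative placeholder values only:
#
#     import numpy as np
#     K_matrix = np.array([[615., 0., 320.],
#                          [0., 615., 240.],
#                          [0., 0., 1.]])
#     T_world_camera = np.eye(4)
#     d = load_model_and_data(K_matrix=K_matrix, T_world_camera=T_world_camera)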
Example #20
def run_precompute_descriptors_pipeline(multi_episode_dict, # dict
                                        model, # dense descriptor model file
                                        model_file=None,
                                        output_dir=None, # str where to save data
                                        episode_name=None, # optional for descriptor sampling
                                        camera_name=None, # which camera to compute descriptors for
                                        episode_idx=None, # optional for descriptor sampling
                                        visualize=True,
                                        K=5,
                                        position_diff_threshold=20,
                                        seed=0,
                                        ):

    assert model_file is not None
    assert camera_name is not None
    # NOTE: `model` is expected to be pre-loaded; previously this function
    # loaded it itself (model = torch.load(model_file); model.cuda().eval())
    # and derived output_dir from the model's training directory

    # compute descriptor confidence scores
    set_seed(seed)
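    # NOTE: each `if True:` below is a manual toggle for one stage of the
    # pipeline; set individual stages to False to skip them on re-runs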
    if True:
        print("\n\n---------Computing Descriptor Confidence Scores-----------")
        metadata_file = os.path.join(output_dir, 'metadata.p')
        if os.path.isfile(metadata_file):
            answer = input("metadata.p file already exists, do you want to overwrite it? y/n\n")

            if answer == "y":
                shutil.rmtree(output_dir)
                print("removed existing output directory, continuing")
            else:
                print("aborting")
                quit()

        compute_descriptor_confidences(multi_episode_dict,
                                       model,
                                       output_dir,
                                       batch_size=10,
                                       num_workers=20,
                                       model_file=model_file,
                                       camera_name=camera_name,
                                       num_ref_descriptors=50,
                                       num_batches=10,
                                       episode_name_arg=episode_name,
                                       episode_idx=episode_idx,
                                       )

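    # score the reference descriptors and keep a spatially separated subset of K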
    if True:
        confidence_score_data_file = os.path.join(output_dir, 'data.p')
        confidence_score_data = load_pickle(confidence_score_data_file)

        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        print("\n\n---------Selecting Spatially Separated Keypoints-----------")
        score_and_select_spatially_separated_keypoints(metadata,
                                                       confidence_score_data=confidence_score_data,
                                                       K=K,
                                                       position_diff_threshold=position_diff_threshold,
                                                       output_dir=output_dir,
                                                       )

    # visualize descriptors
    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        episode_name = metadata['episode_name']
        episode_idx = metadata['episode_idx']
        camera_name = metadata['camera_name']

        episode = multi_episode_dict[episode_name]
        data = episode.get_image_data(camera_name, episode_idx)
        rgb = data['rgb']

        uv = metadata['indices']

        print("uv.shape", uv.shape)

        color = [0, 255, 0]
        draw_reticles(rgb, uv[:, 0], uv[:, 1], label_color=color)

        save_file = os.path.join(output_dir, 'sampled_descriptors.png')

        plt.figure()
        plt.imshow(rgb)
        plt.savefig(save_file)
        if visualize:
            plt.show()

    # visualize spatially separated descriptors
    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        spatial_descriptor_file = os.path.join(output_dir, 'spatial_descriptors.p')
        spatial_descriptors_data = load_pickle(spatial_descriptor_file)
        des_idx = spatial_descriptors_data['spatial_descriptors_idx']

        episode_name = metadata['episode_name']
        episode_idx = metadata['episode_idx']
        camera_name = metadata['camera_name']

        episode = multi_episode_dict[episode_name]
        data = episode.get_image_data(camera_name, episode_idx)
        rgb = data['rgb']

        uv = metadata['indices']

        print("uv.shape", uv.shape)

        color = [0, 255, 0]
        draw_reticles(rgb, uv[des_idx, 0], uv[des_idx, 1], label_color=color)

        save_file = os.path.join(output_dir, 'spatially_separated_descriptors.png')

        plt.figure()
        plt.imshow(rgb)
        plt.savefig(save_file)
        if visualize:
            plt.show()


    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        print("\n\n---------Precomputing Descriptor Keypoints-----------")
        descriptor_keypoints_output_dir = os.path.join(output_dir, "descriptor_keypoints")
        precompute_descriptor_keypoints(multi_episode_dict,
                                        model,
                                        descriptor_keypoints_output_dir,
                                        ref_descriptors_metadata=metadata,
                                        batch_size=8,
                                        num_workers=20,
                                        camera_names=[camera_name]
                                        )

    print("Data saved at: ", output_dir)
    print("Finished Normally")
Beispiel #21
import os

from key_dynam.utils.utils import load_pickle
from key_dynam.dataset.drake_sim_episode_reader import DrakeSimEpisodeReader

data_file = "/home/manuelli/data/key_dynam/dev/experiments/18/data/dps_box_on_side_600/2020-05-13-21-53-45-823302_idx_0.p"

non_image_data = load_pickle(data_file)
episode = DrakeSimEpisodeReader(non_image_data)

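# grab the first timestep and inspect its structure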
data = episode.get_data(0)

print("data.keys()", data.keys())
print("data['observation']", data['observation'].keys())
Beispiel #22
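# NOTE: this snippet assumes the enclosing file already imports os, load_yaml,
# load_pickle, get_project_root, get_dataset_paths, get_experiment_save_root,
# get_current_YYYY_MM_DD_hh_mm_ss_ms, DynamicSpartanEpisodeReader and
# PrecomputedVisualObservationFunctionFactory from the key_dynam package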
def DD_3D_dynamics(dataset_name):
    from key_dynam.training.train_dynamics_pusher_slider_precomputed_keypoints import train_dynamics
    from key_dynam.experiments.drake_pusher_slider import DD_utils

    def load_config():
        return load_yaml(
            os.path.join(
                get_project_root(),
                'experiments/exp_22_push_box_hardware/config_DD_3D.yaml'))

    dataset_paths = get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    episodes_config = dataset_paths['episodes_config']

    precomputed_vision_data_root = DD_utils.get_precomputed_data_root(
        dataset_name)['precomputed_data_root']

    descriptor_keypoints_root = os.path.join(precomputed_vision_data_root,
                                             'descriptor_keypoints')

    config = load_config()
    multi_episode_dict = DynamicSpartanEpisodeReader.load_dataset(
        config=config,
        episodes_config=episodes_config,
        episodes_root=dataset_paths['dataset_root'],
        load_image_episode=True,
        precomputed_data_root=descriptor_keypoints_root,
        max_num_episodes=None)

    experiment_save_root = get_experiment_save_root(dataset_name)

    # experiment_save_root = os.path.join(get_data_root(), 'sandbox')

    # standard training run (no data augmentation)
    if True:
        TRAIN = True

        spatial_descriptor_data = load_pickle(
            os.path.join(precomputed_vision_data_root,
                         'spatial_descriptors.p'))
        metadata = load_pickle(
            os.path.join(precomputed_vision_data_root, 'metadata.p'))

        train_dir = None

        config = load_config()
        config['train']['n_history'] = 2
        config['train']['random_seed'] = 1
        config['dataset']['visual_observation_function'][
            'camera_name'] = dataset_paths['main_camera_name']

        config['dataset'][
            'precomputed_data_root'] = precomputed_vision_data_root
        suffix = "_DD_3D_n_his_2"
        model_name = get_current_YYYY_MM_DD_hh_mm_ss_ms() + suffix

        if TRAIN:
            train_dir = os.path.join(experiment_save_root,
                                     'trained_models/dynamics/DD_3D/',
                                     model_name)

            os.makedirs(train_dir)

            visual_observation_function = PrecomputedVisualObservationFunctionFactory.function_from_config(
                config,
                keypoint_idx=spatial_descriptor_data['spatial_descriptors_idx']
            )

            train_dynamics(
                config=config,
                train_dir=train_dir,
                multi_episode_dict=multi_episode_dict,
                visual_observation_function=visual_observation_function,
                metadata=metadata,
                spatial_descriptors_data=spatial_descriptor_data)

    # data-augmentation variant: identical to the standard block above except
    # that config['dataset']['data_augmentation'] is enabled; guarded with
    # `if False:` so it does not run by default
    if False:
        TRAIN = True

        spatial_descriptor_data = load_pickle(
            os.path.join(precomputed_vision_data_root,
                         'spatial_descriptors.p'))
        metadata = load_pickle(
            os.path.join(precomputed_vision_data_root, 'metadata.p'))

        train_dir = None

        config = load_config()
        config['train']['n_history'] = 2
        config['train']['random_seed'] = 1
        config['dataset']['visual_observation_function'][
            'camera_name'] = dataset_paths['main_camera_name']
        config['dataset']['data_augmentation']['enabled'] = True

        config['dataset'][
            'precomputed_data_root'] = precomputed_vision_data_root
        suffix = "_DD_3D_n_his_2_T_aug"
        model_name = get_current_YYYY_MM_DD_hh_mm_ss_ms() + suffix

        if TRAIN:
            train_dir = os.path.join(experiment_save_root,
                                     'trained_models/dynamics/DD_3D/',
                                     model_name)

            os.makedirs(train_dir)

            visual_observation_function = PrecomputedVisualObservationFunctionFactory.function_from_config(
                config,
                keypoint_idx=spatial_descriptor_data['spatial_descriptors_idx']
            )

            train_dynamics(
                config=config,
                train_dir=train_dir,
                multi_episode_dict=multi_episode_dict,
                visual_observation_function=visual_observation_function,
                metadata=metadata,
                spatial_descriptors_data=spatial_descriptor_data)
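

# Usage sketch: launch the run above for a dataset known to get_dataset_paths();
# the dataset name below is a placeholder, not a real dataset.
if __name__ == "__main__":
    DD_3D_dynamics(dataset_name="push_box_hardware")  # hypothetical name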