Example #1
def get_dataset_paths(dataset_name):
    if dataset_name == "push_box_hardware":

        episodes_root = os.path.join(get_data_ssd_root(),
                                     "dataset/push_box_hardware")
        episodes_config = load_yaml(
            os.path.join(
                get_project_root(),
                'experiments/exp_22_push_box_hardware/push_box_hardware_episodes_config.yaml'
            ))

        transporter_model_chkpt = None
        dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_string_pull/trained_models/perception/dense_descriptors/data_aug_2020-07-02-02-39-27-400442/net_best_model.pth"

        return {
            'dataset_name': dataset_name,
            'dataset_root': episodes_root,
            'episodes_config': episodes_config,
            'main_camera_name': 'd415_01',
            'dense_descriptor_camera_list': ['d415_01', 'd415_02'],
            'transporter_model_chkpt': transporter_model_chkpt,
            'dense_descriptor_model_chkpt': dense_descriptor_model_chkpt,
        }
    elif dataset_name == "push_box_string_pull":
        episodes_root = os.path.join(get_data_ssd_root(),
                                     "dataset/push_box_string_pull")
        episodes_config = load_yaml(
            os.path.join(
                get_project_root(),
                'experiments/exp_22_push_box_hardware/push_box_string_pull_episodes_config.yaml'
            ))

        transporter_model_chkpt = None

        dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_string_pull/trained_models/perception/dense_descriptors/data_aug_2020-07-02-02-39-27-400442/net_best_model.pth"

        return {
            'dataset_name': dataset_name,
            'dataset_root': episodes_root,
            'episodes_config': episodes_config,
            'main_camera_name': 'd415_01',
            'dense_descriptor_camera_list': ['d415_01', 'd415_02'],
            'transporter_model_chkpt': transporter_model_chkpt,
            'dense_descriptor_model_chkpt': dense_descriptor_model_chkpt,
        }

    else:
        raise ValueError("unknown dataset:", dataset_name)
Example #2
def test_pusher_slider_dataset():
    # dataset, config = create_pusher_slider_dataset()

    project_root = get_project_root()
    config_file = os.path.join(project_root, "experiments/01/config.yaml")
    config = load_yaml(config_file)

    # new dataset loading approach
    episodes = load_episodes_from_config(config)
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(
        config)

    dataset = MultiEpisodeDataset(config,
                                  action_function=action_function,
                                  observation_function=observation_function,
                                  episodes=episodes,
                                  phase="train")

    data = dataset[0]  # test the getitem
    print("type(data)", type(data))
    print("list(data)", list(data))

    print(type(data["observations"]))
    print("observations.shape", data["observations"].shape)
    print("actions.shape", data["actions"].shape)

    print("observations", data["observations"])
    print("actions", data["actions"])

    stats = dataset.compute_dataset_statistics()

    print("stats", stats)
Example #3
def load_config():
    config = load_yaml(
        os.path.join(
            get_project_root(),
            "experiments/exp_22_push_box_hardware/integral_heatmap_3d.yaml"
        ))
    return config
Example #4
def load_autoencoder_model():
    train_dir = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_2020-04-20-14-58-21-418302_T_aug_random_velocity_1000/trained_models/dynamics/autoencoder/2020-06-08-19-08-45-269917_z_dim_16"

    chkpt_file = "net_dy_epoch_294_iter_100_state_dict.pth"
    ckpt_file = os.path.join(train_dir, chkpt_file)

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model
    model_dy = build_dynamics_model(config)
    model_dy.load_state_dict(state_dict['dynamics'])
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    # build autoencoder model
    model_ae = ConvolutionalAutoencoder.from_global_config(config)
    model_ae.load_state_dict(state_dict['autoencoder'])
    model_ae = model_ae.eval()
    model_ae = model_ae.cuda()

    # visual observation function
    visual_observation_func = VisualObservationFunctionFactory.autoencoder_latent_state(
        config, model_ae=model_ae)

    return {
        'model_ae': model_ae,
        'model_dy': model_dy,
        'visual_observation_function': visual_observation_func
    }
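
A hypothetical caller; both models come back in eval mode on the GPU, so inference would typically run under torch.no_grad():

models = load_autoencoder_model()
model_dy = models['model_dy']
model_ae = models['model_ae']
with torch.no_grad():
    ...  # forward passes through model_ae / model_dy go here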
Example #5
    def load_model_from_checkpoint(model_chkpt_file):
        """
        Assumes the config is stored 2 levels up
        :param model_chkpt_file: path to the model checkpoint file
        :type model_chkpt_file: str
        :return:
        :rtype:
        """

        train_dir = os.path.dirname(os.path.dirname(model_chkpt_file))
        model_name = os.path.split(train_dir)[-1]

        config = load_yaml(os.path.join(train_dir, 'config.yaml'))

        model_kp = Transporter(config, use_gpu=True)
        model_kp.load_state_dict(torch.load(model_chkpt_file))
        model_kp = model_kp.cuda()
        model_kp = model_kp.eval()

        return {
            'model': model_kp,
            'model_file': model_chkpt_file,
            'model_name': model_name,
            'train_dir': train_dir,
            'config': config,
        }
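
Usage sketch; per the docstring, the checkpoint must sit two directory levels below the folder containing config.yaml. The path below is a placeholder, and the call assumes the function is exposed as a static method:

out = load_model_from_checkpoint("/path/to/train_dir/checkpoints/net_best.pth")
model_kp = out['model']
print(out['model_name'], out['train_dir'])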
Example #6
def sample_random_mug():
    sdf_dir = os.path.join(get_data_root(), "stable/sim_assets/anzu_mugs")
    # sdf_file = random.choice(SDFHelper.get_sdf_list(sdf_dir))

    mug_list = load_yaml(
        os.path.join(get_project_root(), 'experiments/exp_20_mugs/mugs.yaml'))
    sdf_file = random.choice(mug_list['corelle_mug-small'])
    sdf_file = os.path.join(sdf_dir, sdf_file)
    return sdf_file
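
Because the mug is picked with random.choice, seeding the random module makes the draw reproducible:

import random

random.seed(0)  # fix the seed so the sampled mug is deterministic
sdf_file = sample_random_mug()
print(sdf_file)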
Example #7
def top_down_dataset_root():
    dataset_name = "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000"
    # dataset_root = os.path.join(get_data_root(), "dev/experiments/09/data", dataset_name)
    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    return {'dataset_name': dataset_name,
            'dataset_root': dataset_root,
            'config': config,
            'main_camera_name': 'camera_1_top_down'}
Example #8
def load_default_config():
    """
    Loads the experiments/05/config.yaml
    :return:
    :rtype:
    """
    config_file = os.path.join(get_project_root(),
                               'experiments/05/config.yaml')
    config = load_yaml(config_file)
    return config
Example #9
def single_corelle_mug_600():
    dataset_name = "single_corelle_mug_600"

    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    return {
        'dataset_name': dataset_name,
        'dataset_root': dataset_root,
        'config': config,
        'main_camera_name': 'camera_1_top_down',
    }
Example #10
def mugs_random_colors_1000():
    dataset_name = "mugs_random_colors_1000"

    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    return {
        'dataset_name': dataset_name,
        'dataset_root': dataset_root,
        'config': config,
        'main_camera_name': 'camera_1_top_down',
    }
Example #11
    def f(q_tmp):
        config = load_yaml(
            os.path.join(get_project_root(),
                         'experiments/exp_20_mugs/config.yaml'))
        config['dataset']['num_episodes'] = num_episodes_per_thread
        out = collect_episodes(config,
                               output_dir=OUTPUT_DIR,
                               visualize=False,
                               debug=False,
                               run_from_thread=True)

        q_tmp.put(out)
Example #12
def angled_cam_dataset_root():
    dataset_name = "2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam"

    # prepare folders
    # dataset_root = os.path.join(get_data_root(), 'dev/experiments/10/data', dataset_name)
    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    return {'dataset_name': dataset_name,
            'dataset_root': dataset_root,
            'config': config,
            'main_camera_name': 'camera_angled',
            }
Example #13
def get_dataset_paths(dataset_name):
    if dataset_name == "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000":
        return top_down_dataset_root()
    elif dataset_name == "2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam":
        return angled_cam_dataset_root()
    elif dataset_name == "box_push_1000_top_down":
        dataset_root = os.path.join(get_data_ssd_root(), 'dataset', "box_push_1000")
        config = load_yaml(os.path.join(dataset_root, 'config.yaml'))
        transporter_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_top_down/trained_models/perception/transporter/transporter_standard_2020-06-14-22-29-31-256422/train_nKp6_invStd10.0/net_best.pth"

        dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_top_down/trained_models/perception/dense_descriptors/data_aug_2020-06-14-21-47-52-389769/net_best_model.pth"


        return {'dataset_name': dataset_name,
                'dataset_root': dataset_root,
                'config': config,
                'main_camera_name': 'camera_1_top_down',
                'dense_descriptor_camera_list': ['camera_1_top_down', 'camera_2_top_down_rotated'],
                'transporter_model_chkpt': transporter_model_chkpt,
                'dense_descriptor_model_chkpt': dense_descriptor_model_chkpt,
                }
    elif dataset_name == "box_push_1000_angled":
        dataset_root = os.path.join(get_data_ssd_root(), 'dataset', "box_push_1000")
        config = load_yaml(os.path.join(dataset_root, 'config.yaml'))
        transporter_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_angled/trained_models/perception/transporter/transporter_standard_2020-06-15-18-35-52-478769/train_nKp6_invStd10.0/net_best.pth"

        dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_angled/trained_models/perception/dense_descriptors/data_aug_2020-06-15-15-39-24-127276/net_best_model.pth"

        return {'dataset_name': dataset_name,
                'dataset_root': dataset_root,
                'config': config,
                'main_camera_name': 'camera_angled',
                'dense_descriptor_camera_list': ['camera_angled', 'camera_angled_rotated'],
                'transporter_model_chkpt': transporter_model_chkpt,
                'dense_descriptor_model_chkpt': dense_descriptor_model_chkpt,
                }
    else:
        raise ValueError("unknown dataset:", dataset_name)
Example #14
def box_on_side_dataset_root():
    dataset_name = "dps_box_on_side_600"
    # dataset_root = os.path.join(get_data_root(), "dev/experiments/18/data", dataset_name)
    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/perception/dense_descriptor/3D_loss_camera_angled_2020-05-13-23-39-35-818188/net_best_dy_model.pth"

    return {'dataset_name': dataset_name,
            'dataset_root': dataset_root,
            'config': config,
            'main_camera_name': 'camera_angled',
            'dense_descriptor_model_chkpt': dense_descriptor_model_chkpt,
            }
Example #15
def correlle_mug_small_many_colors_600():
    dataset_name = "correlle_mug-small_many_colors_600"

    dataset_root = os.path.join(get_data_ssd_root(), 'dataset', dataset_name)
    config = load_yaml(os.path.join(dataset_root, 'config.yaml'))

    dense_descriptor_model_chkpt = "/home/manuelli/data/key_dynam/dev/experiments/20/dataset_correlle_mug-small_many_colors_600/trained_models/perception/dense_descriptors/data_aug_2020-06-03-16-41-29-740641/net_best_model.pth"

    return {
        'dataset_name': dataset_name,
        'dataset_root': dataset_root,
        'config': config,
        'main_camera_name': 'camera_1_top_down',
        "dense_descriptor_model_chkpt": dense_descriptor_model_chkpt,
    }
Example #16
def test_pusher_slider_keypoint_dataset():
    project_root = get_project_root()
    config_file = os.path.join(project_root, "experiments/02/config.yaml")
    config = load_yaml(config_file)

    config["n_history"] = 1
    config["n_roll"] = 0

    # new dataset loading approach
    episodes = load_episodes_from_config(config)
    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(
        config)

    dataset = MultiEpisodeDataset(config,
                                  action_function=action_function,
                                  observation_function=observation_function,
                                  episodes=episodes,
                                  phase="train")

    # dataset, config = create_pusher_slider_keypoint_dataset(config=config)

    episode_names = dataset.get_episode_names()
    episode_names.sort()
    episode_name = episode_names[0]
    episode = dataset.episode_dict[episode_name]
    obs_raw = episode.get_observation(0)
    obs_raw['slider']['angle'] = 0

    dataset.observation_function(obs_raw)

    print("20 degrees\n\n\n\n")
    obs_raw['slider']['angle'] = np.deg2rad(90)
    dataset.observation_function(obs_raw)
    quit()  # debugging early exit -- everything below is unreachable

    data = dataset[0]  # test the getitem
    print("type(data)", type(data))
    print("data.keys()", data.keys())

    print(type(data["observations"]))
    print("observations.shape", data["observations"].shape)
    print("actions.shape", data["actions"].shape)

    print("observations", data["observations"])
    print("actions", data["actions"])
Example #17
def load_model():

    # dataset_name
    # model_file
    sae_train_dir = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_2020-04-20-14-58-21-418302_T_aug_random_velocity_1000/trained_models/perception/spatial_autoencoder"

    # model_name = "2020-06-05-20-57-10-394927"
    ckp_file = 'net_best.pth'
    model_name = "2020-06-06-01-57-53-187767" # lr 1e-3


    model_name = "2020-06-06-17-31-05-356659" # with masked loss
    ckp_file = "net_kp_epoch_38_iter_0.pth"


    dataset_name = "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000"
    train_dir = os.path.join(sae_train_dir, model_name)
    dataset_paths = exp_dps_utils.get_dataset_paths(dataset_name)

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    ckp_file = os.path.join(train_dir, 'checkpoints', ckp_file)

    camera_name = config['perception']['camera_name']

    model = SpatialAutoencoder.from_global_config(config)
    model.load_state_dict(torch.load(ckp_file))


    dataset_root = dataset_paths['dataset_root']
    dataset_name = dataset_paths['dataset_name']
    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(dataset_root=dataset_root)
    image_preprocess_func = AutoencoderImagePreprocessFunctionFactory.spatial_autoencoder(config)

    dataset = AutoencoderImageDataset(config=config,
                                      episodes=multi_episode_dict,
                                      phase="train",
                                      camera_names=[camera_name],
                                      image_preprocess_func=image_preprocess_func,
                                      )



    return {'dataset_name': dataset_name,
            'dataset': dataset,
            'model': model}
Example #18
def load_transporter_model(model_file=None):
    train_dir = os.path.dirname(os.path.dirname(model_file))

    print("train_dir", train_dir)

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))

    model_kp = Transporter(config, use_gpu=True)
    model_kp.load_state_dict(torch.load(model_file))
    model_kp = model_kp.cuda()
    model_kp = model_kp.eval()

    return {
        'model': model_kp,
        'model_file': model_file,
        'train_dir': train_dir,
        'config': config,
    }
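
Usage sketch with a placeholder path; like Example #5, the function assumes config.yaml lives two directory levels above the checkpoint:

out = load_transporter_model(
    model_file="/path/to/train_dir/checkpoints/net_best.pth")
model_kp = out['model']
config = out['config']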
Example #19
def load_dynamics_model_from_folder(model_folder,
                                    state_dict_file=None,
                                    strict=True):
    """
    Builds model and loads parameters using the 'load_state_dict'
    function
    """
    config = load_yaml(os.path.join(model_folder, 'config.yaml'))
    model = build_dynamics_model(config)

    if state_dict_file is None:
        state_dict_file = os.path.join(model_folder,
                                       'net_best_dy_state_dict.pth')

    model.load_state_dict(torch.load(state_dict_file), strict=strict)
    model = model.eval()
    model = model.cuda()
    _, model_name = os.path.split(model_folder)

    return {'model_dy': model, 'model_name': model_name, 'config': config}
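
Usage sketch, assuming model_folder contains config.yaml and the default checkpoint name used above (the path is a placeholder):

out = load_dynamics_model_from_folder("/path/to/model_folder")
model_dy = out['model_dy']
print("loaded model:", out['model_name'])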
Example #20
def main():
    start_time = time.time()
    config = load_yaml(os.path.join(get_project_root(), 'experiments/drake_pusher_slider/env_config.yaml'))
    config['dataset']['num_episodes'] = 1000 # half for train, half for valid

    set_seed(500) # just randomly chosen

    num_episodes = config['dataset']['num_episodes']
    DATASET_NAME = "box_push_%d" %(num_episodes)
    OUTPUT_DIR = os.path.join(get_data_ssd_root(), 'dataset', DATASET_NAME)

    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
    collect_episodes(
        config,
        output_dir=OUTPUT_DIR,
        visualize=False,
        debug=False)

    elapsed = time.time() - start_time
    print("Generating and saving dataset to disk took %d seconds" % (int(elapsed)))
Example #21
def load_model(model_folder, strict=True):
    model_dy_dict = model_builder.load_dynamics_model_from_folder(
        model_folder, strict=strict)
    _, model_name = os.path.split(model_folder)

    config = model_dy_dict['config']

    # read the keypoint model path from the precomputed-data metadata
    precomputed_data_root = config['dataset']['precomputed_data_root']
    metadata = load_yaml(os.path.join(precomputed_data_root, 'metadata.yaml'))
    model_kp_file = metadata['model_file']

    print("model_kp_file", model_kp_file)

    model_kp_dict = load_transporter_model(model_file=model_kp_file)

    return {
        "model_dy": model_dy_dict,
        'model_kp': model_kp_dict,
        'model_name': model_name
    }
Example #22
def main():
    start_time = time.time()
    config = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_20_mugs/config.yaml'))

    config['dataset']['num_episodes'] = 10

    set_seed(500)  # just randomly chosen

    DATASET_NAME = "mugs_%d" % (config['dataset']['num_episodes'])
    OUTPUT_DIR = os.path.join(get_data_root(), 'sandbox', DATASET_NAME)
    print("OUTPUT_DIR:", OUTPUT_DIR)

    collect_episodes(config,
                     output_dir=OUTPUT_DIR,
                     visualize=False,
                     debug=False)

    elapsed = time.time() - start_time
    print("Generating and saving dataset to disk took %d seconds" %
          (int(elapsed)))
Example #23
def create_pusher_slider_keypoint_dataset(config=None):
    # load some previously generated data

    project_root = get_project_root()
    if config is None:
        config_file = os.path.join(project_root, "experiments/02/config.yaml")
        config = load_yaml(config_file)

    action_function = ActionFunctionFactory.pusher_velocity
    obs_function = ObservationFunctionFactory.pusher_pose_slider_keypoints(
        config)

    DATA_PATH = os.path.join(
        project_root,
        "test_data/pusher_slider_10_episodes/2019-10-22-21-30-02-536750.p")

    raw_data = load_pickle(DATA_PATH)
    episodes = PyMunkEpisodeReader.load_pymunk_episodes_from_raw_data(raw_data)

    # create MultiEpisodeDataset
    dataset = MultiEpisodeDataset(config,
                                  action_function=action_function,
                                  observation_function=obs_function,
                                  episodes=episodes)

    episode = dataset.get_random_episode()
    data_0 = episode.get_observation(0)
    data_1 = episode.get_observation(1)

    print("time 0", data_0["sim_time"])
    print("time 1", data_1["sim_time"])

    # episode_name = episodes.keys()[0]
    # episode = episodes[episode_name]
    # data = episode.data
    # print("episode.data.keys()", episode.data.keys())
    # print("test ", type(data["trajectory"][0].keys()))
    # print("test ", data["trajectory"][0].keys())
    return dataset, config
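
A hypothetical caller, assuming the bundled test data referenced above is present on disk:

dataset, config = create_pusher_slider_keypoint_dataset()
data = dataset[0]
print(data['observations'].shape, data['actions'].shape)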
Example #24
def run_interactive_circle_slider():
    """
    Launch interactive environment where you can move the pusher around with
    the arrow keys
    :return:
    :rtype:
    """
    config_file = os.path.join(get_project_root(),
                               'experiments/03/config.yaml')
    config = load_yaml(config_file)
    env = PusherSlider(config=config)
    env.reset()

    while env._running:
        action = env.process_events()
        obs, reward, done, info = env.step(action)  # step once per polled action
        env.render(mode='human')

        print("\n\n\n")
        print("slider position", obs['slider']['position'])
        print("pusher position", obs['pusher']['position'])
Example #25
def main():
    start_time = time.time()
    config = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_18_box_on_side/config.yaml'))
    # config['dataset']['num_episodes'] = 500  # half for train, half for valid
    config['dataset']['num_episodes'] = 600  # half for train, half for valid

    set_seed(500)  # just randomly chosen

    DATASET_NAME = "dps_box_on_side_%d" % (config['dataset']['num_episodes'])
    OUTPUT_DIR = os.path.join(get_data_root(), "dev/experiments/18/data",
                              DATASET_NAME)
    print("OUTPUT_DIR:", OUTPUT_DIR)

    collect_episodes(config,
                     output_dir=OUTPUT_DIR,
                     visualize=False,
                     debug=False)

    elapsed = time.time() - start_time
    print("Generating and saving dataset to disk took %d seconds" %
          (int(elapsed)))
Example #26
def load_autoencoder_model(train_dir):
    chkpt_file = "net_best_state_dict.pth"
    ckpt_file = os.path.join(train_dir, chkpt_file)

    config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model
    model_dy = build_dynamics_model(config)
    model_dy.load_state_dict(state_dict['dynamics'])
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    # build autoencoder model
    model_ae = ConvolutionalAutoencoder.from_global_config(config)
    model_ae.load_state_dict(state_dict['autoencoder'])
    model_ae = model_ae.eval()
    model_ae = model_ae.cuda()

    return {
        'model_ae': model_ae,
        'model_dy': model_dy,
        'config': config,
    }
Example #27
def main():
    # load dynamics model
    model_dict = load_model_state_dict()
    model = model_dict['model_dy']
    model_dd = model_dict['model_dd']
    config = model.config

    env_config = load_yaml(os.path.join(get_project_root(), 'experiments/exp_20_mugs/config.yaml'))
    env_config['env']['observation']['depth_int16'] = True
    n_history = config['train']['n_history']

    initial_cond = generate_initial_condition(env_config, push_length=PUSH_LENGTH)
    env_config = initial_cond['config']

    # enable the right observations

    camera_name = model_dict['metadata']['camera_name']
    spatial_descriptor_data = model_dict['spatial_descriptor_data']
    ref_descriptors = spatial_descriptor_data['spatial_descriptors']
    K = ref_descriptors.shape[0]

    ref_descriptors = torch.Tensor(ref_descriptors).cuda()  # put them on the GPU

    print("ref_descriptors\n", ref_descriptors)
    print("ref_descriptors.shape", ref_descriptors.shape)

    # create the environment
    env = DrakeMugsEnv(env_config)
    env.reset()

    T_world_camera = env.camera_pose(camera_name)
    camera_K_matrix = env.camera_K_matrix(camera_name)

    # create another environment for doing rollouts
    env2 = DrakeMugsEnv(env_config, visualize=False)
    env2.reset()

    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.drake_pusher_position_3D(config)
    visual_observation_function = \
        VisualObservationFunctionFactory.descriptor_keypoints_3D(config=config,
                                                                 camera_name=camera_name,
                                                                 model_dd=model_dd,
                                                                 ref_descriptors=ref_descriptors,
                                                                 K_matrix=camera_K_matrix,
                                                                 T_world_camera=T_world_camera,
                                                                 )

    episode = OnlineEpisodeReader()
    mpc_input_builder = DynamicsModelInputBuilder(observation_function=observation_function,
                                                  visual_observation_function=visual_observation_function,
                                                  action_function=action_function,
                                                  episode=episode)

    vis = meshcat_utils.make_default_visualizer_object()
    vis.delete()

    reset_environment(env, initial_cond['q_pusher'], initial_cond['q_slider'])
    obs_init = env.get_observation()

    #### COMPUTE INITIAL AND GOAL KEYPOINTS (GROUND TRUTH ROLLOUT) ############
    reset_environment(env, initial_cond['q_pusher'], initial_cond['q_slider'])
    # seed the episode history with n_history zero-action observations
    episode.clear()
    for i in range(n_history):
        action_zero = np.zeros(2)
        obs_tmp = env.get_observation()
        episode.add_observation_action(obs_tmp, action_zero)

    def goal_func(obs_tmp):
        state_tmp = mpc_input_builder.get_state_input_single_timestep({'observation': obs_tmp})['state']
        return model.compute_z_state(state_tmp.unsqueeze(0))['z_object'].flatten()


    # latent keypoints of the initial observation
    idx = episode.get_latest_idx()
    obs_raw = episode.get_observation(idx)
    z_object_goal = goal_func(obs_raw)
    z_keypoints_init_W = keypoints_3D_from_dynamics_model_output(z_object_goal, K)
    z_keypoints_init_W = torch_utils.cast_to_numpy(z_keypoints_init_W)

    z_keypoints_obj = keypoints_world_frame_to_object_frame(z_keypoints_init_W,
                                                          T_W_obj=slider_pose_from_observation(obs_init))

    color = [1, 0, 0]
    meshcat_utils.visualize_points(vis=vis,
                                   name="keypoints_W",
                                   pts=z_keypoints_init_W,
                                   color=color,
                                   size=0.02,
                                   )

    # input("press Enter to continue")

    # rollout single action sequence using the simulator
    action_sequence_np = torch_utils.cast_to_numpy(initial_cond['action_sequence'])
    N = action_sequence_np.shape[0]
    obs_rollout_gt = env_utils.rollout_action_sequence(env, action_sequence_np)[
        'observations']

    # using the vision model to get "goal" keypoints
    z_object_goal = goal_func(obs_rollout_gt[-1])
    z_object_goal_np = torch_utils.cast_to_numpy(z_object_goal)
    z_keypoints_goal = keypoints_3D_from_dynamics_model_output(z_object_goal, K)
    z_keypoints_goal = torch_utils.cast_to_numpy(z_keypoints_goal)

    # visualize goal keypoints
    color = [0, 1, 0]
    meshcat_utils.visualize_points(vis=vis,
                                   name="goal_keypoints",
                                   pts=z_keypoints_goal,
                                   color=color,
                                   size=0.02,
                                   )

    # input("press Enter to continue")

    #### ROLLOUT USING LEARNED MODEL + GROUND TRUTH ACTIONS ############
    reset_environment(env, initial_cond['q_pusher'], initial_cond['q_slider'])
    # seed the episode history with n_history zero-action observations
    episode.clear()
    for i in range(n_history):
        action_zero = np.zeros(2)
        obs_tmp = env.get_observation()
        episode.add_observation_action(obs_tmp, action_zero)

    # [n_history, state_dim]
    idx = episode.get_latest_idx()

    dyna_net_input = mpc_input_builder.get_dynamics_model_input(idx, n_history=n_history)
    state_init = dyna_net_input['states'].cuda() # [n_history, state_dim]
    action_init = dyna_net_input['actions'] # [n_history, action_dim]


    print("state_init.shape", state_init.shape)
    print("action_init.shape", action_init.shape)


    action_seq_gt_torch = torch_utils.cast_to_torch(initial_cond['action_sequence'])
    action_input = torch.cat((action_init[:(n_history-1)], action_seq_gt_torch), dim=0).cuda()
    print("action_input.shape", action_input.shape)


    # rollout using the ground truth actions and learned model
    # need to add the batch dim to do that
    z_init = model.compute_z_state(state_init)['z']
    rollout_pred = rollout_model(state_init=z_init.unsqueeze(0),
                                 action_seq=action_input.unsqueeze(0),
                                 dynamics_net=model,
                                 compute_debug_data=True)

    state_pred_rollout = rollout_pred['state_pred']

    print("state_pred_rollout.shape", state_pred_rollout.shape)

    for i in range(N):
        # vis GT for now
        name = "GT_3D/%d" % (i)
        T_W_obj = slider_pose_from_observation(obs_rollout_gt[i])
        # print("T_W_obj", T_W_obj)

        # green
        color = np.array([0, 1, 0]) * get_color_intensity(i, N)
        meshcat_utils.visualize_points(vis=vis,
                                       name=name,
                                       pts=z_keypoints_obj,
                                       color=color,
                                       size=0.01,
                                       T=T_W_obj)

        # blue
        color = np.array([0, 0, 1]) * get_color_intensity(i, N)
        state_pred = state_pred_rollout[:, i, :]
        pts_pred = keypoints_3D_from_dynamics_model_output(state_pred, K).squeeze()
        pts_pred = pts_pred.detach().cpu().numpy()
        name = "pred_3D/%d" % (i)
        meshcat_utils.visualize_points(vis=vis,
                                       name=name,
                                       pts=pts_pred,
                                       color=color,
                                       size=0.01,
                                       )

    # input("finished visualizing GT rollout\npress Enter to continue")
    index_dict = get_object_and_robot_state_indices(config)
    object_indices = index_dict['object_indices']

    # reset the environment and use the MPC controller to stabilize this
    # now setup the MPC to try to stabilize this . . . .
    reset_environment(env, initial_cond['q_pusher'], initial_cond['q_slider'])
    episode.clear()

    # seed the episode history with n_history zero-action observations
    for i in range(n_history):
        action_zero = np.zeros(2)
        obs_tmp = env.get_observation()
        episode.add_observation_action(obs_tmp, action_zero)

    # input("press Enter to continue")

    # make a planner config
    planner_config = copy.copy(config)
    config_tmp = load_yaml(os.path.join(get_project_root(), 'experiments/drake_pusher_slider/eval_config.yaml'))
    planner_config['mpc'] = config_tmp['mpc']
    planner = None
    if PLANNER_TYPE == "random_shooting":
        planner = RandomShootingPlanner(planner_config)
    elif PLANNER_TYPE == "mppi":
        planner = PlannerMPPI(planner_config)
    else:
        raise ValueError("unknown planner type: %s" % (PLANNER_TYPE))

    mpc_out = None
    action_seq_mpc = None
    state_pred_mpc = None
    counter = -1
    while True:
        counter += 1
        print("\n\n-----Running MPC Optimization: Counter (%d)-------" % (counter))

        obs_cur = env.get_observation()
        episode.add_observation_only(obs_cur)

        if counter == 0 or REPLAN:
            print("replanning")
            ####### Run the MPC ##########

            # [1, state_dim]

            n_look_ahead = N - counter
            if USE_FIXED_MPC_HORIZON:
                n_look_ahead = MPC_HORIZON
            if n_look_ahead == 0:
                break

            # start_time = time.time()
            # idx of current observation
            idx = episode.get_latest_idx()
            mpc_start_time = time.time()
            mpc_input_data = mpc_input_builder.get_dynamics_model_input(idx, n_history=n_history)
            state_cur = mpc_input_data['states']
            action_his = mpc_input_data['actions']

            if mpc_out is not None:
                action_seq_rollout_init = mpc_out['action_seq'][1:]
            else:
                action_seq_rollout_init = None

            # run MPPI
            z_cur = None
            with torch.no_grad():
                z_cur = model.compute_z_state(state_cur.unsqueeze(0).cuda())['z'].squeeze(0)



            mpc_out = planner.trajectory_optimization(state_cur=z_cur,
                                                      action_his=action_his,
                                                      obs_goal=z_object_goal_np,
                                                      model_dy=model,
                                                      action_seq_rollout_init=action_seq_rollout_init,
                                                      n_look_ahead=n_look_ahead,
                                                      eval_indices=object_indices,
                                                      rollout_best_action_sequence=True,
                                                      verbose=True,
                                                      )

            print("MPC step took %.4f seconds" %(time.time() - mpc_start_time))
            action_seq_mpc = mpc_out['action_seq'].cpu().numpy()


        # Rollout with ground truth simulator dynamics
        action_seq_mpc = torch_utils.cast_to_numpy(mpc_out['action_seq'])
        env2.set_simulator_state_from_observation_dict(env2.get_mutable_context(), obs_cur)
        obs_mpc_gt = env_utils.rollout_action_sequence(env2, action_seq_mpc)['observations']
        state_pred_mpc = torch_utils.cast_to_numpy(mpc_out['state_pred'])

        vis['mpc_3D'].delete()
        vis['mpc_GT_3D'].delete()

        L = len(obs_mpc_gt)
        print("L", L)
        if L == 0:
            break
        for i in range(L):
            # red
            color = np.array([1, 0, 0]) * get_color_intensity(i, L)
            state_pred = state_pred_mpc[i, :]
            state_pred = np.expand_dims(state_pred, 0)  # may need to expand dims here
            pts_pred = keypoints_3D_from_dynamics_model_output(state_pred, K).squeeze()
            name = "mpc_3D/%d" % (i)
            meshcat_utils.visualize_points(vis=vis,
                                           name=name,
                                           pts=pts_pred,
                                           color=color,
                                           size=0.01,
                                           )

            # ground truth rollout of the MPC action_seq
            name = "mpc_GT_3D/%d" % (i)
            T_W_obj = slider_pose_from_observation(obs_mpc_gt[i])

            # yellow
            color = np.array([1, 1, 0]) * get_color_intensity(i, L)
            meshcat_utils.visualize_points(vis=vis,
                                           name=name,
                                           pts=z_keypoints_obj,
                                           color=color,
                                           size=0.01,
                                           T=T_W_obj)

        action_cur = action_seq_mpc[0]

        print("action_cur", action_cur)
        # print("action_GT", initial_cond['action'])
        input("press Enter to continue")

        # add observation actions to the episode
        obs_cur = env.get_observation()
        episode.replace_observation_action(obs_cur, action_cur)

        # step the simulator
        env.step(action_cur)

        # visualize current keypoint positions
        obs_cur = env.get_observation()
        T_W_obj = slider_pose_from_observation(obs_cur)

        # yellow
        color = np.array([1, 1, 0])
        meshcat_utils.visualize_points(vis=vis,
                                       name="keypoint_cur",
                                       pts=z_keypoints_obj,
                                       color=color,
                                       size=0.02,
                                       T=T_W_obj)

        action_seq_mpc = action_seq_mpc[1:]
        state_pred_mpc = state_pred_mpc[1:]

    obs_final = env.get_observation()

    pose_error = compute_pose_error(obs_rollout_gt[-1],
                                    obs_final)

    print("position_error: %.3f"  %(pose_error['position_error']))
    print("angle error degrees: %.3f" %(pose_error['angle_error_degrees']))
Example #28
    def load_dataset(
        dataset_root,  # str: folder containing dataset
        load_image_data=True,
        descriptor_images_root=None,  # str: (optional) folder containing hdf5 files with descriptors
        descriptor_keypoints_root=None,
        max_num_episodes=None,  # int, max num episodes to load
        precomputed_data_root=None,
    ):
        """

        :param dataset_root: folder should contain
            - config.yaml
            - metadata.yaml
            - <episode_name.p>
            - <episode_name.h5>
        :type dataset_root:
        :return:
        :rtype:
        """

        if load_image_data:
            from key_dynam.dense_correspondence.dc_drake_sim_episode_reader import DCDrakeSimEpisodeReader

        metadata = load_yaml(DrakeSimEpisodeReader.metadata_file(dataset_root))
        multi_episode_dict = dict()
        episode_names = list(metadata['episodes'].keys())
        episode_names.sort()  # sort the keys

        num_episodes = len(episode_names)

        # optionally don't read all episodes
        if (max_num_episodes is not None) and (max_num_episodes > 0):
            # compute the number of episodes to read, in sorted order
            num_episodes = int(min(len(episode_names), max_num_episodes))

        for idx in range(num_episodes):
            episode_name = episode_names[idx]
            val = metadata['episodes'][episode_name]

            # load non image data
            non_image_data_file = os.path.join(dataset_root,
                                               val['non_image_data_file'])
            assert os.path.isfile(
                non_image_data_file), "File doesn't exist: %s" % (
                    non_image_data_file)
            non_image_data = load_pickle(non_image_data_file)

            dc_episode_reader = None
            if load_image_data:

                # load image data
                image_data_file = os.path.join(dataset_root,
                                               val['image_data_file'])
                assert os.path.isfile(
                    image_data_file), "File doesn't exist: %s" % (
                        image_data_file)

                descriptor_image_data_file = None
                if descriptor_images_root is not None:
                    descriptor_image_data_file = os.path.join(
                        descriptor_images_root, val['image_data_file'])

                    assert os.path.isfile(
                        descriptor_image_data_file
                    ), "File doesn't exist: %s" % (descriptor_image_data_file)

                descriptor_keypoints_data = None
                descriptor_keypoints_hdf5_file = None
                if descriptor_keypoints_root is not None:

                    # replace .h5 filename with .p for pickle file
                    descriptor_keypoints_data_file = val[
                        'image_data_file'].split(".")[0] + ".p"
                    descriptor_keypoints_data_file = os.path.join(
                        descriptor_keypoints_root,
                        descriptor_keypoints_data_file)

                    descriptor_keypoints_hdf5_file = os.path.join(
                        descriptor_keypoints_root, val['image_data_file'])

                    if os.path.isfile(descriptor_keypoints_data_file):
                        descriptor_keypoints_data = load_pickle(
                            descriptor_keypoints_data_file)
                    else:
                        assert os.path.isfile(
                            descriptor_keypoints_hdf5_file
                        ), "File doesn't exist: %s" % (
                            descriptor_keypoints_hdf5_file)

                ####
                precomputed_data = None
                precomputed_data_file = None
                if precomputed_data_root is not None:

                    # replace .h5 filename with .p for pickle file
                    precomputed_data_file = val['image_data_file'].split(
                        ".")[0] + ".p"
                    precomputed_data_file = os.path.join(
                        precomputed_data_root, precomputed_data_file)

                    if os.path.isfile(precomputed_data_file):
                        precomputed_data = load_pickle(precomputed_data_file)
                    else:
                        raise ValueError("file doesn't exist: %s" %
                                         (precomputed_data_file))

                dc_episode_reader = DCDrakeSimEpisodeReader(
                    non_image_data,
                    image_data_file,
                    descriptor_image_data_file=descriptor_image_data_file,
                    descriptor_keypoints_data=descriptor_keypoints_data,
                    descriptor_keypoints_data_file=
                    descriptor_keypoints_hdf5_file,
                    precomputed_data=precomputed_data,
                    precomputed_data_file=precomputed_data_file)

            episode_reader = DrakeSimEpisodeReader(
                non_image_data=non_image_data,
                episode_name=episode_name,
                dc_episode_reader=dc_episode_reader)

            multi_episode_dict[episode_name] = episode_reader

        return multi_episode_dict
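
Usage sketch; the indentation and the call to DrakeSimEpisodeReader.metadata_file suggest this is a static method on DrakeSimEpisodeReader (the root path below is a placeholder):

multi_episode_dict = DrakeSimEpisodeReader.load_dataset(
    dataset_root="/path/to/dataset_root",
    load_image_data=False,
    max_num_episodes=10)
print("loaded %d episodes" % len(multi_episode_dict))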
Example #29
def load_simple_config():
    config_file = os.path.join(get_project_root(), 'config/simple_config.yaml')
    config = load_yaml(config_file)
    return config
Example #30
def run_gym_env():
    """
    Runs the gym env
    :return:
    :rtype:
    """
    DEBUG_PRINTS = True
    USE_PYGAME = True

    try:

        # set up pygame for keyboard input
        if USE_PYGAME:
            pygame.init()
            screen = pygame.display.set_mode(
                (640, 480))  # needed for grabbing focus
            clock = pygame.time.Clock()

        default_action = np.zeros(2)
        velocity = 0.2

        if USE_PYGAME:
            action = process_pygame_events()
        else:
            action = default_action

        config = load_yaml(
            os.path.join(get_project_root(),
                         'experiments/exp_20_mugs/config.yaml'))
        env = DrakeMugsEnv(config)
        env.reset()

        context = env.get_mutable_context()
        pos = np.array([0, 0, 0.1])
        # quat = transforms3d.euler.euler2quat(np.deg2rad(90), 0, 0)
        quat = np.array([1, 0, 0, 0])
        q = np.concatenate((quat, pos))
        env.set_object_position(context=context, q=q)
        # set the box pose
        # context = env.get_mutable_context()
        # pos = np.array([1.56907481e-04, 1.11390697e-06, 5.11972761e-02])
        # quat = np.array([ 7.13518047e-01, -6.69765583e-07, -7.00636851e-01, -6.82079212e-07])
        # q_slider = np.concatenate((quat, pos))
        # env.set_slider_position(context, q=q_slider)

        env.simulator.set_target_realtime_rate(1.0)

        # move box around
        # context = env.get_mutable_context()
        # q_slider = [-0.05, 0, 0.03]

        num_model_instances = env.diagram_wrapper.mbp.num_model_instances()
        print("num_model_instances", num_model_instances)
        print("num_positions", env.diagram_wrapper.mbp.num_positions())

        label_db = env.diagram_wrapper.get_label_db()
        print("label db:", label_db.all())

        mask_db = env.diagram_wrapper.get_labels_to_mask()
        print("mask db:", mask_db.all())

        # context = env.get_mutable_context()
        #
        # # set the position of pusher
        # q_pusher = [0.2,0.2]
        # mbp = env.diagram_wrapper.mbp
        # mbp_context = env.diagram.GetMutableSubsystemContext(mbp, context)
        # mbp.SetPositions(mbp_context, env.diagram_wrapper.models['pusher'], q_pusher)

        camera_names = list(
            config['env']['rgbd_sensors']['sensor_list'].keys())
        camera_names.sort()

        image_vis = ImageVisualizer(len(camera_names), 1)

        print("running sim")

        while True:
            if USE_PYGAME:
                action = velocity * process_pygame_events()
            else:
                action = default_action

            # print("action:", action)
            obs, reward, done, info = env.step(action)

            # print("obs\n", obs)

            # visualize RGB images in matplotlib
            for idx, camera_name in enumerate(camera_names):
                rgb_image = obs['images'][camera_name]['rgb']
                image_vis.draw_image(idx, 0, rgb_image)

            image_vis.visualize_interactive()

            # # print unique depth values
            # depth_32F = obs['images']['camera_0']['depth_32F']
            # print("unique depth_32F vals", np.unique(depth_32F))
            # # print unique depth values
            # depth_16U = obs['images']['camera_0']['depth_16U']
            # print("unique depth_16U vals", np.unique(depth_16U))

        # build simulator
    except KeyboardInterrupt:
        pygame.quit()
        plt.close()