示例#1
0
def load_drake_sim_episodes_from_config(config,
                                        load_descriptor_images=True,
                                        load_descriptor_keypoints=True):
    """
    Builds the multi-episode dict for a Drake sim dataset described by `config`.

    Resolves the dataset directory (relative paths are anchored at the data
    root) and, when requested and present in the config, the descriptor
    image/keypoint directories, then delegates to
    DrakeSimEpisodeReader.load_dataset.

    :param config: dict-like config with a 'dataset' section
    :param load_descriptor_images: whether to resolve the descriptor images dir
    :param load_descriptor_keypoints: whether to resolve the descriptor keypoints dir
    :return: multi_episode_dict as produced by DrakeSimEpisodeReader.load_dataset
    """
    dataset_cfg = config['dataset']

    dataset_root = dataset_cfg['dataset_dir']
    if not os.path.isabs(dataset_root):
        dataset_root = os.path.join(get_data_root(), dataset_root)

    descriptor_images_root = None
    if load_descriptor_images and ('descriptor_images_dir' in dataset_cfg):
        descriptor_images_root = os.path.join(
            get_data_root(), dataset_cfg['descriptor_images_dir'])

    descriptor_keypoints_root = None
    if load_descriptor_keypoints and ('descriptor_keypoints_dir' in dataset_cfg):
        descriptor_keypoints_root = os.path.join(
            get_data_root(), dataset_cfg['descriptor_keypoints_dir'])

    max_num_episodes = dataset_cfg.get('max_num_episodes', None)

    return DrakeSimEpisodeReader.load_dataset(
        dataset_root,
        descriptor_images_root=descriptor_images_root,
        descriptor_keypoints_root=descriptor_keypoints_root,
        max_num_episodes=max_num_episodes)
示例#2
0
def get_precomputed_data_root(dataset_name):
    """
    Maps a known dataset name to its transporter model name and the directory
    holding the precomputed transporter keypoints.

    :param dataset_name: str, name of the dataset
    :return: dict with keys 'transporter_model_name' and 'precomputed_data_root'
    :raises ValueError: if the dataset name is not recognized
    """

    def _result(model_name, data_root):
        # package the lookup result in the shape callers expect
        return {
            'transporter_model_name': model_name,
            'precomputed_data_root': data_root,
        }

    if dataset_name == "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000":
        model_name = "transporter_2020-05-06-19-11-54-206998"
        return _result(
            model_name,
            os.path.join(get_data_root(),
                         "dev/experiments/14/trained_models/perception",
                         model_name,
                         "precomputed_vision_data/transporter_keypoints",
                         "dataset_%s" % (dataset_name)))

    if dataset_name == "2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam":
        model_name = "transporter_2020-05-07-22-26-56-654913"
        return _result(
            model_name,
            os.path.join(get_data_root(),
                         "dev/experiments/15/trained_models/perception",
                         model_name,
                         "precomputed_vision_data/transporter_keypoints",
                         "dataset_%s" % (dataset_name)))

    if dataset_name == "dps_box_on_side_600":
        # hardcoded absolute path (machine-specific)
        return _result(
            "transporter_camera_angled_2020-05-13-23-38-18-580817",
            "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/perception/transporter_camera_angled_2020-05-13-23-38-18-580817/precomputed_vision_data/transporter_keypoints/dataset_dps_box_on_side_600")

    raise ValueError("unknown dataset:", dataset_name)
示例#3
0
def load_model_state_dict(model_folder=None):
    """
    Loads a trained dynamics model plus its dense-descriptor model and
    spatial descriptor data from a trained-model folder.

    Bug fix: previously the `model_folder` argument was unconditionally
    overwritten with a hardcoded default, making the parameter dead. It is
    now only used as a fallback when the caller passes None.

    :param model_folder: str or None, path to the trained model folder.
        If None, the default experiment folder under the data root is used.
    :return: dict with keys 'model_dy', 'model_dd',
        'spatial_descriptor_data', 'metadata'
    """
    if model_folder is None:
        models_root = os.path.join(get_data_root(),
                                   "dev/experiments/drake_pusher_slider_v2")

        # model_name = "dataset_2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam/trained_models/dynamics/DD_3D/2020-05-12-12-03-05-252242_DD_3D_spatial_z_n_his_2"
        model_name = "dataset_2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam/trained_models/dynamics/DD_3D/2020-05-11-19-44-35-085478_DD_3D_n_his_2"

        model_folder = os.path.join(models_root, model_name)

    model_dy = model_builder.load_dynamics_model_from_folder(
        model_folder)['model_dy']

    # load dense descriptor model; its checkpoint path is recorded in the
    # training metadata
    metadata = load_pickle(os.path.join(model_folder, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(model_folder, 'spatial_descriptors.p'))

    return {
        'model_dy': model_dy,
        'model_dd': model_dd,
        'spatial_descriptor_data': spatial_descriptor_data,
        'metadata': metadata
    }
示例#4
0
    def save_data(
        self,
        save_dir=None,
    ):
        """
        Saves the rollout data (plan + episode) to disk as a pickle.

        :param save_dir: optional output directory; when None a timestamped
            directory under the sandbox closed-loop-rollouts folder is used.
        """
        if save_dir is None:
            timestamp = utils.get_current_YYYY_MM_DD_hh_mm_ss_ms()
            save_dir = os.path.join(
                utils.get_data_root(),
                'hardware_experiments/closed_loop_rollouts/sandbox',
                timestamp)

        if not os.path.exists(save_dir):
            os.makedirs(save_dir)

        print("saving MPC rollout data at: %s" % save_dir)

        episode_data = self._state_dict['episode'].get_save_data()
        plan_data = self._state_dict['plan'].get_save_data()
        save_data = {
            'episode': episode_data,
            'plan': plan_data,
        }

        utils.save_pickle(save_data, os.path.join(save_dir, "data.p"))
        print("done saving data")
def load_model_state_dict(model_folder=None):
    """
    Loads a trained dynamics model plus its dense-descriptor model and
    spatial descriptor data from a trained-model folder.

    Bug fix: previously the `model_folder` argument was unconditionally
    overwritten with a hardcoded default, making the parameter dead. It is
    now only used as a fallback when the caller passes None.

    :param model_folder: str or None, path to the trained model folder.
        If None, the default box-on-side experiment folder is used.
    :return: dict with keys 'model_dy', 'model_dd',
        'spatial_descriptor_data', 'metadata'
    """
    if model_folder is None:
        # model_folder = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/dynamics/DD_3D/2020-05-15-04-40-10-770703_DD_3D_all_z_n_his_2"
        models_root = os.path.join(
            get_data_root(),
            "dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/dynamics"
        )
        # model_name = "DD_3D/2020-05-15-00-54-26-961701_DD_3D_n_his_2"
        # model_name = "DD_3D/2020-05-15-04-40-10-770703_DD_3D_all_z_n_his_2"
        model_name = "DD_3D/2020-05-15-02-07-44-204479_DD_3D_spatial_z_n_his_2"
        model_folder = os.path.join(models_root, model_name)

    model_dy = model_builder.load_dynamics_model_from_folder(
        model_folder)['model_dy']

    # load dense descriptor model; its checkpoint path is recorded in the
    # training metadata
    metadata = load_pickle(os.path.join(model_folder, 'metadata.p'))
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    spatial_descriptor_data = load_pickle(
        os.path.join(model_folder, 'spatial_descriptors.p'))

    return {
        'model_dy': model_dy,
        'model_dd': model_dd,
        'spatial_descriptor_data': spatial_descriptor_data,
        'metadata': metadata
    }
def get_DD_model_file():
    """
    Returns (model_name, model_file) for the experiment-09 dense-descriptor
    model checkpoint.
    """
    model_name = "2020-04-07-14-31-35-804270_T_aug_dataset"
    relative_path = (
        "dev/experiments/09/trained_models/dense_descriptors/%s/net_best_dy_model.pth"
        % (model_name))
    model_file = os.path.join(get_data_root(), relative_path)

    return model_name, model_file
示例#7
0
def sample_random_mug():
    """
    Samples a random mug sdf from the 'corelle_mug-small' list in the
    exp_20 mugs yaml and returns the file's full path.
    """
    sdf_dir = os.path.join(get_data_root(), "stable/sim_assets/anzu_mugs")
    # sdf_file = random.choice(SDFHelper.get_sdf_list(sdf_dir))

    mugs_yaml = os.path.join(get_project_root(),
                             'experiments/exp_20_mugs/mugs.yaml')
    mug_list = load_yaml(mugs_yaml)
    chosen = random.choice(mug_list['corelle_mug-small'])
    return os.path.join(sdf_dir, chosen)
def load_episodes():
    """
    Loads the experiment-09 T_aug random-velocity dataset (1000 episodes).

    :return: tuple (dataset_name, multi_episode_dict)
    """
    # earlier dataset choices, kept for reference:
    # "2020-03-25-19-57-26-556093_constant_velocity_500"
    # "2020-04-15-21-15-56-602712_T_aug_random_velocity_500"
    dataset_name = "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000"
    dataset_root = os.path.join(get_data_root(), "dev/experiments/09/data",
                                dataset_name)

    # max_num_episodes=None loads every episode in the dataset
    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
        dataset_root, max_num_episodes=None)

    return dataset_name, multi_episode_dict
def get_DD_model_file():
    """
    Returns (model_file, dataset_name) for the data-augmented
    dense-descriptor model trained on the top-down box-push dataset.
    """
    # pth_file = "net_dy_epoch_7_iter_0_model.pth"
    pth_file = "net_best_model.pth"
    dataset_name = "box_push_1000_top_down"
    model_name = "data_aug_2020-06-14-21-47-52-389769"

    relative_path = (
        "dev/experiments/drake_pusher_slider_v2/dataset_%s/trained_models/perception/dense_descriptors/%s/%s"
        % (dataset_name, model_name, pth_file))
    model_file = os.path.join(get_data_root(), relative_path)

    return model_file, dataset_name
示例#10
0
def get_precomputed_data_root(dataset_name):
    """
    Maps a known dataset name to its transporter model name and the directory
    holding the precomputed transporter keypoints.

    :param dataset_name: str, name of the dataset
    :return: dict with keys 'transporter_model_name' and 'precomputed_data_root'
    :raises ValueError: if the dataset name is not recognized
    """

    def _result(model_name, data_root):
        # package the lookup result in the shape callers expect
        return {
            'transporter_model_name': model_name,
            'precomputed_data_root': data_root,
        }

    if dataset_name == "2020-04-20-14-58-21-418302_T_aug_random_velocity_1000":
        model_name = "transporter_2020-05-06-19-11-54-206998"
        return _result(
            model_name,
            os.path.join(get_data_root(),
                         "dev/experiments/14/trained_models/perception",
                         model_name,
                         "precomputed_vision_data/transporter_keypoints",
                         "dataset_%s" % (dataset_name)))

    if dataset_name == "2020-04-23-20-45-12-697915_T_aug_random_velocity_1000_angled_cam":
        model_name = "transporter_2020-05-07-22-26-56-654913"
        return _result(
            model_name,
            os.path.join(get_data_root(),
                         "dev/experiments/15/trained_models/perception",
                         model_name,
                         "precomputed_vision_data/transporter_keypoints",
                         "dataset_%s" % (dataset_name)))

    # remaining datasets use hardcoded absolute paths (machine-specific)
    if dataset_name == "dps_box_on_side_600":
        return _result(
            "transporter_camera_angled_2020-05-13-23-38-18-580817",
            "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/perception/transporter_camera_angled_2020-05-13-23-38-18-580817/precomputed_vision_data/transporter_keypoints/dataset_dps_box_on_side_600")

    if dataset_name == "correlle_mug-small_many_colors_600":
        return _result(
            "transporter_2020-06-10-22-59-54-896478",
            "/home/manuelli/data/key_dynam/dev/experiments/20/dataset_correlle_mug-small_many_colors_600/trained_models/perception/transporter/transporter_2020-06-10-22-59-54-896478/precomputed_vision_data/transporter_keypoints/dataset_correlle_mug-small_many_colors_600")

    if dataset_name == "box_push_1000_top_down":
        return _result(
            "transporter_standard_2020-06-14-22-29-31-256422",
            "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_top_down/trained_models/perception/transporter/transporter_standard_2020-06-14-22-29-31-256422/precomputed_vision_data/transporter_keypoints/dataset_box_push_1000_top_down")

    if dataset_name == "box_push_1000_angled":
        return _result(
            "transporter_standard_2020-06-15-18-35-52-478769",
            "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_v2/dataset_box_push_1000_angled/trained_models/perception/transporter/transporter_standard_2020-06-15-18-35-52-478769/precomputed_vision_data/transporter_keypoints/dataset_box_push_1000_angled")

    raise ValueError("unknown dataset:", dataset_name)
示例#11
0
def load_drake_pusher_slider_episodes(**kwargs):
    """
    Helper for loading the drake pusher slider dataset.

    All keyword arguments are forwarded to
    DrakeSimEpisodeReader.load_dataset.

    :return: multi_episode_dict produced by the reader
    """
    # DATASET_NAME = "top_down_rotated"
    dataset_name = "2019-12-05-15-58-48-462834_top_down_rotated_250"
    dataset_root = os.path.join(get_data_root(), "dev/experiments/05/data",
                                dataset_name)

    return DrakeSimEpisodeReader.load_dataset(dataset_root, **kwargs)
def get_DD_model_file():
    """
    Returns (model_file, dataset_name) for the experiment-20
    dense-descriptor model.

    Cleanup: the original body assigned seven (pth_file, dataset_name,
    model_name) triples in sequence; only the final one had any effect.
    The superseded triples are kept as comments for reference, preserving
    the experiment history without the dead stores.
    """
    # superseded experiments, kept for reference:
    # dataset_name = "mugs_random_colors_1000";            model_name = "standard_2020-06-01-15-59-32-621007"
    # dataset_name = "mugs_correlle_mug-small_1000";       model_name = "data_aug_2020-06-01-19-26-37-655086"
    # dataset_name = "correlle_mug-small_single_color_600"; model_name = "data_aug_2020-06-01-23-14-05-694265"
    # dataset_name = "correlle_mug-small_single_color_600"; model_name = "standard_2020-06-02-14-31-28-600354"
    # dataset_name = "single_corelle_mug_600";             model_name = "data_aug_2020-06-02-20-56-54-192430"
    # dataset_name = "correlle_mug-small_single_color_600"; model_name = "data_aug_2020-06-03-00-27-50-738970"

    pth_file = "net_best_model.pth"
    dataset_name = "correlle_mug-small_many_colors_600"
    model_name = "data_aug_2020-06-03-16-41-29-740641"

    model_file = os.path.join(get_data_root(),
                              "dev/experiments/20/dataset_%s/trained_models/perception/dense_descriptors/%s/%s" % (dataset_name, model_name, pth_file))

    return model_file, dataset_name
示例#13
0
def main():
    """
    Generates the 10-episode mugs dataset and saves it under the sandbox
    directory in the data root, printing how long generation took.
    """
    t_start = time.time()

    config = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_20_mugs/config.yaml'))
    config['dataset']['num_episodes'] = 10

    set_seed(500)  # just randomly chosen

    num_episodes = config['dataset']['num_episodes']
    DATASET_NAME = "mugs_%d" % (num_episodes)
    OUTPUT_DIR = os.path.join(get_data_root(), 'sandbox', DATASET_NAME)
    print("OUTPUT_DIR:", OUTPUT_DIR)

    collect_episodes(config,
                     output_dir=OUTPUT_DIR,
                     visualize=False,
                     debug=False)

    elapsed = time.time() - t_start
    print("Generating and saving dataset to disk took %d seconds" %
          (int(elapsed)))
示例#14
0
def main():
    """
    Generates the 600-episode box-on-side dataset (half train, half valid)
    under dev/experiments/18/data, printing how long generation took.
    """
    t_start = time.time()

    config = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_18_box_on_side/config.yaml'))
    # config['dataset']['num_episodes'] = 500  # half for train, half for valid
    config['dataset']['num_episodes'] = 600  # half for train, half for valid

    set_seed(500)  # just randomly chosen

    num_episodes = config['dataset']['num_episodes']
    DATASET_NAME = "dps_box_on_side_%d" % (num_episodes)
    OUTPUT_DIR = os.path.join(get_data_root(), "dev/experiments/18/data",
                              DATASET_NAME)
    print("OUTPUT_DIR:", OUTPUT_DIR)

    collect_episodes(config,
                     output_dir=OUTPUT_DIR,
                     visualize=False,
                     debug=False)

    elapsed = time.time() - t_start
    print("Generating and saving dataset to disk took %d seconds" %
          (int(elapsed)))
示例#15
0
    def add_object_model(self):
        """
        Adds the manipulated object (a mug) to the simulation diagram.

        Reads the sdf path and desired color from self.config, bakes a
        recolored sdf via SDFHelper, and registers the resulting model with
        the diagram wrapper under the name "mug".
        """
        # previously-used asset paths, kept for reference:
        # sdf_path = "anzu_mugs/big_mug-corelle_mug-6.sdf"
        # sdf_path = "anzu_mugs/big_mug-small_mug-0.sdf"
        # sdf_path = "anzu_mugs/corelle_mug-small_mug-8.sdf"
        # sdf_path = "manual_babson_11oz_mug/manual_babson_11oz_mug.sdf"
        # sdf_path = os.path.join(LARGE_SIM_ASSETS_ROOT, sdf_path)

        self._object_name = "mug"

        model_config = self.config['env']['model']
        sdf_file_fullpath = os.path.join(get_data_root(), model_config['sdf'])
        model_color = model_config['color']

        # output_dir = "/home/manuelli/data/key_dynam/sandbox/sdf_helper"
        # output_dir=None lets SDFHelper choose where to write the recolored sdf
        sdf_data = SDFHelper.create_sdf_specific_color(
            sdf_file_fullpath=sdf_file_fullpath,
            color=model_color,
            output_dir=None)

        self.diagram_wrapper.add_model_from_sdf(self._object_name,
                                                sdf_data['sdf_file'])
示例#16
0
def get_experiment_save_root(dataset_name):
    """
    Returns the experiment-22 output root directory for the given dataset.
    """
    subdir = 'dev/experiments/22/dataset_%s' % (dataset_name)
    return os.path.join(get_data_root(), subdir)
示例#17
0
import os
from key_dynam.utils.utils import get_project_root, get_data_root

# Paths to simulation assets that ship with the repository.
SIM_ASSETS_ROOT = os.path.join(get_project_root(), 'sim_assets')
block_push = os.path.join(SIM_ASSETS_ROOT, 'block_push.urdf')
extra_heavy_duty_table = os.path.join(SIM_ASSETS_ROOT, "extra_heavy_duty_table_surface_only_collision.sdf")
xy_slide = os.path.join(SIM_ASSETS_ROOT, "xy_slide.urdf")

# Model sdf files for the YCB objects, keyed by object name.
# (Idiom fix: plain dict literals instead of redundant dict({...}) wrappers.)
ycb_model_paths = {
    'cracker_box': os.path.join(SIM_ASSETS_ROOT, "cracker_box/003_cracker_box.sdf"),
    'sugar_box': os.path.join(SIM_ASSETS_ROOT, "sugar_box/004_sugar_box.sdf"),
    'tomato_soup_can': os.path.join(SIM_ASSETS_ROOT, "tomato_soup_can/005_tomato_soup_can.sdf"),
    'mustard_bottle': os.path.join(SIM_ASSETS_ROOT, "mustard_bottle/006_mustard_bottle.sdf"),
    'gelatin_box': os.path.join(SIM_ASSETS_ROOT, "gelatin_box/009_gelatin_box.sdf"),
    'potted_meat_can': os.path.join(SIM_ASSETS_ROOT, "potted_meat_can/010_potted_meat_can.sdf")
}

# Base link names inside each YCB model sdf, keyed by object name.
ycb_model_baselink_names = {
    'cracker_box': 'base_link_cracker',
    'sugar_box': 'base_link_sugar',
    'tomato_soup_can': 'base_link_soup',
    'mustard_bottle': 'base_link_mustard',
    'gelatin_box': 'base_link_gelatin',
    'potted_meat_can': 'base_link_meat'
}


# Larger assets that live in the data root rather than the repository.
LARGE_SIM_ASSETS_ROOT = os.path.join(get_data_root(), 'stable/sim_assets')
示例#18
0
def train_dynamics(config,
                   train_dir, # str: directory to save output
                   multi_episode_dict, # multi_episode_dict
                   ):
    """
    Trains a visual dynamics model.

    Builds train/valid datasets and dataloaders from `multi_episode_dict`,
    constructs the visual dynamics model from `config`, then runs the
    training loop: each batch is rolled forward `n_rollout` steps from
    `n_history` observed steps and the MSE between predicted and
    ground-truth states (normalized by the image diagonal) is minimized.
    Logs, the config, tensorboard events and model checkpoints are written
    under `train_dir`. On KeyboardInterrupt the current model is saved
    before exiting.

    :param config: dict-like config with 'dataset', 'train', 'vision_net'
        and 'env' sections
    :param train_dir: str, directory to save output
    :param multi_episode_dict: episodes used to build the datasets
    """

    # True when keypoints were precomputed and stored on disk; otherwise the
    # vision net computes descriptor keypoints online from images
    use_precomputed_keypoints = config['dataset']['visual_observation']['enabled'] and config['dataset']['visual_observation']['descriptor_keypoints']

    # set random seed for reproduction
    set_seed(config['train']['random_seed'])

    st_epoch = config['train']['resume_epoch'] if config['train']['resume_epoch'] > 0 else 0
    # Tee duplicates stdout into a per-run log file
    tee = Tee(os.path.join(train_dir, 'train_st_epoch_%d.log' % st_epoch), 'w')

    tensorboard_dir = os.path.join(train_dir, "tensorboard")
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)

    writer = SummaryWriter(log_dir=tensorboard_dir)

    # save the config
    save_yaml(config, os.path.join(train_dir, "config.yaml"))


    action_function = ActionFunctionFactory.function_from_config(config)
    observation_function = ObservationFunctionFactory.function_from_config(config)

    # one dataset/dataloader per phase
    datasets = {}
    dataloaders = {}
    data_n_batches = {}
    for phase in ['train', 'valid']:
        print("Loading data for %s" % phase)
        datasets[phase] = MultiEpisodeDataset(config,
                                              action_function=action_function,
                                              observation_function=observation_function,
                                              episodes=multi_episode_dict,
                                              phase=phase)

        dataloaders[phase] = DataLoader(
            datasets[phase], batch_size=config['train']['batch_size'],
            shuffle=True if phase == 'train' else False,
            num_workers=config['train']['num_workers'], drop_last=True)

        data_n_batches[phase] = len(dataloaders[phase])

    use_gpu = torch.cuda.is_available()

    # compute normalization parameters if not starting from pre-trained network . . .


    '''
    define model for dynamics prediction
    '''

    model_dy = build_visual_dynamics_model(config)
    # K: number of reference descriptors (keypoints) fed to the dynamics net
    K = config['vision_net']['num_ref_descriptors']

    print("model_dy.vision_net._reference_descriptors.shape", model_dy.vision_net._ref_descriptors.shape)
    print("model_dy.vision_net.descriptor_dim", model_dy.vision_net.descriptor_dim)
    print("model_dy #params: %d" % count_trainable_parameters(model_dy))

    camera_name = config['vision_net']['camera_name']
    W = config['env']['rgbd_sensors']['sensor_list'][camera_name]['width']
    H = config['env']['rgbd_sensors']['sensor_list'][camera_name]['height']
    diag = np.sqrt(W**2 + H**2) # use this to scale the loss

    # sample reference descriptors unless using precomputed keypoints
    if not use_precomputed_keypoints:
        # sample reference descriptors
        # episode names are sorted so the sampled reference frame is
        # deterministic: first frame of the first training episode
        episode_names = list(datasets["train"].episode_dict.keys())
        episode_names.sort()
        episode_name = episode_names[0]
        episode = datasets["train"].episode_dict[episode_name]
        episode_idx = 0
        camera_name = config["vision_net"]["camera_name"]
        image_data = episode.get_image_data(camera_name, episode_idx)
        des_img = torch.Tensor(image_data['descriptor'])
        mask_img = torch.Tensor(image_data['mask'])
        ref_descriptor_dict = sample_descriptors(des_img,
                                                 mask_img,
                                                 config['vision_net']['num_ref_descriptors'])



        model_dy.vision_net._ref_descriptors.data = ref_descriptor_dict['descriptors']
        model_dy.vision_net.reference_image = image_data['rgb']
        model_dy.vision_net.reference_indices = ref_descriptor_dict['indices']
    else:
        # load reference descriptors from the precomputed-keypoints metadata
        metadata_file = os.path.join(get_data_root(), config['dataset']['descriptor_keypoints_dir'], 'metadata.p')
        descriptor_metadata = load_pickle(metadata_file)

        # [32, 2]
        ref_descriptors = torch.Tensor(descriptor_metadata['ref_descriptors'])

        # [K, 2]
        ref_descriptors = ref_descriptors[:K]
        model_dy.vision_net._ref_descriptors.data = ref_descriptors
        model_dy.vision_net._ref_descriptors_metadata = descriptor_metadata

        # this is just a sanity check
        assert model_dy.vision_net.num_ref_descriptors == K

    print("reference_descriptors", model_dy.vision_net._ref_descriptors)

    # criterion
    criterionMSE = nn.MSELoss()
    l1Loss = nn.L1Loss()

    # optimizer
    params = model_dy.parameters()
    lr = float(config['train']['lr'])
    optimizer = optim.Adam(params, lr=lr, betas=(config['train']['adam_beta1'], 0.999))

    # setup scheduler
    sc = config['train']['lr_scheduler']
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='min',
                                  factor=sc['factor'],
                                  patience=sc['patience'],
                                  threshold_mode=sc['threshold_mode'],
                                  cooldown= sc['cooldown'],
                                  verbose=True)

    if use_gpu:
        print("using gpu")
        model_dy = model_dy.cuda()

    print("model_dy.vision_net._ref_descriptors.device", model_dy.vision_net._ref_descriptors.device)
    print("model_dy.vision_net #params: %d" %(count_trainable_parameters(model_dy.vision_net)))


    best_valid_loss = np.inf
    global_iteration = 0
    epoch_counter_external = 0

    try:
        for epoch in range(st_epoch, config['train']['n_epoch']):
            phases = ['train', 'valid']
            epoch_counter_external = epoch

            writer.add_scalar("Training Params/epoch", epoch, global_iteration)
            for phase in phases:
                # train(True) in the training phase, eval mode otherwise
                model_dy.train(phase == 'train')

                meter_loss_rmse = AverageMeter()
                step_duration_meter = AverageMeter()


                # bar = ProgressBar(max_value=data_n_batches[phase])
                loader = dataloaders[phase]

                for i, data in enumerate(loader):

                    step_start_time = time.time()

                    global_iteration += 1

                    # gradients are only tracked during the training phase
                    with torch.set_grad_enabled(phase == 'train'):
                        n_his, n_roll = config['train']['n_history'], config['train']['n_rollout']
                        n_samples = n_his + n_roll

                        if DEBUG:
                            print("global iteration: %d" %(global_iteration))


                        # visual_observations = data['visual_observations']
                        visual_observations_list = data['visual_observations_list']
                        observations = data['observations']
                        actions = data['actions']

                        if use_gpu:
                            observations = observations.cuda()
                            actions = actions.cuda()

                        # states, actions = data
                        assert actions.size(1) == n_samples

                        B = actions.size(0)
                        loss_mse = 0.


                        # compute the output of the visual model for all timesteps
                        visual_model_output_list = []
                        for visual_obs in visual_observations_list:
                            # visual_obs is a dict containing observation for a single
                            # time step (of course across a batch however)
                            # visual_obs[<camera_name>]['rgb_tensor'] has shape [B, 3, H, W]

                            # probably need to cast input to cuda
                            dynamics_net_input = None
                            if use_precomputed_keypoints:
                                # note precomputed descriptors stored on disk are of size
                                # K = 32. We need to trim it down to the appropriate size
                                # [B, K_disk, 2] where K_disk is num keypoints on disk
                                keypoints = visual_obs[camera_name]['descriptor_keypoints']


                                # [B, 32, 2] where K is num keypoints
                                keypoints = keypoints[:,:K]

                                if DEBUG:
                                    print("keypoints.shape", keypoints.shape)

                                dynamics_net_input = keypoints.flatten(start_dim=1)
                            else:
                                out_dict = model_dy.vision_net.forward(visual_obs)

                                # [B, vision_model_out_dim]
                                dynamics_net_input = out_dict['dynamics_net_input']

                            visual_model_output_list.append(dynamics_net_input)

                        # concatenate this into a tensor
                        # [B, n_samples, vision_model_out_dim]
                        visual_model_output = torch.stack(visual_model_output_list, dim=1)

                        # cast this to float so it can be concatenated below
                        visual_model_output = visual_model_output.type_as(observations)

                        if DEBUG:
                            print('visual_model_output.shape', visual_model_output.shape)
                            print("observations.shape", observations.shape)
                            print("actions.shape", actions.shape)

                        # states is gotten by concatenating visual_observations and observations
                        # [B, n_samples, vision_model_out_dim + obs_dim]
                        states = torch.cat((visual_model_output, observations), dim=-1)

                        # state_cur: B x n_his x state_dim
                        state_cur = states[:, :n_his]

                        if DEBUG:
                            print("states.shape", states.shape)

                        # roll the dynamics model forward n_roll steps,
                        # accumulating the per-step prediction loss
                        for j in range(n_roll):

                            if DEBUG:
                                print("n_roll j: %d" %(j))

                            state_des = states[:, n_his + j]

                            # action_cur: B x n_his x action_dim
                            action_cur = actions[:, j : j + n_his] if actions is not None else None

                            # state_pred: B x state_dim
                            # state_pred: B x state_dim
                            input = {'observation': state_cur,
                                     'action': action_cur,
                                     }

                            if DEBUG:
                                print("state_cur.shape", state_cur.shape)
                                print("action_cur.shape", action_cur.shape)

                            state_pred = model_dy.dynamics_net(input)

                            # normalize by diag to ensure the loss is in [0,1] range
                            loss_mse_cur = criterionMSE(state_pred/diag, state_des/diag)
                            loss_mse += loss_mse_cur / n_roll

                            # l1Loss
                            loss_l1 = l1Loss(state_pred, state_des)

                            # update state_cur
                            # state_pred.unsqueeze(1): B x 1 x state_dim
                            # state_cur: B x n_his x state_dim
                            state_cur = torch.cat([state_cur[:, 1:], state_pred.unsqueeze(1)], 1)

                            # NOTE(review): loss_mse is the running sum over
                            # rollout steps at this point, so the meter is fed
                            # partial accumulations inside the j-loop —
                            # confirm this is intended
                            meter_loss_rmse.update(np.sqrt(loss_mse.item()), B)

                    step_duration_meter.update(time.time() - step_start_time)
                    if phase == 'train':
                        optimizer.zero_grad()
                        loss_mse.backward()
                        optimizer.step()

                    if (i % config['train']['log_per_iter'] == 0) or (global_iteration % config['train']['log_per_iter'] == 0):
                        log = '%s [%d/%d][%d/%d] LR: %.6f' % (
                            phase, epoch, config['train']['n_epoch'], i, data_n_batches[phase],
                            get_lr(optimizer))
                        log += ', rmse: %.6f (%.6f)' % (
                            np.sqrt(loss_mse.item()), meter_loss_rmse.avg)

                        log += ', step time %.6f' %(step_duration_meter.avg)
                        step_duration_meter.reset()


                        print(log)

                        # log data to tensorboard
                        # only do it once we have reached 100 iterations
                        if global_iteration > 100:
                            writer.add_scalar("Params/learning rate", get_lr(optimizer), global_iteration)
                            writer.add_scalar("Loss_MSE/%s" %(phase), loss_mse.item(), global_iteration)
                            writer.add_scalar("L1/%s" %(phase), loss_l1.item(), global_iteration)
                            writer.add_scalar("L1_fraction/%s" %(phase), loss_l1.item()/diag, global_iteration)
                            writer.add_scalar("RMSE average loss/%s" %(phase), meter_loss_rmse.avg, global_iteration)

                    # periodic checkpoint during training
                    if phase == 'train' and i % config['train']['ckp_per_iter'] == 0:
                        save_model(model_dy, '%s/net_dy_epoch_%d_iter_%d' % (train_dir, epoch, i))



                log = '%s [%d/%d] Loss: %.6f, Best valid: %.6f' % (
                    phase, epoch, config['train']['n_epoch'], meter_loss_rmse.avg, best_valid_loss)
                print(log)

                if phase == 'valid':
                    if config['train']['lr_scheduler']['enabled']:
                        scheduler.step(meter_loss_rmse.avg)

                    # print("\nPhase == valid")
                    # print("meter_loss_rmse.avg", meter_loss_rmse.avg)
                    # print("best_valid_loss", best_valid_loss)
                    # keep a checkpoint of the best model seen on validation
                    if meter_loss_rmse.avg < best_valid_loss:
                        best_valid_loss = meter_loss_rmse.avg
                        save_model(model_dy, '%s/net_best_dy' % (train_dir))

                writer.flush() # flush SummaryWriter events to disk

    except KeyboardInterrupt:
        # save network if we have a keyboard interrupt
        save_model(model_dy, '%s/net_dy_epoch_%d_keyboard_interrupt' % (train_dir, epoch_counter_external))
        writer.flush() # flush SummaryWriter events to disk
示例#19
0
def load_model_and_data(
    K_matrix=None,
    T_world_camera=None,
):
    """Load the trained dynamics + dense-descriptor models and build a planner.

    Args:
        K_matrix: optional camera intrinsics matrix ``K``. Only used when
            ``T_world_camera`` is also given; together they override the
            stored camera calibration.
        T_world_camera: optional world-from-camera transform. If either this
            or ``K_matrix`` is ``None``, calibration is instead looked up via
            ``get_spartan_camera_info(camera_name)``.

    Returns:
        dict with keys ``model_dy``, ``config``, ``spatial_descriptor_data``,
        ``action_function``, ``observation_function``,
        ``visual_observation_function``, ``planner``, ``camera_info``.

    NOTE(review): requires CUDA, reads model/config files under the data
    root, and depends on the module-level ``PLANNER_TYPE`` global.
    """

    dataset_name = "push_box_hardware"

    # hard-coded trained-model location under the data root
    model_name = "DD_3D/2020-07-02-17-59-21-362337_DD_3D_n_his_2_T_aug"
    train_dir = os.path.join(
        get_data_root(),
        "dev/experiments/22/dataset_push_box_hardware/trained_models/dynamics")

    train_dir = os.path.join(train_dir, model_name)
    ckpt_file = os.path.join(train_dir, "net_best_dy_state_dict.pth")

    train_config = load_yaml(os.path.join(train_dir, 'config.yaml'))
    state_dict = torch.load(ckpt_file)

    # build dynamics model and load the trained weights
    model_dy = build_dynamics_model(train_config)
    model_dy.load_state_dict(state_dict)
    model_dy = model_dy.eval()
    model_dy = model_dy.cuda()

    # dataset paths -- NOTE(review): dataset_root/episodes_config are not
    # used below; the lookup at least validates dataset_name
    dataset_paths = get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    episodes_config = dataset_paths['episodes_config']

    # reference descriptors selected at training time
    spatial_descriptor_data = load_pickle(
        os.path.join(train_dir, 'spatial_descriptors.p'))
    metadata = load_pickle(os.path.join(train_dir, 'metadata.p'))

    ref_descriptors = spatial_descriptor_data['spatial_descriptors']
    ref_descriptors = torch_utils.cast_to_torch(ref_descriptors).cuda()

    # dense descriptor model (full pickled model, not a state dict)
    model_dd_file = metadata['model_file']
    model_dd = torch.load(model_dd_file)
    model_dd = model_dd.eval()
    model_dd = model_dd.cuda()

    camera_name = train_config['dataset']['visual_observation_function'][
        'camera_name']

    # camera calibration: caller-supplied override, else stored calibration
    if (T_world_camera is not None) and (K_matrix is not None):
        camera_info = {
            "K": K_matrix,
            'T_world_camera': T_world_camera,
        }
    else:
        camera_info = get_spartan_camera_info(camera_name)

    camera_info['camera_name'] = camera_name
    visual_observation_function = \
        VisualObservationFunctionFactory.descriptor_keypoints_3D(config=train_config,
                                                                 camera_name=camera_name,
                                                                 model_dd=model_dd,
                                                                 ref_descriptors=ref_descriptors,
                                                                 K_matrix=camera_info['K'],
                                                                 T_world_camera=camera_info['T_world_camera'],
                                                                 )

    action_function = ActionFunctionFactory.function_from_config(train_config)
    observation_function = ObservationFunctionFactory.function_from_config(
        train_config)

    #### PLANNER #######
    # planner config = train config with the 'mpc' section swapped in from
    # the experiment yaml (copy.copy: top-level dict copy, so assigning the
    # 'mpc' key does not mutate train_config)
    planner_config = copy.copy(train_config)
    config_tmp = load_yaml(
        os.path.join(get_project_root(),
                     'experiments/exp_22_push_box_hardware/config_DD_3D.yaml'))
    planner_config['mpc'] = config_tmp['mpc']
    if PLANNER_TYPE == "random_shooting":
        planner = RandomShootingPlanner(planner_config)
    elif PLANNER_TYPE == "mppi":
        planner = PlannerMPPI(planner_config)
    else:
        raise ValueError("unknown planner type: %s" % (PLANNER_TYPE))

    return {
        "model_dy": model_dy,
        'config': train_config,
        'spatial_descriptor_data': spatial_descriptor_data,
        'action_function': action_function,
        'observation_function': observation_function,
        'visual_observation_function': visual_observation_function,
        'planner': planner,
        'camera_info': camera_info,
    }
def main():
    """Precompute dense-descriptor vision data for a dataset.

    Pipeline (each stage gated by an ``if True`` toggle):
      1. compute descriptor confidence scores (writes metadata.p / data.p),
      2. precompute descriptor keypoints for every episode,
      3. select spatially separated keypoints from the confidence scores.

    Writes all outputs under a timestamped directory in the data root.
    NOTE(review): requires CUDA and is interactive (prompts before
    overwriting an existing metadata.p).
    """
    dataset_name, multi_episode_dict = load_episodes()

    ## Load Model
    model_name, model_file = get_DD_model_file()
    model = torch.load(model_file)
    model = model.cuda()
    model = model.eval()

    # make this unique: timestamp suffix guarantees a fresh output dir
    output_dir = os.path.join(get_data_root(),
                              "dev/experiments/09/precomputed_vision_data",
                              "dataset_%s" % (dataset_name),
                              "model_name_%s" % (model_name),
                              get_current_YYYY_MM_DD_hh_mm_ss_ms())

    camera_name = "camera_1_top_down"
    episode_name = "2020-05-13-21-55-01-487901_idx_33"
    episode_idx = 22

    # compute descriptor confidence scores
    if True:
        print("\n\n---------Computing Descriptor Confidence Scores-----------")
        metadata_file = os.path.join(output_dir, 'metadata.p')
        if os.path.isfile(metadata_file):
            answer = input(
                "metadata.p file already exists, do you want to overwrite it? y/n"
            )

            if answer == "y":
                # BUG FIX: os.rmdir() only removes *empty* directories, and
                # this branch is reached exactly when output_dir contains
                # metadata.p, so it always raised OSError. Remove the stale
                # metadata file itself, matching the message printed below.
                os.remove(metadata_file)
                print("removing existing file and continuing")

            else:
                print("aborting")
                quit()

        compute_descriptor_confidences(
            multi_episode_dict,
            model,
            output_dir,
            batch_size=10,
            num_workers=20,
            model_file=model_file,
            camera_name=camera_name,
            num_ref_descriptors=50,
            num_batches=10,
            episode_name_arg=episode_name,
            episode_idx=episode_idx,
        )

    # precompute descriptor keypoints using the metadata from stage 1
    if True:
        metadata_file = os.path.join(output_dir, 'metadata.p')
        metadata = load_pickle(metadata_file)

        print("\n\n---------Precomputing Descriptor Keypoints-----------")
        descriptor_keypoints_output_dir = os.path.join(output_dir,
                                                       "descriptor_keypoints")
        precompute_descriptor_keypoints(
            multi_episode_dict,
            model,
            descriptor_keypoints_output_dir,
            ref_descriptors_metadata=metadata,
            batch_size=10,
            num_workers=20,
        )

    # pick K spatially separated keypoints from the confidence-score data
    if True:
        confidence_score_data_file = os.path.join(output_dir, 'data.p')
        confidence_score_data = load_pickle(confidence_score_data_file)
        print(
            "\n\n---------Selecting Spatially Separated Keypoints-----------")
        score_and_select_spatially_separated_keypoints(
            metadata,
            confidence_score_data=confidence_score_data,
            K=5,
            position_diff_threshold=25,
            output_dir=output_dir,
        )

    print("Data saved at: ", output_dir)
    print("Finished Normally")
示例#21
0
# Configure dataset epoch sizing for transporter training: fixed epoch
# sizes rather than one pass over all images.
config['dataset']['set_epoch_size_to_num_images'] = False
config['dataset']['epoch_size'] = {'train': 6000, 'valid': 300}

# camera_name = "camera_angled"
camera_name = "camera_1_top_down"

config['perception']['camera_name'] = camera_name

# transporter training reads the generic 'train' section; alias it to the
# transporter-specific one
config['train'] = config['train_transporter']
config['train']['train_valid_ratio'] = 0.83

print(config)

# prepare folders
data_dir = os.path.join(get_data_root(), 'dev/experiments/18/data',
                        dataset_name)

# timestamped train dir so repeated runs never collide
train_dir = os.path.join(
    get_data_root(), 'dev/experiments/18/trained_models/perception',
    "transporter_%s_%s" % (camera_name, get_current_YYYY_MM_DD_hh_mm_ss_ms()))
ckp_dir = os.path.join(
    train_dir, 'train_nKp%d_invStd%.1f' %
    (config['perception']['n_kp'], config['perception']['inv_std']))

start_time = time.time()

multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
    dataset_root=data_dir)
# BUG FIX: print() is not printf-style -- the elapsed time was passed as a
# second positional argument, printing a literal "%d". Format it instead.
print("loading dataset took %d seconds" % (time.time() - start_time))
示例#22
0
from key_dynam.utils.utils import get_project_root, save_yaml, load_yaml, get_current_YYYY_MM_DD_hh_mm_ss_ms, get_data_root
from key_dynam.dense_correspondence.dc_drake_sim_episode_reader import DCDrakeSimEpisodeReader

import dense_correspondence_manipulation.utils.utils as pdc_utils
from dense_correspondence.training.train_integral_heatmap_3d import train_dense_descriptors
from dense_correspondence_manipulation.utils import dev_utils
from dense_correspondence_manipulation.utils.utils import set_cuda_visible_devices

# Pin training to GPU 0.
set_cuda_visible_devices([0])

# load config

# specify the dataset
dataset_name = "dps_box_on_side_600"
# prepare folders
data_dir = os.path.join(get_data_root(), 'dev/experiments/18/data',
                        dataset_name)

start_time = time.time()

# Load every episode in the dataset directory into memory/handles.
multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
    dataset_root=data_dir)

# placeholder for now
config = dev_utils.load_integral_heatmap_3d_config()
config['dataset']['name'] = dataset_name
# Train the 3D integral-heatmap descriptors on both angled cameras.
config['dataset']['camera_names'] = ['camera_angled', 'camera_angled_rotated']
config['dataset']['train_valid_ratio'] = 0.83

# Timestamped model name so repeated runs get distinct output directories.
# NOTE(review): presumably consumed by training code past this excerpt.
model_name = "3D_loss_%s_%s" % ("camera_angled",
                                get_current_YYYY_MM_DD_hh_mm_ss_ms())