def unique_identifier(self):
    """
    Return the unique identifier string for this network, or None.

    An identifier is only available when all of the following hold:
    - a network params folder is known (otherwise ValueError is raised
      by the property and we return None),
    - that folder contains an 'identifier.yaml' file,
    - this network was constructed from a model folder.

    :return: "<id>+<model_param_filename_tail>" or None
    :rtype: str or None
    """
    try:
        params_folder = self.path_to_network_params_folder
    except ValueError:
        # no model params folder at all
        return None

    id_file = os.path.join(params_folder, 'identifier.yaml')
    if not os.path.exists(id_file):
        return None

    if not self.constructed_from_model_folder:
        return None

    id_dict = utils.getDictFromYamlFilename(id_file)
    return id_dict['id'] + "+" + self.config['model_param_filename_tail']
def main():
    """Launch an interactive heatmap visualization for a DD model."""
    model_file, dataset_name = get_DD_model_file()

    # locate the dataset on disk and load every episode
    paths = exp_utils.get_dataset_paths(dataset_name)
    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
        paths['dataset_root'], max_num_episodes=None)

    # read the model's training config, but disable augmentation for viewing
    model_config = getDictFromYamlFilename(
        os.path.join(os.path.dirname(model_file), 'config.yaml'))
    model_config['dataset']['data_augmentation'] = False

    # could also use the "train" split here
    dataset = DynamicDrakeSimDataset(model_config, multi_episode_dict,
                                     phase="valid")

    model = torch.load(model_file).cuda().eval()

    heatmap_vis = HeatmapVisualization(model_config,
                                       dataset,
                                       model,
                                       visualize_3D=False,
                                       camera_list=['camera_1_top_down'],
                                       verbose=True,
                                       sample_same_episode=False,
                                       display_confidence_value=False,
                                       use_custom_target_image_func=True)
    heatmap_vis.run()
Beispiel #3
0
    def load_default_config():
        """Load the default training config shipped with the source tree."""
        source_dir = utils.getDenseCorrespondenceSourceDir()
        default_config_file = os.path.join(
            source_dir, 'config', 'dense_correspondence', 'training',
            'training.yaml')
        return utils.getDictFromYamlFilename(default_config_file)
    def load_default_config():
        """Return the dict parsed from config/dense_correspondence/training/training.yaml."""
        path = os.path.join(utils.getDenseCorrespondenceSourceDir(),
                            'config', 'dense_correspondence', 'training',
                            'training.yaml')
        config = utils.getDictFromYamlFilename(path)
        return config
Beispiel #5
0
 def get_descriptor_target_from_yaml(self):
     """
     Read the picked descriptor from new_descriptor_picked.yaml.

     :return: 1-dimensional numpy array of length D (the descriptor dimension)
     """
     yaml_path = os.path.join(pdc_utils.getDenseCorrespondenceSourceDir(),
                              "../config", "new_descriptor_picked.yaml")
     data = pdc_utils.getDictFromYamlFilename(yaml_path)
     return np.asarray(data["descriptor"])
Beispiel #6
0
    def load_specific_dataset(self):
        """Load the rope_nonrigid_412vert_only composite dataset into self._dataset."""
        config_path = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'dataset', 'composite',
            'rope_nonrigid_412vert_only.yaml')
        self._dataset = SpartanDataset(
            config=utils.getDictFromYamlFilename(config_path))
Beispiel #7
0
    def load_pick_point_config(self):
        """
        Load the pick point config into self.pick_point_config.

        The config stores which network to use and the descriptor to look for.
        """
        filename = os.path.join(get_config_directory(),
                                PICK_POINT_CONFIG_FILENAME)
        self.pick_point_config = pdc_utils.getDictFromYamlFilename(filename)
    def from_data_folder(data_folder, config=None):
        """
        Build a FusionReconstruction from a processed data folder.

        :param data_folder: folder containing 'images/', 'images.posegraph'
            and 'images.vtp'
        :param config: optional config dict; defaults to load_default_config()
        :return: a fully set-up FusionReconstruction
        """
        if config is None:
            config = FusionReconstruction.load_default_config()

        images_dir = os.path.join(data_folder, 'images')

        fr = FusionReconstruction()
        fr.data_dir = data_folder
        fr.config = config
        fr.kinematics_pose_data = utils.getDictFromYamlFilename(
            os.path.join(images_dir, 'pose_data.yaml'))
        fr.camera_info = utils.getDictFromYamlFilename(
            os.path.join(images_dir, 'camera_info.yaml'))
        # posegraph filename must be set before fusion_pose_data is touched
        fr.fusion_posegraph_filename = os.path.join(data_folder, 'images.posegraph')
        fr.fusion_pose_data.first_frame_to_world = transformUtils.copyFrame(
            fr._reconstruction_to_world)
        fr.reconstruction_filename = os.path.join(fr.data_dir, 'images.vtp')
        fr.setup()
        return fr
    def load_specific_dataset(self):
        """Load the caterpillar_only_9 composite dataset into self._dataset."""
        source_dir = utils.getDenseCorrespondenceSourceDir()
        dataset_config_filename = os.path.join(
            source_dir, 'config', 'dense_correspondence', 'dataset',
            'composite', 'caterpillar_only_9.yaml')
        # alternative dataset lives in the same composite folder:
        # '4_shoes_all.yaml'
        dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
        self._dataset = SpartanDataset(config=dataset_config)
    def load_specific_dataset(self):
        """
        Load the 4_shoes_all composite dataset into self._dataset.

        NOTE(review): the original assigned 'hats_3_demo_composite.yaml' and
        immediately overwrote it with '4_shoes_all.yaml'; the dead assignment
        has been removed — the effective dataset is unchanged.
        """
        dataset_config_filename = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'dataset', 'composite',
            '4_shoes_all.yaml')

        dataset_config = utils.getDictFromYamlFilename(dataset_config_filename)
        self._dataset = SpartanDataset(config=dataset_config)
    def load_training_dataset(self):
        """
        Load the dataset this network was trained on.

        :return: dataset loaded with the config stored in dataset.yaml of the
            network params folder
        :rtype: SpartanDataset
        """
        folder = utils.convert_to_absolute_path(
            self.path_to_network_params_folder)
        config = utils.getDictFromYamlFilename(
            os.path.join(folder, 'dataset.yaml'))
        return SpartanDataset(config_expanded=config)
Beispiel #12
0
    def from_data_folder(data_folder,
                         config=None,
                         name=None,
                         load_foreground_mesh=True):
        """

        :param data_folder: The 'processed' subfolder of a top level log folder
        :type data_folder:
        :param config: YAML file containing parameters. The default file is
        change_detection.yaml. This file contains the parameters used to crop
        the fusion reconstruction and extract the foreground.
        :type config:YAML file
        :param name:
        :type name:
        :return:
        :rtype:
        """
        fr = TSDFReconstruction(load_foreground_mesh)
        fr.data_dir = data_folder

        if name is None:
            name = ""

        if config is None:
            print "no config passed in, loading default"
            config = FusionReconstruction.load_default_config()

        pose_data_filename = os.path.join(data_folder, 'images',
                                          'pose_data.yaml')
        camera_info_filename = os.path.join(data_folder, 'images',
                                            'camera_info.yaml')

        fr.config = config
        fr.name = name
        fr.kinematics_pose_data = utils.getDictFromYamlFilename(
            pose_data_filename)
        fr.camera_info = utils.getDictFromYamlFilename(camera_info_filename)
        fr.setup()

        return fr
    def from_model_folder(model_folder, load_stored_params=True, model_param_file=None,
        iteration=None):
        """
        Loads a DenseCorrespondenceNetwork from a model folder.

        :param model_folder: the path to the folder where the model is stored.
            This directory contains files like

            - 003500.pth
            - training.yaml

        :param load_stored_params: whether to load the saved network weights
        :param model_param_file: optional explicit .pth file; if None the one
            found in model_folder (optionally at `iteration`) is used
        :param iteration: optional training iteration used to select the
            param file
        :return: a DenseCorrespondenceNetwork object
        :rtype: DenseCorrespondenceNetwork
        """

        model_folder = utils.convert_to_absolute_path(model_folder)

        if model_param_file is None:
            model_param_file, _, _ = utils.get_model_param_file_from_directory(model_folder, iteration=iteration)

        model_param_file = utils.convert_to_absolute_path(model_param_file)

        training_config_filename = os.path.join(model_folder, "training.yaml")
        training_config = utils.getDictFromYamlFilename(training_config_filename)
        config = training_config["dense_correspondence_network"]
        config["path_to_network_params_folder"] = model_folder

        fcn = resnet_dilated.Resnet34_8s(num_classes=config['descriptor_dimension'])

        dcn = DenseCorrespondenceNetwork(fcn, config['descriptor_dimension'],
                                         image_width=config['image_width'],
                                         image_height=config['image_height'])

        # load the stored params
        if load_stored_params:
            # try the old serialization format first (whole-network state
            # dict), then fall back to the newer fcn-only format.
            # BUGFIX: the original used a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            try:
                dcn.load_state_dict(torch.load(model_param_file))
            except Exception:
                logging.info("loading params with the new style failed, falling back to dcn.fcn.load_state_dict")
                dcn.fcn.load_state_dict(torch.load(model_param_file))

        dcn.cuda()
        dcn.train()
        dcn.config = config

        return dcn
    def make_default_caterpillar():
        """
        Make a default SpartanDataset from the caterpillar_only composite
        config.

        :return: dataset in "train" mode
        :rtype: SpartanDataset
        """
        config_file = os.path.join(
            utils.getDenseCorrespondenceSourceDir(), 'config',
            'dense_correspondence', 'dataset', 'composite',
            'caterpillar_only.yaml')
        config = utils.getDictFromYamlFilename(config_file)
        return SpartanDataset(mode="train", config=config)
    def from_data_folder(data_folder, config=None, name=None):
        fr = TSDFReconstruction()
        fr.data_dir = data_folder

        if name is None:
            name = ""

        if config is None:
            print "no config passed in, loading default"
            config = FusionReconstruction.load_default_config()

        pose_data_filename = os.path.join(data_folder, 'images', 'pose_data.yaml')
        camera_info_filename = os.path.join(data_folder, 'images', 'camera_info.yaml')

        fr.config = config
        fr.name = name
        fr.kinematics_pose_data = utils.getDictFromYamlFilename(pose_data_filename)
        fr.camera_info = utils.getDictFromYamlFilename(camera_info_filename)

        fr.reconstruction_filename = os.path.join(fr.data_dir, 'fusion_mesh.ply')
        fr.setup()
        return fr
    def load_dcn_network(self):
        """
        Loads the DCN.
        """
        config = pdc_utils.getDictFromYamlFilename(NETWORK_CONFIG_FILENAME)
        defaults_config = pdc_utils.get_defaults_config()
        pdc_utils.set_cuda_visible_devices([0])
        dce = DenseCorrespondenceEvaluation(config)

        self.dcn = dce.load_network_from_config(NETWORK_NAME)
        self.dcn.eval()
        self.dataset = self.dcn.load_training_dataset(
        )  # why do we need to do this?
        print "finished loading dcn"
Beispiel #17
0
    def __init__(self, config_filename='shoes_all.yaml'):
        """
        Build the SpartanDataset described by `config_filename` (a composite
        dataset yaml) together with the default training config.
        """
        with HiddenPrints():
            source_dir = utils.getDenseCorrespondenceSourceDir()
            self.config_filename = os.path.join(
                source_dir, 'config', 'dense_correspondence', 'dataset',
                'composite', config_filename)
            self.train_config_filename = os.path.join(
                source_dir, 'config', 'dense_correspondence', 'training',
                'training.yaml')

            self.config = utils.getDictFromYamlFilename(self.config_filename)
            self.train_config = utils.getDictFromYamlFilename(
                self.train_config_filename)

            self.dataset = SpartanDataset(config=self.config)
            self.dataset.set_parameters_from_training_config(self.train_config)

        # holds centroid and radius (currently min/max z values only) per
        # scene: self.centroid_and_radius[scene_name]["centroid"] / ["radius"]
        self.centroid_and_radius = {}
Beispiel #18
0
def main():
    """Interactive heatmap visualization for the push-box hardware datasets."""
    # flip this pair of assignments to switch datasets; the last one wins
    dataset_name = "push_box_hardware"
    dataset_name = "push_box_string_pull"

    model_file = None
    if dataset_name == "push_box_hardware":
        model_file = "/home/manuelli/data_ssd/key_dynam/trained_models/perception/dense_descriptors/2020-02-27-00-32-58_3D_loss_resnet50__dataset_real_push_box/net_dy_epoch_3_iter_1000_model.pth"
    elif dataset_name == "push_box_string_pull":
        # model_name = "data_aug_2020-07-02-02-39-27-400442"
        # model_file = os.path.join(get_data_root(), 'dev/experiments/22/dataset_push_box_string_pull/net_')
        model_file = "/home/manuelli/data/key_dynam/dev/experiments/22/dataset_push_box_string_pull/trained_models/perception/dense_descriptors/data_aug_2020-07-02-02-39-27-400442/net_best_model.pth"

    # NOTE: episodes always come from "push_box_hardware", regardless of
    # dataset_name above
    dataset_paths = exp_utils.get_dataset_paths("push_box_hardware")
    multi_episode_dict = DCDynamicSpartanEpisodeReader.load_dataset(
        config=dataset_paths['episodes_config'],
        episodes_root=dataset_paths['dataset_root'])

    camera_list = ['d415_01']
    target_camera_list = ['d415_01']

    config = dev_utils.load_dataset_config()

    model_config_file = os.path.join(os.path.dirname(model_file), 'config.yaml')
    model_config = getDictFromYamlFilename(model_config_file)

    # disable augmentation for visualization
    config['dataset']['data_augmentation'] = False
    dataset = DynamicDrakeSimDataset(config, multi_episode_dict, phase="train")

    model = torch.load(model_file).cuda().eval()

    heatmap_vis = HeatmapVisualization(config,
                                       dataset,
                                       model,
                                       visualize_3D=False,
                                       camera_list=camera_list,
                                       verbose=True,
                                       sample_same_episode=False,
                                       display_confidence_value=True,
                                       use_custom_target_image_func=False,
                                       target_camera_names=target_camera_list)
    heatmap_vis.run()
    def from_data_folder(data_folder, config=None, name=None):
        """

        :param data_folder: The 'processed' subfolder of a top level log folder
        :type data_folder:
        :param config:
        :type config:
        :param name:
        :type name:
        :return:
        :rtype:
        """
        fr = TSDFReconstruction()
        fr.data_dir = data_folder

        if name is None:
            name = ""

        if config is None:
            print "no config passed in, loading default"
            config = FusionReconstruction.load_default_config()

        pose_data_filename = os.path.join(data_folder, 'images',
                                          'pose_data.yaml')
        camera_info_filename = os.path.join(data_folder, 'images',
                                            'camera_info.yaml')

        fr.config = config
        fr.name = name
        fr.kinematics_pose_data = utils.getDictFromYamlFilename(
            pose_data_filename)
        fr.camera_info = utils.getDictFromYamlFilename(camera_info_filename)

        fr.reconstruction_filename = os.path.join(fr.data_dir,
                                                  'fusion_mesh.ply')
        fr.setup()
        return fr
Beispiel #20
0
    def from_data_folder(data_folder, config=None):
        """
        Build a FusionReconstruction from a processed data folder.

        :param data_folder: folder containing 'images/', 'images.posegraph'
            and 'images.vtp'
        :param config: optional config dict; defaults to load_default_config()
        :return: a fully set-up FusionReconstruction
        """
        if config is None:
            config = FusionReconstruction.load_default_config()

        images_dir = os.path.join(data_folder, 'images')

        fr = FusionReconstruction()
        fr.data_dir = data_folder
        fr.config = config
        fr.kinematics_pose_data = utils.getDictFromYamlFilename(
            os.path.join(images_dir, 'pose_data.yaml'))
        fr.camera_info = utils.getDictFromYamlFilename(
            os.path.join(images_dir, 'camera_info.yaml'))
        # posegraph filename must be set before fusion_pose_data is touched
        fr.fusion_posegraph_filename = os.path.join(data_folder,
                                                    'images.posegraph')
        fr.fusion_pose_data.first_frame_to_world = transformUtils.copyFrame(
            fr._reconstruction_to_world)
        fr.reconstruction_filename = os.path.join(fr.data_dir, 'images.vtp')
        fr.setup()
        return fr
Beispiel #21
0
    def load_dcn_network(self):
        """
        Loads the DCN.

        Currently just edit this function to change which
        """
        config = pdc_utils.getDictFromYamlFilename(NETWORK_CONFIG_FILENAME)
        defaults_config = pdc_utils.get_defaults_config()
        pdc_utils.set_cuda_visible_devices([0])
        dce = DenseCorrespondenceEvaluation(config)

        network_name = self.pick_point_config["network_name"]
        self.dcn = dce.load_network_from_config(network_name)
        self.dataset = self.dcn.load_training_dataset() # why do we need to do this?
        print "finished loading dcn"
    def descriptor_image_stats(self):
        """
        Return the descriptor normalization parameters, lazily loading them
        from descriptor_statistics.yaml on first access.

        :return: descriptor image statistics dict
        """
        if self._descriptor_image_stats is None:
            params_dir = utils.convert_to_absolute_path(
                self.path_to_network_params_folder)
            stats_file = os.path.join(params_dir, "descriptor_statistics.yaml")
            self._descriptor_image_stats = utils.getDictFromYamlFilename(stats_file)
        return self._descriptor_image_stats
Beispiel #23
0
    def from_model_folder(model_folder,
                          load_stored_params=True,
                          model_param_file=None,
                          iteration=None):
        """
        Load a DenseCorrespondenceNetwork from a model folder.

        :param model_folder: path to the folder where the model is stored.
            This directory contains files like

            - 003500.pth
            - training.yaml

        :param load_stored_params: whether to load the saved network weights
        :param model_param_file: optional explicit .pth file; if None the one
            found in model_folder (optionally at `iteration`) is used
        :param iteration: optional training iteration used to select the
            param file
        :return: a DenseCorrespondenceNetwork object
        :rtype: DenseCorrespondenceNetwork
        """
        from_model_folder = False
        model_folder = utils.convert_to_absolute_path(model_folder)

        if model_param_file is None:
            model_param_file, _, _ = utils.get_model_param_file_from_directory(
                model_folder, iteration=iteration)
            from_model_folder = True

        model_param_file = utils.convert_to_absolute_path(model_param_file)

        training_config = utils.getDictFromYamlFilename(
            os.path.join(model_folder, "training.yaml"))
        config = training_config["dense_correspondence_network"]
        config["path_to_network_params_folder"] = model_folder
        config["model_param_filename_tail"] = os.path.split(model_param_file)[1]

        dcn = DenseCorrespondenceNetwork.from_config(
            config,
            load_stored_params=load_stored_params,
            model_param_file=model_param_file)

        # record whether or not the network was constructed from a model folder
        dcn.constructed_from_model_folder = from_model_folder
        dcn.model_folder = model_folder
        return dcn
def run(data_folder,
        config_file=CONFIG_FILE,
        debug=False,
        globalsDict=None,
        background_scene_data_folder=None):
    """
    Runs the change detection pipeline.

    :param data_folder: the 'processed' subfolder of a top-level log folder
    :param config_file: yaml file with the change detection parameters
        (default CONFIG_FILE)
    :param debug: if True, don't auto-run/quit; leave the app open
    :param globalsDict: dict to store shared objects in (defaults to globals())
    :param background_scene_data_folder: data folder for the background scene;
        defaults to data_folder
    :return: None (blocks until the director app exits)
    """

    if globalsDict is None:
        globalsDict = globals()

    if background_scene_data_folder is None:
        background_scene_data_folder = data_folder

    # BUGFIX: the original re-assigned `config_file = CONFIG_FILE` here,
    # silently ignoring the caller-supplied config_file argument.
    config = utils.getDictFromYamlFilename(config_file)

    changeDetection, obj_dict = change_detection.ChangeDetection.from_data_folder(
        data_folder,
        config=config,
        globalsDict=globalsDict,
        background_data_folder=background_scene_data_folder)

    app = obj_dict['app']
    globalsDict['cd'] = changeDetection
    view = obj_dict['view']

    # if debug:
    #     changeDetection.background_reconstruction.visualize_reconstruction(view, name='background')

    def single_shot_function():
        changeDetection.run()
        app.app.quit()

    if not debug:
        TimerCallback(callback=single_shot_function).singleShot(0)

    app.app.start(restoreWindow=True)
def run(data_folder, config_file=CONFIG_FILE, debug=False, globalsDict=None):
    """
    Runs the change detection pipeline with cropping effectively disabled and
    renders depth images of the background reconstruction.

    :param data_folder: the 'processed' subfolder of a top-level log folder
    :param config_file: yaml file with the change detection parameters
    :param debug: if True, don't auto-run/quit; leave the app open
    :param globalsDict: dict to store shared objects in (defaults to globals())
    :return: None (blocks until the director app exits)
    """

    if globalsDict is None:
        globalsDict = globals()

    # BUGFIX: the original re-assigned `config_file = CONFIG_FILE` here,
    # silently ignoring the caller-supplied config_file argument.
    config = utils.getDictFromYamlFilename(config_file)

    # make crop box dimensions large so effectively no cropping happens
    for key in config['crop_box']['dimensions']:
        config['crop_box']['dimensions'][key] = 10.0  # set it to 10 meters

    changeDetection, obj_dict = change_detection.ChangeDetection.from_data_folder(
        data_folder,
        config=config,
        globalsDict=globalsDict,
        background_data_folder=data_folder)

    # set foreground mesh to actually be background mesh
    changeDetection.foreground_reconstruction = changeDetection.background_reconstruction

    app = obj_dict['app']
    globalsDict['cd'] = changeDetection
    view = obj_dict['view']

    # if debug:
    #     changeDetection.background_reconstruction.visualize_reconstruction(view, name='background')

    def single_shot_function():
        changeDetection.render_depth_images()
        app.app.quit()

    if not debug:
        TimerCallback(callback=single_shot_function).singleShot(0)

    app.app.start(restoreWindow=True)
def main():
    """Interactive heatmap visualization for the box_push_1000 datasets."""
    set_cuda_visible_devices([get_freer_gpu()])

    # flip this pair of assignments to switch datasets; the last one wins
    dataset_name = "box_push_1000_top_down"
    dataset_name = "box_push_1000_angled"

    dataset_paths = exp_utils.get_dataset_paths(dataset_name)
    dataset_root = dataset_paths['dataset_root']
    model_file = dataset_paths['dense_descriptor_model_chkpt']

    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(
        dataset_root, max_num_episodes=None)

    camera_list = [dataset_paths['main_camera_name']]
    target_camera_names = [dataset_paths['main_camera_name']]

    model_config = getDictFromYamlFilename(
        os.path.join(os.path.dirname(model_file), 'config.yaml'))
    # disable augmentation for visualization
    model_config['dataset']['data_augmentation'] = False

    # could also use the "train" split here
    dataset = DynamicDrakeSimDataset(model_config, multi_episode_dict,
                                     phase="valid")

    model = torch.load(model_file).cuda().eval()

    heatmap_vis = HeatmapVisualization(model_config,
                                       dataset,
                                       model,
                                       visualize_3D=False,
                                       camera_list=camera_list,
                                       target_camera_names=target_camera_names,
                                       verbose=True,
                                       sample_same_episode=False)
    heatmap_vis.run()
def run(data_folder, config_file=CONFIG_FILE, debug=False, globalsDict=None,
        background_scene_data_folder=None):
    """
    Runs the change detection pipeline.

    :param data_folder: the 'processed' subfolder of a top-level log folder
    :param config_file: yaml file with the change detection parameters
    :param debug: if True, don't auto-run/quit; leave the app open
    :param globalsDict: dict to store shared objects in (defaults to globals())
    :param background_scene_data_folder: data folder for the background scene;
        defaults to data_folder
    :return: None (blocks until the director app exits)
    """

    if globalsDict is None:
        globalsDict = globals()

    if background_scene_data_folder is None:
        background_scene_data_folder = data_folder

    # BUGFIX: the original re-assigned `config_file = CONFIG_FILE` here,
    # silently ignoring the caller-supplied config_file argument.
    config = utils.getDictFromYamlFilename(config_file)

    changeDetection, obj_dict = change_detection.ChangeDetection.from_data_folder(
        data_folder, config=config, globalsDict=globalsDict,
        background_data_folder=background_scene_data_folder)

    app = obj_dict['app']
    globalsDict['cd'] = changeDetection
    view = obj_dict['view']

    # if debug:
    #     changeDetection.background_reconstruction.visualize_reconstruction(view, name='background')

    def single_shot_function():
        changeDetection.run()
        app.app.quit()

    if not debug:
        TimerCallback(callback=single_shot_function).singleShot(0)

    app.app.start(restoreWindow=True)
def run(data_folder, config_file=CONFIG_FILE, debug=False, globalsDict=None):
    """
    Runs the change detection pipeline with cropping effectively disabled and
    renders depth images.

    :param data_folder: the 'processed' subfolder of a top-level log folder
    :param config_file: yaml file with the change detection parameters
    :param debug: if True, don't auto-run/quit; leave the app open
    :param globalsDict: dict to store shared objects in (defaults to globals())
    :return: None (blocks until the director app exits)
    """

    if globalsDict is None:
        globalsDict = globals()

    # BUGFIX: the original re-assigned `config_file = CONFIG_FILE` here,
    # silently ignoring the caller-supplied config_file argument.
    config = utils.getDictFromYamlFilename(config_file)

    # make crop box dimensions large so effectively no cropping happens
    for key in config['crop_box']['dimensions']:
        config['crop_box']['dimensions'][key] = 10.0  # set it to 10 meters

    changeDetection, obj_dict = change_detection.ChangeDetection.from_data_folder(
        data_folder, config=config, globalsDict=globalsDict,
        background_data_folder=data_folder)

    app = obj_dict['app']
    globalsDict['cd'] = changeDetection
    view = obj_dict['view']

    # if debug:
    #     changeDetection.background_reconstruction.visualize_reconstruction(view, name='background')

    def single_shot_function():
        changeDetection.render_depth_images()
        app.app.quit()

    if not debug:
        TimerCallback(callback=single_shot_function).singleShot(0)

    app.app.start(restoreWindow=True)
Beispiel #29
0
def main():
    """Interactive heatmap visualization for the dps_box_on_side_600 dataset."""
    model_file = "/home/manuelli/data/key_dynam/dev/experiments/drake_pusher_slider_box_on_side/dataset_dps_box_on_side_600/trained_models/perception/dense_descriptor/3D_loss_camera_angled_2020-05-13-23-39-35-818188/net_best_dy_model.pth"

    dataset_paths = get_dataset_paths("dps_box_on_side_600")
    dataset_root = dataset_paths['dataset_root']
    dataset_name = dataset_paths['dataset_name']
    env_config = dataset_paths['config']

    print("dataset_root", dataset_root)

    multi_episode_dict = DCDrakeSimEpisodeReader.load_dataset(dataset_root)

    camera_list = ["camera_angled"]

    # should really use validation data, but will use train for now . . .
    # will be cross-scene so that shouldn't matter . . . .
    model_config = getDictFromYamlFilename(
        os.path.join(os.path.dirname(model_file), 'config.yaml'))
    dataset = DynamicDrakeSimDataset(model_config, multi_episode_dict,
                                     phase="train")

    model = torch.load(model_file).cuda().eval()

    heatmap_vis = HeatmapVisualization(
        model_config,
        dataset,
        model,
        visualize_3D=False,
        camera_list=camera_list,
        verbose=True,
        target_camera_names=['camera_angled', 'camera_angled_rotated'])
    heatmap_vis.run()
Beispiel #30
0
import copy

import dense_correspondence_manipulation.utils.utils as utils
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(
    os.path.join(dc_source_dir, "dense_correspondence",
                 "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

from dense_correspondence_manipulation.simple_pixel_correspondence_labeler.annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config

# Load the composite dataset config and build a SpartanDataset in train mode.
config_filename = os.path.join(
    utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
    'dataset', 'composite', 'caterpillar_baymax_starbot_onlymulti_front.yaml')
config = utils.getDictFromYamlFilename(config_filename)
sd = SpartanDataset(config=config)
sd.set_train_mode()

# Previously annotated correspondence pairs are read from the current
# working directory.
annotated_data_yaml_filename = os.path.join(os.getcwd(),
                                            "new_annotated_pairs.yaml")
annotated_data = utils.getDictFromYamlFilename(annotated_data_yaml_filename)

# index into annotated_data of the pair currently being displayed
index_of_pair_to_display = 0


def draw_points(img, img_points_picked):
    """Draw a colored reticle on `img` for each picked point (dicts with "u"/"v" keys)."""
    for idx, point in enumerate(img_points_picked):
        draw_reticle(img, int(point["u"]), int(point["v"]),
                     label_colors[idx % len(label_colors)])
Beispiel #31
0
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork
import dense_correspondence.correspondence_tools.correspondence_finder as correspondence_finder

sys.path.append(
    os.path.join(os.path.dirname(__file__),
                 "../simple-pixel-correspondence-labeler"))
from annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2, label_colors

# color constants; channel order matches how they are consumed downstream
# (presumably BGR, the OpenCV convention — TODO confirm)
COLOR_RED = np.array([0, 0, 255])
COLOR_GREEN = np.array([0, 255, 0])

utils.set_default_cuda_visible_devices()
# evaluation.yaml lists the networks available to the visualization
eval_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(),
                                    'config', 'dense_correspondence',
                                    'evaluation', 'evaluation.yaml')
EVAL_CONFIG = utils.getDictFromYamlFilename(eval_config_filename)


class HeatmapVisualization(object):
    """
    Launches a live interactive heatmap visualization.

    Edit config/dense_correspondence/heatmap_vis/heatmap.yaml to specify which
    networks to visualize. Specifically add the network you want to visualize
    to the "networks" list. Make sure that this network appears in the file
    pointed to by EVAL_CONFIG.

    Usage: Launch this file with python after sourcing the environment with
    `use_pytorch_dense_correspondence`
    Then `python live_heatmap_visualization.py`.
    """
    def __init__(self, config):
        # heatmap visualization config (the parsed heatmap.yaml dict)
        self._config = config
        # evaluation helper built from the module-level EVAL_CONFIG
        self._dce = DenseCorrespondenceEvaluation(EVAL_CONFIG)
from dense_correspondence.training.training import *
import sys
import logging

#utils.set_default_cuda_visible_devices()
# utils.set_cuda_visible_devices([0]) # use this to manually set CUDA_VISIBLE_DEVICES

from dense_correspondence.training.training import DenseCorrespondenceTraining
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
logging.basicConfig(level=logging.INFO)

from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation

# Dataset configuration: which scenes/objects make up the training set.
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'dataset', 'composite', 'toy.yaml')
config = utils.getDictFromYamlFilename(config_filename)

# Training hyperparameters for the toy tutorial run.
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'training', 'toy_training.yaml')

train_config = utils.getDictFromYamlFilename(train_config_file)
dataset = SpartanDataset(config=config)

# Where logs/checkpoints are written.  NOTE(review): hard-coded user path.
logging_dir = "/home/zhouxian/git/pytorch-dense-correspondence/pdc/trained_models/tutorials"
d = 3 # the descriptor dimension
name = "toy_hacker_%d" %(d)
# Override the loaded training config with the run name, log dir and descriptor dim.
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d

# Flag presumably consumed further down in the script to start training.
TRAIN = True
import sys
import os
import cv2
import numpy as np
import copy

import dense_correspondence_manipulation.utils.utils as utils
# Put the dense-correspondence source tree (and its correspondence_tools) on sys.path.
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(os.path.join(dc_source_dir, "dense_correspondence", "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

# Build the dataset from the star_bot_front_only composite config and use its train split.
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 
                               'dataset', 'composite', 'star_bot_front_only.yaml')
config = utils.getDictFromYamlFilename(config_filename)
sd = SpartanDataset(config=config)
sd.set_train_mode()


USE_FIRST_IMAGE = False # force using first image in each log
RANDOMIZE_TEST_TRAIN = False # randomize selecting

def numpy_to_cv2(numpy_img):
    """Convert an RGB numpy image to BGR channel order (what OpenCV expects).

    Returns a new array; the input is not modified.
    """
    bgr_view = numpy_img[:, :, ::-1]
    return bgr_view.copy()

def pil_image_to_cv2(pil_image):
    """Convert a PIL image to a BGR numpy array (OpenCV channel order).

    Returns a new array owning its data.
    """
    rgb = np.array(pil_image)
    return rgb[:, :, ::-1].copy()
def get_cv2_img_pair_from_spartan():
    scene_name_a = sd.get_random_scene_name()
    num_attempts = 50
    for i in range(num_attempts):
# Beispiel #34
# 0
from director import ioUtils
import PythonQt

from director import mainwindowapp
from PythonQt import QtCore, QtGui

import cv2

import dense_correspondence_manipulation.change_detection.change_detection as change_detection
from dense_correspondence_manipulation.utils.constants import *
import dense_correspondence_manipulation.utils.utils as utils
import dense_correspondence_manipulation.utils.director_utils as director_utils
from dense_correspondence_manipulation.fusion.fusion_reconstruction import FusionReconstruction, TSDFReconstruction
from dense_correspondence_manipulation.mesh_processing.mesh_render import MeshColorizer

# Global change-detection settings (crop box dimensions/pose, etc.), loaded once.
CONFIG = utils.getDictFromYamlFilename(CHANGE_DETECTION_CONFIG_FILE)


class ReconstructionProcessing(object):
    """Helpers for processing a fused scene reconstruction inside Director."""

    def __init__(self):
        pass

    def spawnCropBox(self, dims=None):
        """
        Create a crop box for the reconstruction.

        :param dims: [x, y, z] box dimensions; defaults to
            CONFIG['crop_box']['dimensions'] when None.
        """
        # NOTE(review): method appears truncated in this fragment; the transform
        # below is presumably applied to a box object further down — confirm
        # against the full file.
        if dims is None:
            dim_x = CONFIG['crop_box']['dimensions']['x']
            dim_y = CONFIG['crop_box']['dimensions']['y']
            dim_z = CONFIG['crop_box']['dimensions']['z']
            dims = [dim_x, dim_y, dim_z]

        transform = director_utils.transformFromPose(
            CONFIG['crop_box']['transform'])
import dense_correspondence
from dense_correspondence.evaluation.evaluation import *
from dense_correspondence.evaluation.plotting import normalize_descriptor
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork

# Make the simple-pixel-correspondence-labeler helpers importable; it lives in
# a sibling directory of this script.
sys.path.append(os.path.join(os.path.dirname(__file__), "../simple-pixel-correspondence-labeler"))
# Fix: label_colors was previously listed twice in this import.
from annotate_correspondences import label_colors, draw_reticle, pil_image_to_cv2, drawing_scale_config, numpy_to_cv2



# OpenCV uses BGR channel ordering.
COLOR_RED = np.array([0, 0, 255])
COLOR_GREEN = np.array([0,255,0])

utils.set_default_cuda_visible_devices()
# Evaluation config lists the trained networks available for visualization.
eval_config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'evaluation', 'lucas_evaluation.yaml')
EVAL_CONFIG = utils.getDictFromYamlFilename(eval_config_filename)


class HeatmapVisualization(object):
    """Interactive heatmap visualization over the networks named in EVAL_CONFIG."""

    def __init__(self, config):
        # config: dict describing which networks to visualize.
        self._config = config
        self._dce = DenseCorrespondenceEvaluation(EVAL_CONFIG)
        self._load_networks()
        # Reticle drawn in green by default.
        self._reticle_color = COLOR_GREEN
        # self.load_specific_dataset() # uncomment if you want to load a specific dataset

    def _load_networks(self):
        # we will use the dataset for the first network in the series
        # NOTE(review): method appears truncated in this fragment.
        self._dcn_dict = dict()
# Beispiel #36
# 0
 def load_default_config():
     """Return the RLG_iiwa_1 change-detection configuration as a dict."""
     cfg_path = os.path.join(
         utils.getDenseCorrespondenceSourceDir(), 'config', 'stations',
         'RLG_iiwa_1', 'change_detection.yaml')
     return utils.getDictFromYamlFilename(cfg_path)
import PythonQt


from director import mainwindowapp
from PythonQt import QtCore, QtGui

import cv2

import dense_correspondence_manipulation.change_detection.change_detection as change_detection
from dense_correspondence_manipulation.utils.constants import *
import dense_correspondence_manipulation.utils.utils as utils
import dense_correspondence_manipulation.utils.director_utils as director_utils
from dense_correspondence_manipulation.fusion.fusion_reconstruction import FusionReconstruction, TSDFReconstruction


# Global change-detection settings loaded once at import time.
CONFIG = utils.getDictFromYamlFilename(CHANGE_DETECTION_CONFIG_FILE)

class ReconstructionProcessing(object):
    """Helpers for processing a fused scene reconstruction inside Director."""

    def __init__(self):
        pass

    def spawnCropBox(self, dims=None):
        """
        Create a crop box for the reconstruction.

        :param dims: [x, y, z] box dimensions; defaults to
            CONFIG['crop_box']['dimensions'] when None.
        """
        # NOTE(review): method appears truncated in this fragment — the box
        # built from DebugData below is presumably completed further down.
        if dims is None:
            dim_x = CONFIG['crop_box']['dimensions']['x']
            dim_y = CONFIG['crop_box']['dimensions']['y']
            dim_z = CONFIG['crop_box']['dimensions']['z']
            dims = [dim_x, dim_y, dim_z]

        transform = director_utils.transformFromPose(CONFIG['crop_box']['transform'])
        d = DebugData()
 def load_default_config():
     """Return the RLG_iiwa_1 change-detection configuration as a dict."""
     path = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config',
                         'stations', 'RLG_iiwa_1', 'change_detection.yaml')
     return utils.getDictFromYamlFilename(path)
import numpy as np
import copy

import dense_correspondence_manipulation.utils.utils as utils

# Put the dense-correspondence source tree (and its correspondence_tools) on sys.path.
dc_source_dir = utils.getDenseCorrespondenceSourceDir()
sys.path.append(dc_source_dir)
sys.path.append(
    os.path.join(dc_source_dir, "dense_correspondence",
                 "correspondence_tools"))
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset, ImageType

# Build the dataset from the star_bot_front_only composite config, train split.
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(),
                               'config', 'dense_correspondence', 'dataset',
                               'composite', 'star_bot_front_only.yaml')
config = utils.getDictFromYamlFilename(config_filename)
sd = SpartanDataset(config=config)
sd.set_train_mode()

USE_FIRST_IMAGE = False  # force using first image in each log
RANDOMIZE_TEST_TRAIN = False  # randomize selecting


def numpy_to_cv2(numpy_img):
    """Reverse the channel axis of an RGB numpy image to get BGR (OpenCV order).

    A fresh array is returned; the input stays untouched.
    """
    reversed_channels = numpy_img[:, :, ::-1]
    return reversed_channels.copy()


def pil_image_to_cv2(pil_image):
    """Convert a PIL image into a BGR numpy array suitable for OpenCV."""
    as_rgb = np.array(pil_image)
    as_bgr = as_rgb[:, :, ::-1]
    return as_bgr.copy()
        descriptor_filename = os.path.join(
            save_dir, SceneStructure.descriptor_image_filename(img_idx))
        np.save(descriptor_filename, res)

        print("descriptor_filename", descriptor_filename)
        print("processing image %d of %d" % (counter, num_images))
        counter += 1


if __name__ == "__main__":
    # Resolve the evaluation config and pin CUDA devices from the user defaults.
    dc_source_dir = utils.getDenseCorrespondenceSourceDir()
    config_filename = os.path.join(dc_source_dir, 'config',
                                   'dense_correspondence', 'evaluation',
                                   'lucas_evaluation.yaml')
    eval_config = utils.getDictFromYamlFilename(config_filename)
    default_config = utils.get_defaults_config()
    utils.set_cuda_visible_devices(default_config['cuda_visible_devices'])

    # Load the named trained network from the evaluation config.
    dce = DenseCorrespondenceEvaluation(eval_config)
    network_name = "caterpillar_M_background_0.500_3"
    dcn = dce.load_network_from_config(network_name)

    # Dataset the descriptors will be computed over.
    dataset_config_file = os.path.join(dc_source_dir, 'config',
                                       'dense_correspondence', 'dataset',
                                       'composite', 'caterpillar_only_9.yaml')
    dataset_config = utils.getDictFromYamlFilename(dataset_config_file)
    dataset = SpartanDataset(config=dataset_config)

    # SCENE_NAME / SAVE_DIR are presumably module-level constants defined
    # elsewhere in this file — confirm against the full source.
    scene_name = SCENE_NAME
    save_dir = SAVE_DIR